├── README.md
├── data
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── datasets.py
│   ├── datasets.pyc
│   ├── transform.py
│   └── transform.pyc
├── datasets
│   ├── kitti
│   │   ├── test.txt
│   │   ├── train.txt
│   │   └── val.txt
│   └── vkitti
│       ├── test.txt
│       ├── train.txt
│       └── val.txt
├── img
│   └── framework.png
├── models
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── base_model.py
│   ├── base_model.pyc
│   ├── fs_model.py
│   ├── ft_model.py
│   ├── gasda_model.py
│   ├── networks.py
│   └── networks.pyc
├── options
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── base_options.py
│   ├── base_options.pyc
│   ├── test_options.py
│   ├── test_options.pyc
│   ├── train_options.py
│   └── train_options.pyc
├── test.py
├── train.py
└── utils
    ├── bilinear_sampler.py
    ├── dataset_util.py
    ├── image_pool.py
    └── util.py

/README.md:
--------------------------------------------------------------------------------
1 | # GASDA
2 | This is the PyTorch implementation of our CVPR'19 paper:
3 | 
4 | **S. Zhao, H. Fu, M. Gong and D. Tao. Geometry-Aware Symmetric Domain Adaptation for Monocular Depth Estimation. [PAPER](https://sshan-zhao.github.io/papers/gasda.pdf) [POSTER](https://sshan-zhao.github.io/papers/gasda_poster.pdf)**
5 | 
6 | ![Framework](https://github.com/sshan-zhao/GASDA/blob/master/img/framework.png)
7 | 
8 | ## Environment
9 | 1. Python 3.6
10 | 2. PyTorch 0.4.1
11 | 3. CUDA 9.0
12 | 4. Ubuntu 16.04
13 | 
14 | ## Datasets
15 | [KITTI](http://www.cvlibs.net/datasets/kitti/raw_data.php)
16 | 
17 | [vKITTI](https://europe.naverlabs.com/Research/Computer-Vision/Proxy-Virtual-Worlds/)
18 | 
19 | Prepare the two datasets according to the datalists (*.txt files in [datasets](https://github.com/sshan-zhao/GASDA/tree/master/datasets)):
20 | ```
21 | datasets
22 | |----kitti
23 |     |----2011_09_26
24 |     |----2011_09_28
25 |     |----.........
26 | |----vkitti
27 |     |----rgb
28 |         |----0006
29 |         |-----.......
30 |     |----depth
31 |         |----0006
32 |         |----.......
33 | ```
34 | 
35 | ## Training (Tesla V100, 16GB)
36 | - Train [CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) using the official experimental settings, or download our [pretrained models](https://1drv.ms/f/s!Aq9eyj7afTjMcZorokRKW4ATgZ8).
37 | 
38 | - Train F_t
39 | ```
40 | python train.py --model ft --gpu_ids 0 --batchSize 8 --loadSize 256 1024 --g_tgt_premodel ./cyclegan/G_Tgt.pth
41 | ```
42 | 
43 | - Train F_s
44 | ```
45 | python train.py --model fs --gpu_ids 0 --batchSize 8 --loadSize 256 1024 --g_src_premodel ./cyclegan/G_Src.pth
46 | ```
47 | 
48 | - Train GASDA using the pretrained F_s, F_t and CycleGAN.
49 | ```
50 | python train.py --freeze_bn --freeze_in --model gasda --gpu_ids 0 --batchSize 3 --loadSize 192 640 --g_src_premodel ./cyclegan/G_Src.pth --g_tgt_premodel ./cyclegan/G_Tgt.pth --d_src_premodel ./cyclegan/D_Src.pth --d_tgt_premodel ./cyclegan/D_Tgt.pth --t_depth_premodel ./checkpoints/vkitti2kitti_ft_bn/**_net_G_Depth_T.pth --s_depth_premodel ./checkpoints/vkitti2kitti_fs_bn/**_net_G_Depth_S.pth
51 | ```
52 | Note: this training strategy differs from the one described in our paper.
53 | 
54 | ## Test
55 | Download our trained [MODELS](https://drive.google.com/open?id=1CvuGUTObRhpZpSTYxy-BIRhft6ttMJOP).
56 | 
57 | Copy the provided models to GASDA/checkpoints/vkitti2kitti_gasda/, rename them with the prefix 1_ (e.g., 1_net_D_Src.pth; see the sketch after the test command), and then run
58 | ```
59 | python test.py --test_datafile 'test.txt' --which_epoch 1 --model gasda --gpu_ids 0 --batchSize 1 --loadSize 192 640
60 | ```
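The copy-and-rename step above can be scripted. The snippet below is only a minimal sketch, assuming the downloaded checkpoints were unpacked into a hypothetical ./downloaded_models/ folder; the source folder name and the original file names are placeholders, and only the target directory and the 1_ prefix expected by --which_epoch 1 come from the instructions above.
```
# Minimal helper sketch (not part of the repo): copy downloaded *.pth files into
# checkpoints/vkitti2kitti_gasda/ and give them the "1_" epoch prefix.
# "./downloaded_models" is a hypothetical location -- adjust it to your download path.
import os
import shutil

src_dir = './downloaded_models'
dst_dir = './checkpoints/vkitti2kitti_gasda'
os.makedirs(dst_dir, exist_ok=True)

for name in os.listdir(src_dir):
    if not name.endswith('.pth'):
        continue
    new_name = name if name.startswith('1_') else '1_' + name
    shutil.copy(os.path.join(src_dir, name), os.path.join(dst_dir, new_name))
    print(name, '->', new_name)
```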
61 | ## Citation
62 | If you use this code for your research, please cite our paper.
63 | ```
64 | @inproceedings{zhao2019geometry,
65 |   title={Geometry-Aware Symmetric Domain Adaptation for Monocular Depth Estimation},
66 |   author={Zhao, Shanshan and Fu, Huan and Gong, Mingming and Tao, Dacheng},
67 |   booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
68 |   pages={9788--9798},
69 |   year={2019}
70 | }
71 | ```
72 | ## Acknowledgments
73 | Code is inspired by [T^2Net](https://github.com/lyndonzheng/Synthetic2Realistic) and [CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
74 | 
75 | ## Contact
76 | Shanshan Zhao: szha4333@uni.sydney.edu.au or sshan.zhao00@gmail.com
77 | 
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | from torchvision.transforms import Compose, Normalize, ToTensor
4 | from data.datasets import get_dataset, ConcatDataset
5 | from data.transform import RandomImgAugment, DepthToTensor
6 | 
7 | def create_test_dataloader(args):
8 | 
9 |     joint_transform_list = [RandomImgAugment(True, True, True, args.loadSize)]
10 |     img_transform_list = [ToTensor(), Normalize([.5, .5, .5], [.5, .5, .5])]
11 | 
12 |     joint_transform = Compose(joint_transform_list)
13 | 
14 |     img_transform = Compose(img_transform_list)
15 | 
16 |     depth_transform = Compose([DepthToTensor()])
17 | 
18 |     dataset = get_dataset(root=args.root, data_file=args.test_datafile, phase='test',
19 |                           dataset=args.tgt_dataset, img_transform=img_transform, joint_transform=joint_transform,
20 |                           depth_transform=None, test_dataset=args.test_dataset)
21 |     loader = torch.utils.data.DataLoader(
22 |                             dataset,
23 |                             batch_size=1, shuffle=False,
24 |                             num_workers=int(args.nThreads),
25 |                             pin_memory=True)
26 | 
27 |     return loader
28 | 
29 | def create_train_dataloader(args):
30 |     joint_transform_list = [RandomImgAugment(args.no_flip, args.no_rotation, args.no_augment, args.loadSize)]
31 |     img_transform_list = [ToTensor(), Normalize([.5, .5, .5], [.5, .5, .5])]
32 | 
33 |     joint_transform = Compose(joint_transform_list)
34 | 
35 |     img_transform = Compose(img_transform_list)
36 | 
37 |     depth_transform = Compose([DepthToTensor()])
38 | 
39 |     src_dataset = get_dataset(root=args.src_root, data_file=args.src_train_datafile, phase='train',
40 |                               dataset=args.src_dataset,
41 |                               img_transform=img_transform, depth_transform=depth_transform,
42 |                               joint_transform=joint_transform)
43 | 
44 | 
45 | 
46 |     tgt_dataset = get_dataset(root=args.tgt_root, data_file=args.tgt_train_datafile, phase='train',
47 |                               dataset=args.tgt_dataset,
48 |                               img_transform=img_transform, joint_transform=joint_transform,
49 |                               depth_transform=depth_transform)
50 | 
51 |     loader = torch.utils.data.DataLoader(
52 |                             ConcatDataset(
53 |                                 src_dataset,
54 |                                 tgt_dataset,
55 |                             ),
56 |                             batch_size=args.batchSize, shuffle=True,
57 |                             num_workers=int(args.nThreads),
58 |                             pin_memory=True)
59 | 
60 |     return loader
61 | 
62 | 
63 | def create_dataloader(args):
64 | 
65 |     if not args.isTrain:
66 |         return create_test_dataloader(args)
67 | 
68 |     else:
69 |         return create_train_dataloader(args)
70 | 
--------------------------------------------------------------------------------
/data/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/data/__init__.pyc
--------------------------------------------------------------------------------
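create_dataloader(args) above is the single entry point used by train.py and test.py. The following is a minimal usage sketch, assuming an argparse-style args namespace that provides the attributes referenced in data/__init__.py; the paths, file names and sizes below are placeholders, not the project's official option defaults.
```
# Usage sketch for data/__init__.py (values below are placeholders, not official defaults).
from argparse import Namespace

from data import create_dataloader

args = Namespace(
    isTrain=True,
    # source (synthetic) domain: vKITTI with ground-truth depth
    src_root='./datasets/vkitti', src_train_datafile='train.txt', src_dataset='vkitti',
    # target (real) domain: KITTI stereo pairs
    tgt_root='./datasets/kitti', tgt_train_datafile='train.txt', tgt_dataset='kitti',
    loadSize=[192, 640], batchSize=3, nThreads=4,
    no_flip=False, no_rotation=False, no_augment=False,
)

loader = create_dataloader(args)
for batch in loader:
    src, tgt = batch['src'], batch['tgt']        # ConcatDataset merges one sample per domain
    print(src['img'].shape, src['depth'].shape)  # vKITTI image and depth
    print(tgt['left_img'].shape, tgt['fb'])      # KITTI left image and calibration term fb
    break
```
Each batch is a nested dictionary: the 'src' entry carries a vKITTI image/depth pair, while the 'tgt' entry carries a KITTI stereo pair plus the fb value read from the calibration file (see data/datasets.py below).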
/data/datasets.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import glob 3 | import os 4 | import os.path as osp 5 | 6 | import numpy as np 7 | import torch 8 | from PIL import Image 9 | from PIL import ImageOps 10 | from torch.utils import data 11 | from utils.dataset_util import KITTI 12 | import random 13 | import cv2 14 | 15 | class ConcatDataset(torch.utils.data.Dataset): 16 | def __init__(self, *datasets): 17 | self.datasets = datasets 18 | 19 | def __getitem__(self, i): 20 | 21 | dd = {} 22 | {dd.update(d[i]) for d in self.datasets if d is not None} 23 | 24 | return dd 25 | 26 | def __len__(self): 27 | return max(len(d) for d in self.datasets if d is not None) 28 | 29 | class VKittiDataset(data.Dataset): 30 | def __init__(self, root='./datasets', data_file='src_train.list', 31 | phase='train', img_transform=None, depth_transform=None, 32 | joint_transform=None): 33 | self.root = root 34 | self.data_file = data_file 35 | self.files = [] 36 | self.phase = phase 37 | self.img_transform = img_transform 38 | self.depth_transform = depth_transform 39 | self.joint_transform = joint_transform 40 | 41 | with open(osp.join(self.root, self.data_file), 'r') as f: 42 | data_list = f.read().split('\n') 43 | for data in data_list: 44 | 45 | if len(data) == 0: 46 | continue 47 | data_info = data.split(' ') 48 | 49 | self.files.append({ 50 | "rgb": data_info[0], 51 | "depth": data_info[1] 52 | }) 53 | 54 | 55 | 56 | def __len__(self): 57 | return len(self.files) 58 | 59 | def read_data(self, datafiles): 60 | 61 | assert osp.exists(osp.join(self.root, datafiles['rgb'])), "Image does not exist" 62 | rgb = Image.open(osp.join(self.root, datafiles['rgb'])).convert('RGB') 63 | 64 | assert osp.exists(osp.join(self.root, datafiles['depth'])), "Depth does not exist" 65 | depth = Image.open(osp.join(self.root, datafiles['depth'])) 66 | 67 | return rgb, depth 68 | 69 | def __getitem__(self, index): 70 | if self.phase == 'train': 71 | index = random.randint(0, len(self)-1) 72 | if index > len(self) - 1: 73 | index = index % len(self) 74 | datafiles = self.files[index] 75 | img, depth = self.read_data(datafiles) 76 | 77 | if self.joint_transform is not None: 78 | if self.phase == 'train': 79 | img, _, depth, _ = self.joint_transform((img, None, depth, self.phase, None)) 80 | else: 81 | img, _, depth, _ = self.joint_transform((img, None, depth, 'test', None)) 82 | 83 | if self.img_transform is not None: 84 | img = self.img_transform(img) 85 | 86 | if self.depth_transform is not None: 87 | depth = self.depth_transform(depth) 88 | 89 | if self.phase =='test': 90 | data = {} 91 | data['img'] = img 92 | data['depth'] = depth 93 | return data 94 | 95 | data = {} 96 | if img is not None: 97 | data['img'] = img 98 | if depth is not None: 99 | data['depth'] = depth 100 | return {'src': data} 101 | 102 | class KittiDataset(data.Dataset): 103 | def __init__(self, root='./datasets', data_file='tgt_train.list', phase='train', 104 | img_transform=None, joint_transform=None, depth_transform=None): 105 | 106 | self.root = root 107 | self.data_file = data_file 108 | self.files = [] 109 | self.phase = phase 110 | self.img_transform = img_transform 111 | self.joint_transform = joint_transform 112 | self.depth_transform = depth_transform 113 | 114 | with open(osp.join(self.root, self.data_file), 'r') as f: 115 | data_list = f.read().split('\n') 116 | for data in data_list: 117 | if len(data) == 0: 118 | continue 119 | 120 | data_info = data.split(' ') 121 | 122 |
self.files.append({ 123 | "l_rgb": data_info[0], 124 | "r_rgb": data_info[1], 125 | "cam_intrin": data_info[2], 126 | "depth": data_info[3] 127 | }) 128 | 129 | def __len__(self): 130 | return len(self.files) 131 | 132 | def read_data(self, datafiles): 133 | #print(osp.join(self.root, datafiles['l_rgb'])) 134 | assert osp.exists(osp.join(self.root, datafiles['l_rgb'])), "Image does not exist" 135 | l_rgb = Image.open(osp.join(self.root, datafiles['l_rgb'])).convert('RGB') 136 | w = l_rgb.size[0] 137 | h = l_rgb.size[1] 138 | assert osp.exists(osp.join(self.root, datafiles['r_rgb'])), "Image does not exist" 139 | r_rgb = Image.open(osp.join(self.root, datafiles['r_rgb'])).convert('RGB') 140 | 141 | kitti = KITTI() 142 | assert osp.exists(osp.join(self.root, datafiles['cam_intrin'])), "Camera info does not exist" 143 | fb = kitti.get_fb(osp.join(self.root, datafiles['cam_intrin'])) 144 | assert osp.exists(osp.join(self.root, datafiles['depth'])), "Depth does not exist" 145 | depth = kitti.get_depth(osp.join(self.root, datafiles['cam_intrin']), 146 | osp.join(self.root, datafiles['depth']), [h, w]) 147 | 148 | return l_rgb, r_rgb, fb, depth 149 | 150 | def __getitem__(self, index): 151 | if self.phase == 'train': 152 | index = random.randint(0, len(self)-1) 153 | if index > len(self)-1: 154 | index = index % len(self) 155 | datafiles = self.files[index] 156 | l_img, r_img, fb, depth = self.read_data(datafiles) 157 | 158 | if self.joint_transform is not None: 159 | if self.phase == 'train': 160 | l_img, r_img, _, fb = self.joint_transform((l_img, r_img, None, 'train', fb)) 161 | else: 162 | l_img, r_img, _, fb = self.joint_transform((l_img, r_img, None, 'test', fb)) 163 | 164 | if self.img_transform is not None: 165 | l_img = self.img_transform(l_img) 166 | if r_img is not None: 167 | r_img = self.img_transform(r_img) 168 | 169 | if self.phase =='test': 170 | data = {} 171 | data['left_img'] = l_img 172 | data['right_img'] = r_img 173 | data['depth'] = depth 174 | data['fb'] = fb 175 | return data 176 | 177 | data = {} 178 | if l_img is not None: 179 | data['left_img'] = l_img 180 | if r_img is not None: 181 | data['right_img'] = r_img 182 | if fb is not None: 183 | data['fb'] = fb 184 | 185 | return {'tgt': data} 186 | 187 | # just for test 188 | class StereoDataset(data.Dataset): 189 | def __init__(self, root='./datasets', data_file='test.list', phase='test', 190 | img_transform=None, joint_transform=None, depth_transform=None): 191 | self.root = root 192 | self.data_file = data_file 193 | self.files = [] 194 | self.phase = phase 195 | self.img_transform = img_transform 196 | self.joint_transform = joint_transform 197 | 198 | with open(osp.join(self.root, self.data_file), 'r') as f: 199 | data_list = f.read().split('\n') 200 | for data in data_list: 201 | if len(data) == 0: 202 | continue 203 | 204 | data_info = data.split(' ') 205 | 206 | self.files.append({ 207 | "rgb": data_info[0], 208 | }) 209 | 210 | def __len__(self): 211 | return len(self.files) 212 | 213 | def read_data(self, datafiles): 214 | 215 | print(osp.join(self.root, datafiles['rgb'])) 216 | assert osp.exists(osp.join(self.root, datafiles['rgb'])), "Image does not exist" 217 | rgb = Image.open(osp.join(self.root, datafiles['rgb'])).convert('RGB') 218 | 219 | disp = cv2.imread(osp.join(self.root, datafiles['rgb'].replace('image_2', 'disp_noc_0').replace('jpg', 'png')), -1) 220 | disp = disp.astype(np.float32)/256.0 221 | return rgb, disp 222 | 223 | def __getitem__(self, index): 224 | index = index % len(self) 225 | datafiles 
= self.files[index] 226 | img, disp = self.read_data(datafiles) 227 | 228 | if self.joint_transform is not None: 229 | img, _, _, _ = self.joint_transform((img, None, None, 'test', None)) 230 | 231 | if self.img_transform is not None: 232 | img = self.img_transform(img) 233 | 234 | data = {} 235 | data['left_img'] = img 236 | data['disp'] = disp 237 | return data 238 | 239 | def get_dataset(root, data_file='train.list', dataset='kitti', phase='train', 240 | img_transform=None, depth_transform=None, 241 | joint_transform=None, test_dataset='kitti'): 242 | 243 | DEFINED_DATASET = {'KITTI', 'VKITTI'} 244 | assert dataset.upper() in DEFINED_DATASET 245 | name2obj = {'KITTI': KittiDataset, 246 | 'VKITTI': VKittiDataset, 247 | } 248 | if phase == 'test' and test_dataset == 'stereo': 249 | name2obj['KITTI'] = StereoDataset 250 | 251 | return name2obj[dataset.upper()](root=root, data_file=data_file, phase=phase, 252 | img_transform=img_transform, depth_transform=depth_transform, 253 | joint_transform=joint_transform) 254 | 255 | -------------------------------------------------------------------------------- /data/datasets.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/data/datasets.pyc -------------------------------------------------------------------------------- /data/transform.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import math 3 | import numbers 4 | import random 5 | 6 | import numpy as np 7 | import torch 8 | from PIL import Image 9 | import torchvision.transforms.functional as F 10 | import torchvision.transforms as transforms 11 | 12 | class RandomHorizontalFlip(object): 13 | """ 14 | Random horizontal flip. 15 | 16 | prob = 0.5 17 | """ 18 | 19 | def __init__(self, prob=None): 20 | self.prob = prob 21 | 22 | def __call__(self, img): 23 | if (self.prob is None and random.random() < 0.5) or (self.prob is not None and self.prob < 0.5): 24 | return img.transpose(Image.FLIP_LEFT_RIGHT) 25 | 26 | return img 27 | 28 | 29 | class RandomVerticalFlip(object): 30 | """ 31 | Random vertical flip. 32 | 33 | prob = 0.5 34 | """ 35 | 36 | def __init__(self, prob=None): 37 | self.prob = prob 38 | 39 | def __call__(self, img): 40 | 41 | if (self.prob is None and random.random() < 0.5) or (self.prob is not None and self.prob < 0.5): 42 | return img.transpose(Image.FLIP_TOP_BOTTOM) 43 | return img 44 | 45 | class RandomPairedCrop(object): 46 | 47 | def __init__(self, size): 48 | self.size = size 49 | 50 | @staticmethod 51 | def get_params(img, output_size): 52 | """ 53 | Get parameters for ``crop`` for a random crop. 54 | Args: 55 | img (PIL Image): Image to be cropped. 56 | output_size (tuple): Expected output size of the crop. 57 | Returns: 58 | tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
59 | """ 60 | w, h = img.size 61 | th, tw = output_size 62 | if w == tw and h == th: 63 | return 0, 0, h, w 64 | 65 | i = random.randint(0, h - th) 66 | j = random.randint(0, w - tw) 67 | return i, j, th, tw 68 | 69 | def __call__(self, img): 70 | img1 = img[0] 71 | img2 = img[1] 72 | depth = img[2] 73 | 74 | i, j, th, tw = self.get_params(img1, self.size) 75 | 76 | img1 = F.crop(img1, i, j, th, tw) 77 | 78 | if depth is not None: 79 | depth = F.crop(depth, i, j, th, tw) 80 | if img2 is not None: 81 | img2 = F.crop(img2, i, j, th, tw) 82 | return img1, img2, depth 83 | 84 | 85 | class RandomImgAugment(object): 86 | """Randomly shift gamma""" 87 | 88 | def __init__(self, no_flip, no_rotation, no_augment, size=None): 89 | 90 | self.flip = not no_flip 91 | self.augment = not no_augment 92 | self.rotation = not no_rotation 93 | self.size = size 94 | 95 | 96 | def __call__(self, inputs): 97 | 98 | img1 = inputs[0] 99 | img2 = inputs[1] 100 | depth = inputs[2] 101 | phase = inputs[3] 102 | fb = inputs[4] 103 | 104 | h = img1.height 105 | w = img1.width 106 | w0 = w 107 | 108 | if self.size == [-1]: 109 | divisor = 32.0 110 | h = int(math.ceil(h/divisor) * divisor) 111 | w = int(math.ceil(w/divisor) * divisor) 112 | self.size = (h, w) 113 | 114 | scale_transform = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)]) 115 | 116 | img1 = scale_transform(img1) 117 | if img2 is not None: 118 | img2 = scale_transform(img2) 119 | 120 | if fb is not None: 121 | scale = float(self.size[1]) / float(w0) 122 | fb = fb * scale 123 | if phase == 'test': 124 | return img1, img2, depth, fb 125 | 126 | if depth is not None: 127 | scale_transform_d = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)]) 128 | depth = scale_transform_d(depth) 129 | 130 | if not self.size == 0: 131 | 132 | if depth is not None: 133 | arr_depth = np.array(depth, dtype=np.float32) 134 | arr_depth /= 65535.0 # cm->m, /10 135 | 136 | arr_depth[arr_depth<0.0] = 0.0 137 | depth = Image.fromarray(arr_depth, 'F') 138 | 139 | if self.flip and not (img2 is not None and depth is not None): 140 | 141 | flip_prob = random.random() 142 | flip_transform = transforms.Compose([RandomHorizontalFlip(flip_prob)]) 143 | if img2 is None: 144 | img1 = flip_transform(img1) 145 | else: 146 | if flip_prob < 0.5: 147 | img1_ = img1 148 | img2_ = img2 149 | img1 = flip_transform(img2_) 150 | img2 = flip_transform(img1_) 151 | if depth is not None: 152 | depth = flip_transform(depth) 153 | 154 | if self.rotation and not (img2 is not None and depth is not None): 155 | if random.random() < 0.5: 156 | degree = random.randrange(-500, 500)/100 157 | img1 = F.rotate(img1, degree, Image.BICUBIC) 158 | if depth is not None: 159 | depth = F.rotate(depth, degree, Image.BILINEAR) 160 | if img2 is not None: 161 | img2 = F.rotate(img2, degree, Image.BICUBIC) 162 | if depth is not None: 163 | depth = np.array(depth, dtype=np.float32) 164 | depth = depth * 2.0 165 | depth -= 1.0 166 | 167 | if self.augment: 168 | if random.random() < 0.5: 169 | 170 | brightness = random.uniform(0.8, 1.0) 171 | contrast = random.uniform(0.8, 1.0) 172 | saturation = random.uniform(0.8, 1.0) 173 | 174 | img1 = F.adjust_brightness(img1, brightness) 175 | img1 = F.adjust_contrast(img1, contrast) 176 | img1 = F.adjust_saturation(img1, saturation) 177 | 178 | if img2 is not None: 179 | img2 = F.adjust_brightness(img2, brightness) 180 | img2 = F.adjust_contrast(img2, contrast) 181 | img2 = F.adjust_saturation(img2, saturation) 182 | return img1, img2, depth, fb 183 | 184 | 
class DepthToTensor(object): 185 | def __call__(self, input): 186 | # tensors = [], [0, 1] -> [-1, 1] 187 | arr_input = np.array(input) 188 | tensors = torch.from_numpy(arr_input.reshape((1, arr_input.shape[0], arr_input.shape[1]))).float() 189 | return tensors 190 | 191 | -------------------------------------------------------------------------------- /data/transform.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/data/transform.pyc -------------------------------------------------------------------------------- /datasets/vkitti/test.txt: -------------------------------------------------------------------------------- 1 | rgb/0006/clone/00051.png depth/0006/clone/00051.png 2 | rgb/0006/clone/00163.png depth/0006/clone/00163.png 3 | rgb/0006/clone/00150.png depth/0006/clone/00150.png 4 | rgb/0006/clone/00104.png depth/0006/clone/00104.png 5 | rgb/0006/clone/00208.png depth/0006/clone/00208.png 6 | rgb/0006/clone/00238.png depth/0006/clone/00238.png 7 | rgb/0006/clone/00228.png depth/0006/clone/00228.png 8 | rgb/0006/clone/00241.png depth/0006/clone/00241.png 9 | rgb/0006/15-deg-left/00011.png depth/0006/15-deg-left/00011.png 10 | rgb/0006/15-deg-left/00208.png depth/0006/15-deg-left/00208.png 11 | rgb/0006/15-deg-left/00181.png depth/0006/15-deg-left/00181.png 12 | rgb/0006/sunset/00157.png depth/0006/sunset/00157.png 13 | rgb/0006/sunset/00190.png depth/0006/sunset/00190.png 14 | rgb/0006/sunset/00173.png depth/0006/sunset/00173.png 15 | rgb/0006/sunset/00222.png depth/0006/sunset/00222.png 16 | rgb/0006/sunset/00023.png depth/0006/sunset/00023.png 17 | rgb/0006/sunset/00207.png depth/0006/sunset/00207.png 18 | rgb/0006/sunset/00153.png depth/0006/sunset/00153.png 19 | rgb/0006/sunset/00169.png depth/0006/sunset/00169.png 20 | rgb/0006/rain/00067.png depth/0006/rain/00067.png 21 | rgb/0006/rain/00032.png depth/0006/rain/00032.png 22 | rgb/0006/rain/00014.png depth/0006/rain/00014.png 23 | rgb/0006/rain/00061.png depth/0006/rain/00061.png 24 | rgb/0006/morning/00258.png depth/0006/morning/00258.png 25 | rgb/0006/morning/00212.png depth/0006/morning/00212.png 26 | rgb/0006/morning/00032.png depth/0006/morning/00032.png 27 | rgb/0006/morning/00172.png depth/0006/morning/00172.png 28 | rgb/0006/morning/00193.png depth/0006/morning/00193.png 29 | rgb/0006/morning/00071.png depth/0006/morning/00071.png 30 | rgb/0006/morning/00250.png depth/0006/morning/00250.png 31 | rgb/0006/15-deg-right/00245.png depth/0006/15-deg-right/00245.png 32 | rgb/0006/15-deg-right/00173.png depth/0006/15-deg-right/00173.png 33 | rgb/0006/15-deg-right/00162.png depth/0006/15-deg-right/00162.png 34 | rgb/0006/15-deg-right/00149.png depth/0006/15-deg-right/00149.png 35 | rgb/0006/15-deg-right/00074.png depth/0006/15-deg-right/00074.png 36 | rgb/0006/15-deg-right/00075.png depth/0006/15-deg-right/00075.png 37 | rgb/0006/15-deg-right/00011.png depth/0006/15-deg-right/00011.png 38 | rgb/0006/15-deg-right/00215.png depth/0006/15-deg-right/00215.png 39 | rgb/0006/15-deg-right/00182.png depth/0006/15-deg-right/00182.png 40 | rgb/0006/15-deg-right/00105.png depth/0006/15-deg-right/00105.png 41 | rgb/0006/15-deg-right/00243.png depth/0006/15-deg-right/00243.png 42 | rgb/0006/30-deg-right/00028.png depth/0006/30-deg-right/00028.png 43 | rgb/0006/30-deg-right/00067.png depth/0006/30-deg-right/00067.png 44 | rgb/0006/30-deg-right/00120.png depth/0006/30-deg-right/00120.png 45 | 
rgb/0006/30-deg-right/00229.png depth/0006/30-deg-right/00229.png 46 | rgb/0006/30-deg-right/00236.png depth/0006/30-deg-right/00236.png 47 | rgb/0006/30-deg-right/00030.png depth/0006/30-deg-right/00030.png 48 | rgb/0006/fog/00160.png depth/0006/fog/00160.png 49 | rgb/0006/30-deg-left/00210.png depth/0006/30-deg-left/00210.png 50 | rgb/0006/30-deg-left/00051.png depth/0006/30-deg-left/00051.png 51 | rgb/0006/30-deg-left/00111.png depth/0006/30-deg-left/00111.png 52 | rgb/0006/30-deg-left/00094.png depth/0006/30-deg-left/00094.png 53 | rgb/0006/30-deg-left/00168.png depth/0006/30-deg-left/00168.png 54 | rgb/0006/30-deg-left/00030.png depth/0006/30-deg-left/00030.png 55 | rgb/0006/30-deg-left/00263.png depth/0006/30-deg-left/00263.png 56 | rgb/0006/30-deg-left/00071.png depth/0006/30-deg-left/00071.png 57 | rgb/0006/30-deg-left/00262.png depth/0006/30-deg-left/00262.png 58 | rgb/0006/overcast/00008.png depth/0006/overcast/00008.png 59 | rgb/0006/overcast/00220.png depth/0006/overcast/00220.png 60 | rgb/0006/overcast/00064.png depth/0006/overcast/00064.png 61 | rgb/0006/overcast/00147.png depth/0006/overcast/00147.png 62 | rgb/0006/overcast/00237.png depth/0006/overcast/00237.png 63 | rgb/0006/overcast/00241.png depth/0006/overcast/00241.png 64 | rgb/0018/clone/00176.png depth/0018/clone/00176.png 65 | rgb/0018/clone/00106.png depth/0018/clone/00106.png 66 | rgb/0018/clone/00080.png depth/0018/clone/00080.png 67 | rgb/0018/clone/00067.png depth/0018/clone/00067.png 68 | rgb/0018/clone/00029.png depth/0018/clone/00029.png 69 | rgb/0018/clone/00306.png depth/0018/clone/00306.png 70 | rgb/0018/clone/00070.png depth/0018/clone/00070.png 71 | rgb/0018/clone/00130.png depth/0018/clone/00130.png 72 | rgb/0018/15-deg-left/00318.png depth/0018/15-deg-left/00318.png 73 | rgb/0018/15-deg-left/00244.png depth/0018/15-deg-left/00244.png 74 | rgb/0018/15-deg-left/00301.png depth/0018/15-deg-left/00301.png 75 | rgb/0018/15-deg-left/00102.png depth/0018/15-deg-left/00102.png 76 | rgb/0018/15-deg-left/00315.png depth/0018/15-deg-left/00315.png 77 | rgb/0018/15-deg-left/00127.png depth/0018/15-deg-left/00127.png 78 | rgb/0018/15-deg-left/00177.png depth/0018/15-deg-left/00177.png 79 | rgb/0018/15-deg-left/00132.png depth/0018/15-deg-left/00132.png 80 | rgb/0018/15-deg-left/00118.png depth/0018/15-deg-left/00118.png 81 | rgb/0018/15-deg-left/00297.png depth/0018/15-deg-left/00297.png 82 | rgb/0018/15-deg-left/00105.png depth/0018/15-deg-left/00105.png 83 | rgb/0018/15-deg-left/00021.png depth/0018/15-deg-left/00021.png 84 | rgb/0018/15-deg-left/00062.png depth/0018/15-deg-left/00062.png 85 | rgb/0018/sunset/00097.png depth/0018/sunset/00097.png 86 | rgb/0018/sunset/00266.png depth/0018/sunset/00266.png 87 | rgb/0018/sunset/00302.png depth/0018/sunset/00302.png 88 | rgb/0018/sunset/00115.png depth/0018/sunset/00115.png 89 | rgb/0018/sunset/00140.png depth/0018/sunset/00140.png 90 | rgb/0018/sunset/00146.png depth/0018/sunset/00146.png 91 | rgb/0018/sunset/00207.png depth/0018/sunset/00207.png 92 | rgb/0018/sunset/00029.png depth/0018/sunset/00029.png 93 | rgb/0018/sunset/00211.png depth/0018/sunset/00211.png 94 | rgb/0018/sunset/00061.png depth/0018/sunset/00061.png 95 | rgb/0018/rain/00253.png depth/0018/rain/00253.png 96 | rgb/0018/rain/00292.png depth/0018/rain/00292.png 97 | rgb/0018/rain/00238.png depth/0018/rain/00238.png 98 | rgb/0018/rain/00191.png depth/0018/rain/00191.png 99 | rgb/0018/rain/00272.png depth/0018/rain/00272.png 100 | rgb/0018/rain/00130.png depth/0018/rain/00130.png 101 | 
rgb/0018/rain/00250.png depth/0018/rain/00250.png 102 | rgb/0018/morning/00176.png depth/0018/morning/00176.png 103 | rgb/0018/morning/00024.png depth/0018/morning/00024.png 104 | rgb/0018/morning/00076.png depth/0018/morning/00076.png 105 | rgb/0018/morning/00214.png depth/0018/morning/00214.png 106 | rgb/0018/morning/00110.png depth/0018/morning/00110.png 107 | rgb/0018/morning/00316.png depth/0018/morning/00316.png 108 | rgb/0018/morning/00131.png depth/0018/morning/00131.png 109 | rgb/0018/morning/00105.png depth/0018/morning/00105.png 110 | rgb/0018/15-deg-right/00148.png depth/0018/15-deg-right/00148.png 111 | rgb/0018/15-deg-right/00206.png depth/0018/15-deg-right/00206.png 112 | rgb/0018/15-deg-right/00210.png depth/0018/15-deg-right/00210.png 113 | rgb/0018/15-deg-right/00181.png depth/0018/15-deg-right/00181.png 114 | rgb/0018/15-deg-right/00216.png depth/0018/15-deg-right/00216.png 115 | rgb/0018/15-deg-right/00084.png depth/0018/15-deg-right/00084.png 116 | rgb/0018/15-deg-right/00330.png depth/0018/15-deg-right/00330.png 117 | rgb/0018/15-deg-right/00042.png depth/0018/15-deg-right/00042.png 118 | rgb/0018/15-deg-right/00130.png depth/0018/15-deg-right/00130.png 119 | rgb/0018/15-deg-right/00319.png depth/0018/15-deg-right/00319.png 120 | rgb/0018/30-deg-right/00311.png depth/0018/30-deg-right/00311.png 121 | rgb/0018/30-deg-right/00072.png depth/0018/30-deg-right/00072.png 122 | rgb/0018/30-deg-right/00217.png depth/0018/30-deg-right/00217.png 123 | rgb/0018/30-deg-right/00318.png depth/0018/30-deg-right/00318.png 124 | rgb/0018/30-deg-right/00036.png depth/0018/30-deg-right/00036.png 125 | rgb/0018/30-deg-right/00282.png depth/0018/30-deg-right/00282.png 126 | rgb/0018/30-deg-right/00123.png depth/0018/30-deg-right/00123.png 127 | rgb/0018/30-deg-right/00204.png depth/0018/30-deg-right/00204.png 128 | rgb/0018/30-deg-right/00035.png depth/0018/30-deg-right/00035.png 129 | rgb/0018/fog/00203.png depth/0018/fog/00203.png 130 | rgb/0018/fog/00016.png depth/0018/fog/00016.png 131 | rgb/0018/fog/00141.png depth/0018/fog/00141.png 132 | rgb/0018/fog/00275.png depth/0018/fog/00275.png 133 | rgb/0018/fog/00299.png depth/0018/fog/00299.png 134 | rgb/0018/fog/00192.png depth/0018/fog/00192.png 135 | rgb/0018/30-deg-left/00046.png depth/0018/30-deg-left/00046.png 136 | rgb/0018/30-deg-left/00253.png depth/0018/30-deg-left/00253.png 137 | rgb/0018/30-deg-left/00223.png depth/0018/30-deg-left/00223.png 138 | rgb/0018/30-deg-left/00038.png depth/0018/30-deg-left/00038.png 139 | rgb/0018/30-deg-left/00009.png depth/0018/30-deg-left/00009.png 140 | rgb/0018/30-deg-left/00104.png depth/0018/30-deg-left/00104.png 141 | rgb/0018/30-deg-left/00087.png depth/0018/30-deg-left/00087.png 142 | rgb/0018/30-deg-left/00127.png depth/0018/30-deg-left/00127.png 143 | rgb/0018/30-deg-left/00193.png depth/0018/30-deg-left/00193.png 144 | rgb/0018/30-deg-left/00133.png depth/0018/30-deg-left/00133.png 145 | rgb/0018/30-deg-left/00069.png depth/0018/30-deg-left/00069.png 146 | rgb/0018/30-deg-left/00235.png depth/0018/30-deg-left/00235.png 147 | rgb/0018/30-deg-left/00108.png depth/0018/30-deg-left/00108.png 148 | rgb/0018/overcast/00206.png depth/0018/overcast/00206.png 149 | rgb/0018/overcast/00210.png depth/0018/overcast/00210.png 150 | rgb/0018/overcast/00066.png depth/0018/overcast/00066.png 151 | rgb/0018/overcast/00303.png depth/0018/overcast/00303.png 152 | rgb/0018/overcast/00174.png depth/0018/overcast/00174.png 153 | rgb/0018/overcast/00014.png depth/0018/overcast/00014.png 154 | 
rgb/0018/overcast/00132.png depth/0018/overcast/00132.png 155 | rgb/0018/overcast/00168.png depth/0018/overcast/00168.png 156 | rgb/0018/overcast/00035.png depth/0018/overcast/00035.png 157 | rgb/0001/clone/00085.png depth/0001/clone/00085.png 158 | rgb/0001/clone/00082.png depth/0001/clone/00082.png 159 | rgb/0001/clone/00161.png depth/0001/clone/00161.png 160 | rgb/0001/clone/00074.png depth/0001/clone/00074.png 161 | rgb/0001/clone/00356.png depth/0001/clone/00356.png 162 | rgb/0001/clone/00355.png depth/0001/clone/00355.png 163 | rgb/0001/clone/00261.png depth/0001/clone/00261.png 164 | rgb/0001/clone/00362.png depth/0001/clone/00362.png 165 | rgb/0001/clone/00284.png depth/0001/clone/00284.png 166 | rgb/0001/clone/00349.png depth/0001/clone/00349.png 167 | rgb/0001/15-deg-left/00025.png depth/0001/15-deg-left/00025.png 168 | rgb/0001/15-deg-left/00221.png depth/0001/15-deg-left/00221.png 169 | rgb/0001/15-deg-left/00218.png depth/0001/15-deg-left/00218.png 170 | rgb/0001/15-deg-left/00018.png depth/0001/15-deg-left/00018.png 171 | rgb/0001/15-deg-left/00320.png depth/0001/15-deg-left/00320.png 172 | rgb/0001/15-deg-left/00216.png depth/0001/15-deg-left/00216.png 173 | rgb/0001/15-deg-left/00303.png depth/0001/15-deg-left/00303.png 174 | rgb/0001/15-deg-left/00359.png depth/0001/15-deg-left/00359.png 175 | rgb/0001/15-deg-left/00283.png depth/0001/15-deg-left/00283.png 176 | rgb/0001/15-deg-left/00315.png depth/0001/15-deg-left/00315.png 177 | rgb/0001/15-deg-left/00410.png depth/0001/15-deg-left/00410.png 178 | rgb/0001/15-deg-left/00017.png depth/0001/15-deg-left/00017.png 179 | rgb/0001/sunset/00080.png depth/0001/sunset/00080.png 180 | rgb/0001/sunset/00038.png depth/0001/sunset/00038.png 181 | rgb/0001/sunset/00121.png depth/0001/sunset/00121.png 182 | rgb/0001/sunset/00376.png depth/0001/sunset/00376.png 183 | rgb/0001/sunset/00207.png depth/0001/sunset/00207.png 184 | rgb/0001/sunset/00029.png depth/0001/sunset/00029.png 185 | rgb/0001/sunset/00132.png depth/0001/sunset/00132.png 186 | rgb/0001/sunset/00167.png depth/0001/sunset/00167.png 187 | rgb/0001/rain/00294.png depth/0001/rain/00294.png 188 | rgb/0001/rain/00239.png depth/0001/rain/00239.png 189 | rgb/0001/rain/00266.png depth/0001/rain/00266.png 190 | rgb/0001/rain/00268.png depth/0001/rain/00268.png 191 | rgb/0001/rain/00270.png depth/0001/rain/00270.png 192 | rgb/0001/rain/00238.png depth/0001/rain/00238.png 193 | rgb/0001/rain/00342.png depth/0001/rain/00342.png 194 | rgb/0001/rain/00055.png depth/0001/rain/00055.png 195 | rgb/0001/morning/00423.png depth/0001/morning/00423.png 196 | rgb/0001/morning/00429.png depth/0001/morning/00429.png 197 | rgb/0001/morning/00080.png depth/0001/morning/00080.png 198 | rgb/0001/morning/00408.png depth/0001/morning/00408.png 199 | rgb/0001/morning/00068.png depth/0001/morning/00068.png 200 | rgb/0001/morning/00406.png depth/0001/morning/00406.png 201 | rgb/0001/morning/00167.png depth/0001/morning/00167.png 202 | rgb/0001/morning/00133.png depth/0001/morning/00133.png 203 | rgb/0001/morning/00436.png depth/0001/morning/00436.png 204 | rgb/0001/morning/00071.png depth/0001/morning/00071.png 205 | rgb/0001/15-deg-right/00415.png depth/0001/15-deg-right/00415.png 206 | rgb/0001/15-deg-right/00325.png depth/0001/15-deg-right/00325.png 207 | rgb/0001/15-deg-right/00163.png depth/0001/15-deg-right/00163.png 208 | rgb/0001/15-deg-right/00089.png depth/0001/15-deg-right/00089.png 209 | rgb/0001/15-deg-right/00003.png depth/0001/15-deg-right/00003.png 210 | rgb/0001/15-deg-right/00257.png 
depth/0001/15-deg-right/00257.png 211 | rgb/0001/15-deg-right/00050.png depth/0001/15-deg-right/00050.png 212 | rgb/0001/15-deg-right/00054.png depth/0001/15-deg-right/00054.png 213 | rgb/0001/15-deg-right/00069.png depth/0001/15-deg-right/00069.png 214 | rgb/0001/15-deg-right/00037.png depth/0001/15-deg-right/00037.png 215 | rgb/0001/15-deg-right/00255.png depth/0001/15-deg-right/00255.png 216 | rgb/0001/30-deg-right/00253.png depth/0001/30-deg-right/00253.png 217 | rgb/0001/30-deg-right/00429.png depth/0001/30-deg-right/00429.png 218 | rgb/0001/30-deg-right/00159.png depth/0001/30-deg-right/00159.png 219 | rgb/0001/30-deg-right/00325.png depth/0001/30-deg-right/00325.png 220 | rgb/0001/30-deg-right/00165.png depth/0001/30-deg-right/00165.png 221 | rgb/0001/30-deg-right/00160.png depth/0001/30-deg-right/00160.png 222 | rgb/0001/30-deg-right/00368.png depth/0001/30-deg-right/00368.png 223 | rgb/0001/30-deg-right/00029.png depth/0001/30-deg-right/00029.png 224 | rgb/0001/30-deg-right/00390.png depth/0001/30-deg-right/00390.png 225 | rgb/0001/30-deg-right/00017.png depth/0001/30-deg-right/00017.png 226 | rgb/0001/30-deg-right/00341.png depth/0001/30-deg-right/00341.png 227 | rgb/0001/fog/00326.png depth/0001/fog/00326.png 228 | rgb/0001/fog/00246.png depth/0001/fog/00246.png 229 | rgb/0001/fog/00090.png depth/0001/fog/00090.png 230 | rgb/0001/fog/00063.png depth/0001/fog/00063.png 231 | rgb/0001/fog/00233.png depth/0001/fog/00233.png 232 | rgb/0001/fog/00419.png depth/0001/fog/00419.png 233 | rgb/0001/fog/00352.png depth/0001/fog/00352.png 234 | rgb/0001/fog/00244.png depth/0001/fog/00244.png 235 | rgb/0001/fog/00404.png depth/0001/fog/00404.png 236 | rgb/0001/fog/00273.png depth/0001/fog/00273.png 237 | rgb/0001/fog/00019.png depth/0001/fog/00019.png 238 | rgb/0001/fog/00104.png depth/0001/fog/00104.png 239 | rgb/0001/fog/00344.png depth/0001/fog/00344.png 240 | rgb/0001/fog/00367.png depth/0001/fog/00367.png 241 | rgb/0001/fog/00312.png depth/0001/fog/00312.png 242 | rgb/0001/fog/00227.png depth/0001/fog/00227.png 243 | rgb/0001/30-deg-left/00348.png depth/0001/30-deg-left/00348.png 244 | rgb/0001/30-deg-left/00067.png depth/0001/30-deg-left/00067.png 245 | rgb/0001/30-deg-left/00038.png depth/0001/30-deg-left/00038.png 246 | rgb/0001/30-deg-left/00347.png depth/0001/30-deg-left/00347.png 247 | rgb/0001/30-deg-left/00345.png depth/0001/30-deg-left/00345.png 248 | rgb/0001/30-deg-left/00222.png depth/0001/30-deg-left/00222.png 249 | rgb/0001/30-deg-left/00270.png depth/0001/30-deg-left/00270.png 250 | rgb/0001/30-deg-left/00014.png depth/0001/30-deg-left/00014.png 251 | rgb/0001/30-deg-left/00134.png depth/0001/30-deg-left/00134.png 252 | rgb/0001/30-deg-left/00015.png depth/0001/30-deg-left/00015.png 253 | rgb/0001/overcast/00403.png depth/0001/overcast/00403.png 254 | rgb/0001/overcast/00377.png depth/0001/overcast/00377.png 255 | rgb/0001/overcast/00393.png depth/0001/overcast/00393.png 256 | rgb/0001/overcast/00120.png depth/0001/overcast/00120.png 257 | rgb/0001/overcast/00300.png depth/0001/overcast/00300.png 258 | rgb/0001/overcast/00003.png depth/0001/overcast/00003.png 259 | rgb/0001/overcast/00301.png depth/0001/overcast/00301.png 260 | rgb/0001/overcast/00141.png depth/0001/overcast/00141.png 261 | rgb/0002/clone/00152.png depth/0002/clone/00152.png 262 | rgb/0002/clone/00188.png depth/0002/clone/00188.png 263 | rgb/0002/clone/00162.png depth/0002/clone/00162.png 264 | rgb/0002/clone/00075.png depth/0002/clone/00075.png 265 | rgb/0002/clone/00032.png depth/0002/clone/00032.png 
266 | rgb/0002/clone/00202.png depth/0002/clone/00202.png 267 | rgb/0002/clone/00187.png depth/0002/clone/00187.png 268 | rgb/0002/clone/00084.png depth/0002/clone/00084.png 269 | rgb/0002/clone/00141.png depth/0002/clone/00141.png 270 | rgb/0002/clone/00225.png depth/0002/clone/00225.png 271 | rgb/0002/clone/00201.png depth/0002/clone/00201.png 272 | rgb/0002/clone/00136.png depth/0002/clone/00136.png 273 | rgb/0002/clone/00065.png depth/0002/clone/00065.png 274 | rgb/0002/clone/00035.png depth/0002/clone/00035.png 275 | rgb/0002/15-deg-left/00046.png depth/0002/15-deg-left/00046.png 276 | rgb/0002/15-deg-left/00000.png depth/0002/15-deg-left/00000.png 277 | rgb/0002/15-deg-left/00190.png depth/0002/15-deg-left/00190.png 278 | rgb/0002/15-deg-left/00088.png depth/0002/15-deg-left/00088.png 279 | rgb/0002/15-deg-left/00059.png depth/0002/15-deg-left/00059.png 280 | rgb/0002/15-deg-left/00198.png depth/0002/15-deg-left/00198.png 281 | rgb/0002/15-deg-left/00168.png depth/0002/15-deg-left/00168.png 282 | rgb/0002/15-deg-left/00192.png depth/0002/15-deg-left/00192.png 283 | rgb/0002/15-deg-left/00026.png depth/0002/15-deg-left/00026.png 284 | rgb/0002/15-deg-left/00006.png depth/0002/15-deg-left/00006.png 285 | rgb/0002/15-deg-left/00096.png depth/0002/15-deg-left/00096.png 286 | rgb/0002/sunset/00148.png depth/0002/sunset/00148.png 287 | rgb/0002/sunset/00187.png depth/0002/sunset/00187.png 288 | rgb/0002/rain/00004.png depth/0002/rain/00004.png 289 | rgb/0002/rain/00203.png depth/0002/rain/00203.png 290 | rgb/0002/rain/00220.png depth/0002/rain/00220.png 291 | rgb/0002/rain/00137.png depth/0002/rain/00137.png 292 | rgb/0002/rain/00100.png depth/0002/rain/00100.png 293 | rgb/0002/morning/00157.png depth/0002/morning/00157.png 294 | rgb/0002/morning/00206.png depth/0002/morning/00206.png 295 | rgb/0002/morning/00081.png depth/0002/morning/00081.png 296 | rgb/0002/morning/00060.png depth/0002/morning/00060.png 297 | rgb/0002/morning/00100.png depth/0002/morning/00100.png 298 | rgb/0002/15-deg-right/00176.png depth/0002/15-deg-right/00176.png 299 | rgb/0002/15-deg-right/00076.png depth/0002/15-deg-right/00076.png 300 | rgb/0002/15-deg-right/00221.png depth/0002/15-deg-right/00221.png 301 | rgb/0002/15-deg-right/00045.png depth/0002/15-deg-right/00045.png 302 | rgb/0002/15-deg-right/00055.png depth/0002/15-deg-right/00055.png 303 | rgb/0002/30-deg-right/00112.png depth/0002/30-deg-right/00112.png 304 | rgb/0002/30-deg-right/00107.png depth/0002/30-deg-right/00107.png 305 | rgb/0002/30-deg-right/00069.png depth/0002/30-deg-right/00069.png 306 | rgb/0002/fog/00115.png depth/0002/fog/00115.png 307 | rgb/0002/fog/00104.png depth/0002/fog/00104.png 308 | rgb/0002/fog/00219.png depth/0002/fog/00219.png 309 | rgb/0002/fog/00058.png depth/0002/fog/00058.png 310 | rgb/0002/30-deg-left/00085.png depth/0002/30-deg-left/00085.png 311 | rgb/0002/30-deg-left/00081.png depth/0002/30-deg-left/00081.png 312 | rgb/0002/30-deg-left/00164.png depth/0002/30-deg-left/00164.png 313 | rgb/0002/30-deg-left/00232.png depth/0002/30-deg-left/00232.png 314 | rgb/0002/30-deg-left/00003.png depth/0002/30-deg-left/00003.png 315 | rgb/0002/30-deg-left/00094.png depth/0002/30-deg-left/00094.png 316 | rgb/0002/overcast/00098.png depth/0002/overcast/00098.png 317 | rgb/0002/overcast/00190.png depth/0002/overcast/00190.png 318 | rgb/0002/overcast/00081.png depth/0002/overcast/00081.png 319 | rgb/0002/overcast/00164.png depth/0002/overcast/00164.png 320 | rgb/0002/overcast/00029.png depth/0002/overcast/00029.png 321 | 
rgb/0002/overcast/00192.png depth/0002/overcast/00192.png 322 | rgb/0002/overcast/00056.png depth/0002/overcast/00056.png 323 | rgb/0002/overcast/00204.png depth/0002/overcast/00204.png 324 | rgb/0002/overcast/00035.png depth/0002/overcast/00035.png 325 | rgb/0020/clone/00640.png depth/0020/clone/00640.png 326 | rgb/0020/clone/00081.png depth/0020/clone/00081.png 327 | rgb/0020/clone/00418.png depth/0020/clone/00418.png 328 | rgb/0020/clone/00439.png depth/0020/clone/00439.png 329 | rgb/0020/clone/00741.png depth/0020/clone/00741.png 330 | rgb/0020/clone/00091.png depth/0020/clone/00091.png 331 | rgb/0020/clone/00712.png depth/0020/clone/00712.png 332 | rgb/0020/clone/00166.png depth/0020/clone/00166.png 333 | rgb/0020/clone/00198.png depth/0020/clone/00198.png 334 | rgb/0020/clone/00363.png depth/0020/clone/00363.png 335 | rgb/0020/clone/00124.png depth/0020/clone/00124.png 336 | rgb/0020/clone/00700.png depth/0020/clone/00700.png 337 | rgb/0020/clone/00748.png depth/0020/clone/00748.png 338 | rgb/0020/clone/00751.png depth/0020/clone/00751.png 339 | rgb/0020/15-deg-left/00046.png depth/0020/15-deg-left/00046.png 340 | rgb/0020/15-deg-left/00594.png depth/0020/15-deg-left/00594.png 341 | rgb/0020/15-deg-left/00732.png depth/0020/15-deg-left/00732.png 342 | rgb/0020/15-deg-left/00212.png depth/0020/15-deg-left/00212.png 343 | rgb/0020/15-deg-left/00488.png depth/0020/15-deg-left/00488.png 344 | rgb/0020/15-deg-left/00630.png depth/0020/15-deg-left/00630.png 345 | rgb/0020/15-deg-left/00601.png depth/0020/15-deg-left/00601.png 346 | rgb/0020/15-deg-left/00578.png depth/0020/15-deg-left/00578.png 347 | rgb/0020/15-deg-left/00628.png depth/0020/15-deg-left/00628.png 348 | rgb/0020/15-deg-left/00691.png depth/0020/15-deg-left/00691.png 349 | rgb/0020/15-deg-left/00453.png depth/0020/15-deg-left/00453.png 350 | rgb/0020/15-deg-left/00370.png depth/0020/15-deg-left/00370.png 351 | rgb/0020/15-deg-left/00776.png depth/0020/15-deg-left/00776.png 352 | rgb/0020/15-deg-left/00582.png depth/0020/15-deg-left/00582.png 353 | rgb/0020/15-deg-left/00168.png depth/0020/15-deg-left/00168.png 354 | rgb/0020/15-deg-left/00521.png depth/0020/15-deg-left/00521.png 355 | rgb/0020/15-deg-left/00773.png depth/0020/15-deg-left/00773.png 356 | rgb/0020/15-deg-left/00714.png depth/0020/15-deg-left/00714.png 357 | rgb/0020/15-deg-left/00481.png depth/0020/15-deg-left/00481.png 358 | rgb/0020/15-deg-left/00561.png depth/0020/15-deg-left/00561.png 359 | rgb/0020/15-deg-left/00047.png depth/0020/15-deg-left/00047.png 360 | rgb/0020/15-deg-left/00396.png depth/0020/15-deg-left/00396.png 361 | rgb/0020/sunset/00805.png depth/0020/sunset/00805.png 362 | rgb/0020/sunset/00514.png depth/0020/sunset/00514.png 363 | rgb/0020/sunset/00592.png depth/0020/sunset/00592.png 364 | rgb/0020/sunset/00557.png depth/0020/sunset/00557.png 365 | rgb/0020/sunset/00163.png depth/0020/sunset/00163.png 366 | rgb/0020/sunset/00183.png depth/0020/sunset/00183.png 367 | rgb/0020/sunset/00686.png depth/0020/sunset/00686.png 368 | rgb/0020/sunset/00715.png depth/0020/sunset/00715.png 369 | rgb/0020/sunset/00411.png depth/0020/sunset/00411.png 370 | rgb/0020/sunset/00768.png depth/0020/sunset/00768.png 371 | rgb/0020/sunset/00682.png depth/0020/sunset/00682.png 372 | rgb/0020/sunset/00146.png depth/0020/sunset/00146.png 373 | rgb/0020/sunset/00644.png depth/0020/sunset/00644.png 374 | rgb/0020/sunset/00338.png depth/0020/sunset/00338.png 375 | rgb/0020/sunset/00056.png depth/0020/sunset/00056.png 376 | rgb/0020/sunset/00664.png 
depth/0020/sunset/00664.png 377 | rgb/0020/sunset/00297.png depth/0020/sunset/00297.png 378 | rgb/0020/sunset/00093.png depth/0020/sunset/00093.png 379 | rgb/0020/sunset/00706.png depth/0020/sunset/00706.png 380 | rgb/0020/sunset/00618.png depth/0020/sunset/00618.png 381 | rgb/0020/rain/00640.png depth/0020/rain/00640.png 382 | rgb/0020/rain/00324.png depth/0020/rain/00324.png 383 | rgb/0020/rain/00139.png depth/0020/rain/00139.png 384 | rgb/0020/rain/00066.png depth/0020/rain/00066.png 385 | rgb/0020/rain/00126.png depth/0020/rain/00126.png 386 | rgb/0020/rain/00146.png depth/0020/rain/00146.png 387 | rgb/0020/rain/00344.png depth/0020/rain/00344.png 388 | rgb/0020/rain/00440.png depth/0020/rain/00440.png 389 | rgb/0020/rain/00773.png depth/0020/rain/00773.png 390 | rgb/0020/rain/00795.png depth/0020/rain/00795.png 391 | rgb/0020/rain/00777.png depth/0020/rain/00777.png 392 | rgb/0020/rain/00675.png depth/0020/rain/00675.png 393 | rgb/0020/rain/00544.png depth/0020/rain/00544.png 394 | rgb/0020/rain/00319.png depth/0020/rain/00319.png 395 | rgb/0020/rain/00012.png depth/0020/rain/00012.png 396 | rgb/0020/morning/00654.png depth/0020/morning/00654.png 397 | rgb/0020/morning/00473.png depth/0020/morning/00473.png 398 | rgb/0020/morning/00259.png depth/0020/morning/00259.png 399 | rgb/0020/morning/00082.png depth/0020/morning/00082.png 400 | rgb/0020/morning/00090.png depth/0020/morning/00090.png 401 | rgb/0020/morning/00645.png depth/0020/morning/00645.png 402 | rgb/0020/morning/00447.png depth/0020/morning/00447.png 403 | rgb/0020/morning/00336.png depth/0020/morning/00336.png 404 | rgb/0020/morning/00278.png depth/0020/morning/00278.png 405 | rgb/0020/morning/00438.png depth/0020/morning/00438.png 406 | rgb/0020/morning/00691.png depth/0020/morning/00691.png 407 | rgb/0020/morning/00171.png depth/0020/morning/00171.png 408 | rgb/0020/morning/00605.png depth/0020/morning/00605.png 409 | rgb/0020/morning/00435.png depth/0020/morning/00435.png 410 | rgb/0020/morning/00409.png depth/0020/morning/00409.png 411 | rgb/0020/morning/00169.png depth/0020/morning/00169.png 412 | rgb/0020/morning/00669.png depth/0020/morning/00669.png 413 | rgb/0020/morning/00138.png depth/0020/morning/00138.png 414 | rgb/0020/morning/00572.png depth/0020/morning/00572.png 415 | rgb/0020/morning/00598.png depth/0020/morning/00598.png 416 | rgb/0020/15-deg-right/00584.png depth/0020/15-deg-right/00584.png 417 | rgb/0020/15-deg-right/00148.png depth/0020/15-deg-right/00148.png 418 | rgb/0020/15-deg-right/00709.png depth/0020/15-deg-right/00709.png 419 | rgb/0020/15-deg-right/00223.png depth/0020/15-deg-right/00223.png 420 | rgb/0020/15-deg-right/00650.png depth/0020/15-deg-right/00650.png 421 | rgb/0020/15-deg-right/00581.png depth/0020/15-deg-right/00581.png 422 | rgb/0020/15-deg-right/00537.png depth/0020/15-deg-right/00537.png 423 | rgb/0020/15-deg-right/00505.png depth/0020/15-deg-right/00505.png 424 | rgb/0020/15-deg-right/00339.png depth/0020/15-deg-right/00339.png 425 | rgb/0020/15-deg-right/00218.png depth/0020/15-deg-right/00218.png 426 | rgb/0020/15-deg-right/00068.png depth/0020/15-deg-right/00068.png 427 | rgb/0020/15-deg-right/00487.png depth/0020/15-deg-right/00487.png 428 | rgb/0020/15-deg-right/00477.png depth/0020/15-deg-right/00477.png 429 | rgb/0020/15-deg-right/00273.png depth/0020/15-deg-right/00273.png 430 | rgb/0020/15-deg-right/00578.png depth/0020/15-deg-right/00578.png 431 | rgb/0020/15-deg-right/00682.png depth/0020/15-deg-right/00682.png 432 | rgb/0020/15-deg-right/00666.png 
depth/0020/15-deg-right/00666.png 433 | rgb/0020/15-deg-right/00767.png depth/0020/15-deg-right/00767.png 434 | rgb/0020/15-deg-right/00117.png depth/0020/15-deg-right/00117.png 435 | rgb/0020/15-deg-right/00522.png depth/0020/15-deg-right/00522.png 436 | rgb/0020/15-deg-right/00308.png depth/0020/15-deg-right/00308.png 437 | rgb/0020/15-deg-right/00127.png depth/0020/15-deg-right/00127.png 438 | rgb/0020/15-deg-right/00029.png depth/0020/15-deg-right/00029.png 439 | rgb/0020/15-deg-right/00497.png depth/0020/15-deg-right/00497.png 440 | rgb/0020/15-deg-right/00026.png depth/0020/15-deg-right/00026.png 441 | rgb/0020/15-deg-right/00265.png depth/0020/15-deg-right/00265.png 442 | rgb/0020/15-deg-right/00802.png depth/0020/15-deg-right/00802.png 443 | rgb/0020/15-deg-right/00396.png depth/0020/15-deg-right/00396.png 444 | rgb/0020/30-deg-right/00622.png depth/0020/30-deg-right/00622.png 445 | rgb/0020/30-deg-right/00415.png depth/0020/30-deg-right/00415.png 446 | rgb/0020/30-deg-right/00514.png depth/0020/30-deg-right/00514.png 447 | rgb/0020/30-deg-right/00333.png depth/0020/30-deg-right/00333.png 448 | rgb/0020/30-deg-right/00611.png depth/0020/30-deg-right/00611.png 449 | rgb/0020/30-deg-right/00223.png depth/0020/30-deg-right/00223.png 450 | rgb/0020/30-deg-right/00472.png depth/0020/30-deg-right/00472.png 451 | rgb/0020/30-deg-right/00526.png depth/0020/30-deg-right/00526.png 452 | rgb/0020/30-deg-right/00734.png depth/0020/30-deg-right/00734.png 453 | rgb/0020/30-deg-right/00484.png depth/0020/30-deg-right/00484.png 454 | rgb/0020/30-deg-right/00120.png depth/0020/30-deg-right/00120.png 455 | rgb/0020/30-deg-right/00758.png depth/0020/30-deg-right/00758.png 456 | rgb/0020/30-deg-right/00556.png depth/0020/30-deg-right/00556.png 457 | rgb/0020/30-deg-right/00014.png depth/0020/30-deg-right/00014.png 458 | rgb/0020/30-deg-right/00198.png depth/0020/30-deg-right/00198.png 459 | rgb/0020/30-deg-right/00813.png depth/0020/30-deg-right/00813.png 460 | rgb/0020/30-deg-right/00797.png depth/0020/30-deg-right/00797.png 461 | rgb/0020/30-deg-right/00748.png depth/0020/30-deg-right/00748.png 462 | rgb/0020/30-deg-right/00209.png depth/0020/30-deg-right/00209.png 463 | rgb/0020/30-deg-right/00047.png depth/0020/30-deg-right/00047.png 464 | rgb/0020/30-deg-right/00237.png depth/0020/30-deg-right/00237.png 465 | rgb/0020/30-deg-right/00101.png depth/0020/30-deg-right/00101.png 466 | rgb/0020/30-deg-right/00389.png depth/0020/30-deg-right/00389.png 467 | rgb/0020/30-deg-right/00071.png depth/0020/30-deg-right/00071.png 468 | rgb/0020/30-deg-right/00757.png depth/0020/30-deg-right/00757.png 469 | rgb/0020/30-deg-right/00096.png depth/0020/30-deg-right/00096.png 470 | rgb/0020/30-deg-right/00824.png depth/0020/30-deg-right/00824.png 471 | rgb/0020/30-deg-right/00108.png depth/0020/30-deg-right/00108.png 472 | rgb/0020/fog/00806.png depth/0020/fog/00806.png 473 | rgb/0020/fog/00369.png depth/0020/fog/00369.png 474 | rgb/0020/fog/00473.png depth/0020/fog/00473.png 475 | rgb/0020/fog/00781.png depth/0020/fog/00781.png 476 | rgb/0020/fog/00183.png depth/0020/fog/00183.png 477 | rgb/0020/fog/00269.png depth/0020/fog/00269.png 478 | rgb/0020/fog/00735.png depth/0020/fog/00735.png 479 | rgb/0020/fog/00273.png depth/0020/fog/00273.png 480 | rgb/0020/fog/00229.png depth/0020/fog/00229.png 481 | rgb/0020/fog/00542.png depth/0020/fog/00542.png 482 | rgb/0020/fog/00219.png depth/0020/fog/00219.png 483 | rgb/0020/fog/00406.png depth/0020/fog/00406.png 484 | rgb/0020/fog/00580.png depth/0020/fog/00580.png 485 | 
rgb/0020/fog/00635.png depth/0020/fog/00635.png 486 | rgb/0020/fog/00110.png depth/0020/fog/00110.png 487 | rgb/0020/fog/00427.png depth/0020/fog/00427.png 488 | rgb/0020/fog/00820.png depth/0020/fog/00820.png 489 | rgb/0020/fog/00101.png depth/0020/fog/00101.png 490 | rgb/0020/fog/00201.png depth/0020/fog/00201.png 491 | rgb/0020/fog/00536.png depth/0020/fog/00536.png 492 | rgb/0020/fog/00704.png depth/0020/fog/00704.png 493 | rgb/0020/fog/00061.png depth/0020/fog/00061.png 494 | rgb/0020/30-deg-left/00294.png depth/0020/30-deg-left/00294.png 495 | rgb/0020/30-deg-left/00384.png depth/0020/30-deg-left/00384.png 496 | rgb/0020/30-deg-left/00031.png depth/0020/30-deg-left/00031.png 497 | rgb/0020/30-deg-left/00804.png depth/0020/30-deg-left/00804.png 498 | rgb/0020/30-deg-left/00335.png depth/0020/30-deg-left/00335.png 499 | rgb/0020/30-deg-left/00476.png depth/0020/30-deg-left/00476.png 500 | rgb/0020/30-deg-left/00285.png depth/0020/30-deg-left/00285.png 501 | -------------------------------------------------------------------------------- /datasets/vkitti/val.txt: -------------------------------------------------------------------------------- 1 | rgb/0006/clone/00051.png depth/0006/clone/00051.png 2 | rgb/0006/clone/00163.png depth/0006/clone/00163.png 3 | rgb/0006/clone/00150.png depth/0006/clone/00150.png 4 | rgb/0006/clone/00104.png depth/0006/clone/00104.png 5 | rgb/0006/clone/00208.png depth/0006/clone/00208.png 6 | rgb/0006/clone/00238.png depth/0006/clone/00238.png 7 | rgb/0006/clone/00228.png depth/0006/clone/00228.png 8 | rgb/0006/clone/00241.png depth/0006/clone/00241.png 9 | rgb/0006/15-deg-left/00011.png depth/0006/15-deg-left/00011.png 10 | rgb/0006/15-deg-left/00208.png depth/0006/15-deg-left/00208.png 11 | rgb/0006/15-deg-left/00181.png depth/0006/15-deg-left/00181.png 12 | rgb/0006/sunset/00157.png depth/0006/sunset/00157.png 13 | rgb/0006/sunset/00190.png depth/0006/sunset/00190.png 14 | rgb/0006/sunset/00173.png depth/0006/sunset/00173.png 15 | rgb/0006/sunset/00222.png depth/0006/sunset/00222.png 16 | rgb/0006/sunset/00023.png depth/0006/sunset/00023.png 17 | rgb/0006/sunset/00207.png depth/0006/sunset/00207.png 18 | rgb/0006/sunset/00153.png depth/0006/sunset/00153.png 19 | rgb/0006/sunset/00169.png depth/0006/sunset/00169.png 20 | rgb/0006/rain/00067.png depth/0006/rain/00067.png 21 | rgb/0006/rain/00032.png depth/0006/rain/00032.png 22 | rgb/0006/rain/00014.png depth/0006/rain/00014.png 23 | rgb/0006/rain/00061.png depth/0006/rain/00061.png 24 | rgb/0006/morning/00258.png depth/0006/morning/00258.png 25 | rgb/0006/morning/00212.png depth/0006/morning/00212.png 26 | rgb/0006/morning/00032.png depth/0006/morning/00032.png 27 | rgb/0006/morning/00172.png depth/0006/morning/00172.png 28 | rgb/0006/morning/00193.png depth/0006/morning/00193.png 29 | rgb/0006/morning/00071.png depth/0006/morning/00071.png 30 | rgb/0006/morning/00250.png depth/0006/morning/00250.png 31 | rgb/0006/15-deg-right/00245.png depth/0006/15-deg-right/00245.png 32 | rgb/0006/15-deg-right/00173.png depth/0006/15-deg-right/00173.png 33 | rgb/0006/15-deg-right/00162.png depth/0006/15-deg-right/00162.png 34 | rgb/0006/15-deg-right/00149.png depth/0006/15-deg-right/00149.png 35 | rgb/0006/15-deg-right/00074.png depth/0006/15-deg-right/00074.png 36 | rgb/0006/15-deg-right/00075.png depth/0006/15-deg-right/00075.png 37 | rgb/0006/15-deg-right/00011.png depth/0006/15-deg-right/00011.png 38 | rgb/0006/15-deg-right/00215.png depth/0006/15-deg-right/00215.png 39 | rgb/0006/15-deg-right/00182.png 
depth/0006/15-deg-right/00182.png 40 | rgb/0006/15-deg-right/00105.png depth/0006/15-deg-right/00105.png 41 | rgb/0006/15-deg-right/00243.png depth/0006/15-deg-right/00243.png 42 | rgb/0006/30-deg-right/00028.png depth/0006/30-deg-right/00028.png 43 | rgb/0006/30-deg-right/00067.png depth/0006/30-deg-right/00067.png 44 | rgb/0006/30-deg-right/00120.png depth/0006/30-deg-right/00120.png 45 | rgb/0006/30-deg-right/00229.png depth/0006/30-deg-right/00229.png 46 | rgb/0006/30-deg-right/00236.png depth/0006/30-deg-right/00236.png 47 | rgb/0006/30-deg-right/00030.png depth/0006/30-deg-right/00030.png 48 | rgb/0006/fog/00160.png depth/0006/fog/00160.png 49 | rgb/0006/30-deg-left/00210.png depth/0006/30-deg-left/00210.png 50 | rgb/0006/30-deg-left/00051.png depth/0006/30-deg-left/00051.png 51 | rgb/0006/30-deg-left/00111.png depth/0006/30-deg-left/00111.png 52 | rgb/0006/30-deg-left/00094.png depth/0006/30-deg-left/00094.png 53 | rgb/0006/30-deg-left/00168.png depth/0006/30-deg-left/00168.png 54 | rgb/0006/30-deg-left/00030.png depth/0006/30-deg-left/00030.png 55 | rgb/0006/30-deg-left/00263.png depth/0006/30-deg-left/00263.png 56 | rgb/0006/30-deg-left/00071.png depth/0006/30-deg-left/00071.png 57 | rgb/0006/30-deg-left/00262.png depth/0006/30-deg-left/00262.png 58 | rgb/0006/overcast/00008.png depth/0006/overcast/00008.png 59 | rgb/0006/overcast/00220.png depth/0006/overcast/00220.png 60 | rgb/0006/overcast/00064.png depth/0006/overcast/00064.png 61 | rgb/0006/overcast/00147.png depth/0006/overcast/00147.png 62 | rgb/0006/overcast/00237.png depth/0006/overcast/00237.png 63 | rgb/0006/overcast/00241.png depth/0006/overcast/00241.png 64 | rgb/0018/clone/00176.png depth/0018/clone/00176.png 65 | rgb/0018/clone/00106.png depth/0018/clone/00106.png 66 | rgb/0018/clone/00080.png depth/0018/clone/00080.png 67 | rgb/0018/clone/00067.png depth/0018/clone/00067.png 68 | rgb/0018/clone/00029.png depth/0018/clone/00029.png 69 | rgb/0018/clone/00306.png depth/0018/clone/00306.png 70 | rgb/0018/clone/00070.png depth/0018/clone/00070.png 71 | rgb/0018/clone/00130.png depth/0018/clone/00130.png 72 | rgb/0018/15-deg-left/00318.png depth/0018/15-deg-left/00318.png 73 | rgb/0018/15-deg-left/00244.png depth/0018/15-deg-left/00244.png 74 | rgb/0018/15-deg-left/00301.png depth/0018/15-deg-left/00301.png 75 | rgb/0018/15-deg-left/00102.png depth/0018/15-deg-left/00102.png 76 | rgb/0018/15-deg-left/00315.png depth/0018/15-deg-left/00315.png 77 | rgb/0018/15-deg-left/00127.png depth/0018/15-deg-left/00127.png 78 | rgb/0018/15-deg-left/00177.png depth/0018/15-deg-left/00177.png 79 | rgb/0018/15-deg-left/00132.png depth/0018/15-deg-left/00132.png 80 | rgb/0018/15-deg-left/00118.png depth/0018/15-deg-left/00118.png 81 | rgb/0018/15-deg-left/00297.png depth/0018/15-deg-left/00297.png 82 | rgb/0018/15-deg-left/00105.png depth/0018/15-deg-left/00105.png 83 | rgb/0018/15-deg-left/00021.png depth/0018/15-deg-left/00021.png 84 | rgb/0018/15-deg-left/00062.png depth/0018/15-deg-left/00062.png 85 | rgb/0018/sunset/00097.png depth/0018/sunset/00097.png 86 | rgb/0018/sunset/00266.png depth/0018/sunset/00266.png 87 | rgb/0018/sunset/00302.png depth/0018/sunset/00302.png 88 | rgb/0018/sunset/00115.png depth/0018/sunset/00115.png 89 | rgb/0018/sunset/00140.png depth/0018/sunset/00140.png 90 | rgb/0018/sunset/00146.png depth/0018/sunset/00146.png 91 | rgb/0018/sunset/00207.png depth/0018/sunset/00207.png 92 | rgb/0018/sunset/00029.png depth/0018/sunset/00029.png 93 | rgb/0018/sunset/00211.png depth/0018/sunset/00211.png 94 | 
rgb/0018/sunset/00061.png depth/0018/sunset/00061.png 95 | rgb/0018/rain/00253.png depth/0018/rain/00253.png 96 | rgb/0018/rain/00292.png depth/0018/rain/00292.png 97 | rgb/0018/rain/00238.png depth/0018/rain/00238.png 98 | rgb/0018/rain/00191.png depth/0018/rain/00191.png 99 | rgb/0018/rain/00272.png depth/0018/rain/00272.png 100 | rgb/0018/rain/00130.png depth/0018/rain/00130.png 101 | rgb/0018/rain/00250.png depth/0018/rain/00250.png 102 | rgb/0018/morning/00176.png depth/0018/morning/00176.png 103 | rgb/0018/morning/00024.png depth/0018/morning/00024.png 104 | rgb/0018/morning/00076.png depth/0018/morning/00076.png 105 | rgb/0018/morning/00214.png depth/0018/morning/00214.png 106 | rgb/0018/morning/00110.png depth/0018/morning/00110.png 107 | rgb/0018/morning/00316.png depth/0018/morning/00316.png 108 | rgb/0018/morning/00131.png depth/0018/morning/00131.png 109 | rgb/0018/morning/00105.png depth/0018/morning/00105.png 110 | rgb/0018/15-deg-right/00148.png depth/0018/15-deg-right/00148.png 111 | rgb/0018/15-deg-right/00206.png depth/0018/15-deg-right/00206.png 112 | rgb/0018/15-deg-right/00210.png depth/0018/15-deg-right/00210.png 113 | rgb/0018/15-deg-right/00181.png depth/0018/15-deg-right/00181.png 114 | rgb/0018/15-deg-right/00216.png depth/0018/15-deg-right/00216.png 115 | rgb/0018/15-deg-right/00084.png depth/0018/15-deg-right/00084.png 116 | rgb/0018/15-deg-right/00330.png depth/0018/15-deg-right/00330.png 117 | rgb/0018/15-deg-right/00042.png depth/0018/15-deg-right/00042.png 118 | rgb/0018/15-deg-right/00130.png depth/0018/15-deg-right/00130.png 119 | rgb/0018/15-deg-right/00319.png depth/0018/15-deg-right/00319.png 120 | rgb/0018/30-deg-right/00311.png depth/0018/30-deg-right/00311.png 121 | rgb/0018/30-deg-right/00072.png depth/0018/30-deg-right/00072.png 122 | rgb/0018/30-deg-right/00217.png depth/0018/30-deg-right/00217.png 123 | rgb/0018/30-deg-right/00318.png depth/0018/30-deg-right/00318.png 124 | rgb/0018/30-deg-right/00036.png depth/0018/30-deg-right/00036.png 125 | rgb/0018/30-deg-right/00282.png depth/0018/30-deg-right/00282.png 126 | rgb/0018/30-deg-right/00123.png depth/0018/30-deg-right/00123.png 127 | rgb/0018/30-deg-right/00204.png depth/0018/30-deg-right/00204.png 128 | rgb/0018/30-deg-right/00035.png depth/0018/30-deg-right/00035.png 129 | rgb/0018/fog/00203.png depth/0018/fog/00203.png 130 | rgb/0018/fog/00016.png depth/0018/fog/00016.png 131 | rgb/0018/fog/00141.png depth/0018/fog/00141.png 132 | rgb/0018/fog/00275.png depth/0018/fog/00275.png 133 | rgb/0018/fog/00299.png depth/0018/fog/00299.png 134 | rgb/0018/fog/00192.png depth/0018/fog/00192.png 135 | rgb/0018/30-deg-left/00046.png depth/0018/30-deg-left/00046.png 136 | rgb/0018/30-deg-left/00253.png depth/0018/30-deg-left/00253.png 137 | rgb/0018/30-deg-left/00223.png depth/0018/30-deg-left/00223.png 138 | rgb/0018/30-deg-left/00038.png depth/0018/30-deg-left/00038.png 139 | rgb/0018/30-deg-left/00009.png depth/0018/30-deg-left/00009.png 140 | rgb/0018/30-deg-left/00104.png depth/0018/30-deg-left/00104.png 141 | rgb/0018/30-deg-left/00087.png depth/0018/30-deg-left/00087.png 142 | rgb/0018/30-deg-left/00127.png depth/0018/30-deg-left/00127.png 143 | rgb/0018/30-deg-left/00193.png depth/0018/30-deg-left/00193.png 144 | rgb/0018/30-deg-left/00133.png depth/0018/30-deg-left/00133.png 145 | rgb/0018/30-deg-left/00069.png depth/0018/30-deg-left/00069.png 146 | rgb/0018/30-deg-left/00235.png depth/0018/30-deg-left/00235.png 147 | rgb/0018/30-deg-left/00108.png depth/0018/30-deg-left/00108.png 148 | 
rgb/0018/overcast/00206.png depth/0018/overcast/00206.png 149 | rgb/0018/overcast/00210.png depth/0018/overcast/00210.png 150 | rgb/0018/overcast/00066.png depth/0018/overcast/00066.png 151 | rgb/0018/overcast/00303.png depth/0018/overcast/00303.png 152 | rgb/0018/overcast/00174.png depth/0018/overcast/00174.png 153 | rgb/0018/overcast/00014.png depth/0018/overcast/00014.png 154 | rgb/0018/overcast/00132.png depth/0018/overcast/00132.png 155 | rgb/0018/overcast/00168.png depth/0018/overcast/00168.png 156 | rgb/0018/overcast/00035.png depth/0018/overcast/00035.png 157 | rgb/0001/clone/00085.png depth/0001/clone/00085.png 158 | rgb/0001/clone/00082.png depth/0001/clone/00082.png 159 | rgb/0001/clone/00161.png depth/0001/clone/00161.png 160 | rgb/0001/clone/00074.png depth/0001/clone/00074.png 161 | rgb/0001/clone/00356.png depth/0001/clone/00356.png 162 | rgb/0001/clone/00355.png depth/0001/clone/00355.png 163 | rgb/0001/clone/00261.png depth/0001/clone/00261.png 164 | rgb/0001/clone/00362.png depth/0001/clone/00362.png 165 | rgb/0001/clone/00284.png depth/0001/clone/00284.png 166 | rgb/0001/clone/00349.png depth/0001/clone/00349.png 167 | rgb/0001/15-deg-left/00025.png depth/0001/15-deg-left/00025.png 168 | rgb/0001/15-deg-left/00221.png depth/0001/15-deg-left/00221.png 169 | rgb/0001/15-deg-left/00218.png depth/0001/15-deg-left/00218.png 170 | rgb/0001/15-deg-left/00018.png depth/0001/15-deg-left/00018.png 171 | rgb/0001/15-deg-left/00320.png depth/0001/15-deg-left/00320.png 172 | rgb/0001/15-deg-left/00216.png depth/0001/15-deg-left/00216.png 173 | rgb/0001/15-deg-left/00303.png depth/0001/15-deg-left/00303.png 174 | rgb/0001/15-deg-left/00359.png depth/0001/15-deg-left/00359.png 175 | rgb/0001/15-deg-left/00283.png depth/0001/15-deg-left/00283.png 176 | rgb/0001/15-deg-left/00315.png depth/0001/15-deg-left/00315.png 177 | rgb/0001/15-deg-left/00410.png depth/0001/15-deg-left/00410.png 178 | rgb/0001/15-deg-left/00017.png depth/0001/15-deg-left/00017.png 179 | rgb/0001/sunset/00080.png depth/0001/sunset/00080.png 180 | rgb/0001/sunset/00038.png depth/0001/sunset/00038.png 181 | rgb/0001/sunset/00121.png depth/0001/sunset/00121.png 182 | rgb/0001/sunset/00376.png depth/0001/sunset/00376.png 183 | rgb/0001/sunset/00207.png depth/0001/sunset/00207.png 184 | rgb/0001/sunset/00029.png depth/0001/sunset/00029.png 185 | rgb/0001/sunset/00132.png depth/0001/sunset/00132.png 186 | rgb/0001/sunset/00167.png depth/0001/sunset/00167.png 187 | rgb/0001/rain/00294.png depth/0001/rain/00294.png 188 | rgb/0001/rain/00239.png depth/0001/rain/00239.png 189 | rgb/0001/rain/00266.png depth/0001/rain/00266.png 190 | rgb/0001/rain/00268.png depth/0001/rain/00268.png 191 | rgb/0001/rain/00270.png depth/0001/rain/00270.png 192 | rgb/0001/rain/00238.png depth/0001/rain/00238.png 193 | rgb/0001/rain/00342.png depth/0001/rain/00342.png 194 | rgb/0001/rain/00055.png depth/0001/rain/00055.png 195 | rgb/0001/morning/00423.png depth/0001/morning/00423.png 196 | rgb/0001/morning/00429.png depth/0001/morning/00429.png 197 | rgb/0001/morning/00080.png depth/0001/morning/00080.png 198 | rgb/0001/morning/00408.png depth/0001/morning/00408.png 199 | rgb/0001/morning/00068.png depth/0001/morning/00068.png 200 | rgb/0001/morning/00406.png depth/0001/morning/00406.png 201 | rgb/0001/morning/00167.png depth/0001/morning/00167.png 202 | rgb/0001/morning/00133.png depth/0001/morning/00133.png 203 | rgb/0001/morning/00436.png depth/0001/morning/00436.png 204 | rgb/0001/morning/00071.png depth/0001/morning/00071.png 205 | 
rgb/0001/15-deg-right/00415.png depth/0001/15-deg-right/00415.png 206 | rgb/0001/15-deg-right/00325.png depth/0001/15-deg-right/00325.png 207 | rgb/0001/15-deg-right/00163.png depth/0001/15-deg-right/00163.png 208 | rgb/0001/15-deg-right/00089.png depth/0001/15-deg-right/00089.png 209 | rgb/0001/15-deg-right/00003.png depth/0001/15-deg-right/00003.png 210 | rgb/0001/15-deg-right/00257.png depth/0001/15-deg-right/00257.png 211 | rgb/0001/15-deg-right/00050.png depth/0001/15-deg-right/00050.png 212 | rgb/0001/15-deg-right/00054.png depth/0001/15-deg-right/00054.png 213 | rgb/0001/15-deg-right/00069.png depth/0001/15-deg-right/00069.png 214 | rgb/0001/15-deg-right/00037.png depth/0001/15-deg-right/00037.png 215 | rgb/0001/15-deg-right/00255.png depth/0001/15-deg-right/00255.png 216 | rgb/0001/30-deg-right/00253.png depth/0001/30-deg-right/00253.png 217 | rgb/0001/30-deg-right/00429.png depth/0001/30-deg-right/00429.png 218 | rgb/0001/30-deg-right/00159.png depth/0001/30-deg-right/00159.png 219 | rgb/0001/30-deg-right/00325.png depth/0001/30-deg-right/00325.png 220 | rgb/0001/30-deg-right/00165.png depth/0001/30-deg-right/00165.png 221 | rgb/0001/30-deg-right/00160.png depth/0001/30-deg-right/00160.png 222 | rgb/0001/30-deg-right/00368.png depth/0001/30-deg-right/00368.png 223 | rgb/0001/30-deg-right/00029.png depth/0001/30-deg-right/00029.png 224 | rgb/0001/30-deg-right/00390.png depth/0001/30-deg-right/00390.png 225 | rgb/0001/30-deg-right/00017.png depth/0001/30-deg-right/00017.png 226 | rgb/0001/30-deg-right/00341.png depth/0001/30-deg-right/00341.png 227 | rgb/0001/fog/00326.png depth/0001/fog/00326.png 228 | rgb/0001/fog/00246.png depth/0001/fog/00246.png 229 | rgb/0001/fog/00090.png depth/0001/fog/00090.png 230 | rgb/0001/fog/00063.png depth/0001/fog/00063.png 231 | rgb/0001/fog/00233.png depth/0001/fog/00233.png 232 | rgb/0001/fog/00419.png depth/0001/fog/00419.png 233 | rgb/0001/fog/00352.png depth/0001/fog/00352.png 234 | rgb/0001/fog/00244.png depth/0001/fog/00244.png 235 | rgb/0001/fog/00404.png depth/0001/fog/00404.png 236 | rgb/0001/fog/00273.png depth/0001/fog/00273.png 237 | rgb/0001/fog/00019.png depth/0001/fog/00019.png 238 | rgb/0001/fog/00104.png depth/0001/fog/00104.png 239 | rgb/0001/fog/00344.png depth/0001/fog/00344.png 240 | rgb/0001/fog/00367.png depth/0001/fog/00367.png 241 | rgb/0001/fog/00312.png depth/0001/fog/00312.png 242 | rgb/0001/fog/00227.png depth/0001/fog/00227.png 243 | rgb/0001/30-deg-left/00348.png depth/0001/30-deg-left/00348.png 244 | rgb/0001/30-deg-left/00067.png depth/0001/30-deg-left/00067.png 245 | rgb/0001/30-deg-left/00038.png depth/0001/30-deg-left/00038.png 246 | rgb/0001/30-deg-left/00347.png depth/0001/30-deg-left/00347.png 247 | rgb/0001/30-deg-left/00345.png depth/0001/30-deg-left/00345.png 248 | rgb/0001/30-deg-left/00222.png depth/0001/30-deg-left/00222.png 249 | rgb/0001/30-deg-left/00270.png depth/0001/30-deg-left/00270.png 250 | rgb/0001/30-deg-left/00014.png depth/0001/30-deg-left/00014.png 251 | rgb/0001/30-deg-left/00134.png depth/0001/30-deg-left/00134.png 252 | rgb/0001/30-deg-left/00015.png depth/0001/30-deg-left/00015.png 253 | rgb/0001/overcast/00403.png depth/0001/overcast/00403.png 254 | rgb/0001/overcast/00377.png depth/0001/overcast/00377.png 255 | rgb/0001/overcast/00393.png depth/0001/overcast/00393.png 256 | rgb/0001/overcast/00120.png depth/0001/overcast/00120.png 257 | rgb/0001/overcast/00300.png depth/0001/overcast/00300.png 258 | rgb/0001/overcast/00003.png depth/0001/overcast/00003.png 259 | 
rgb/0001/overcast/00301.png depth/0001/overcast/00301.png 260 | rgb/0001/overcast/00141.png depth/0001/overcast/00141.png 261 | rgb/0002/clone/00152.png depth/0002/clone/00152.png 262 | rgb/0002/clone/00188.png depth/0002/clone/00188.png 263 | rgb/0002/clone/00162.png depth/0002/clone/00162.png 264 | rgb/0002/clone/00075.png depth/0002/clone/00075.png 265 | rgb/0002/clone/00032.png depth/0002/clone/00032.png 266 | rgb/0002/clone/00202.png depth/0002/clone/00202.png 267 | rgb/0002/clone/00187.png depth/0002/clone/00187.png 268 | rgb/0002/clone/00084.png depth/0002/clone/00084.png 269 | rgb/0002/clone/00141.png depth/0002/clone/00141.png 270 | rgb/0002/clone/00225.png depth/0002/clone/00225.png 271 | rgb/0002/clone/00201.png depth/0002/clone/00201.png 272 | rgb/0002/clone/00136.png depth/0002/clone/00136.png 273 | rgb/0002/clone/00065.png depth/0002/clone/00065.png 274 | rgb/0002/clone/00035.png depth/0002/clone/00035.png 275 | rgb/0002/15-deg-left/00046.png depth/0002/15-deg-left/00046.png 276 | rgb/0002/15-deg-left/00000.png depth/0002/15-deg-left/00000.png 277 | rgb/0002/15-deg-left/00190.png depth/0002/15-deg-left/00190.png 278 | rgb/0002/15-deg-left/00088.png depth/0002/15-deg-left/00088.png 279 | rgb/0002/15-deg-left/00059.png depth/0002/15-deg-left/00059.png 280 | rgb/0002/15-deg-left/00198.png depth/0002/15-deg-left/00198.png 281 | rgb/0002/15-deg-left/00168.png depth/0002/15-deg-left/00168.png 282 | rgb/0002/15-deg-left/00192.png depth/0002/15-deg-left/00192.png 283 | rgb/0002/15-deg-left/00026.png depth/0002/15-deg-left/00026.png 284 | rgb/0002/15-deg-left/00006.png depth/0002/15-deg-left/00006.png 285 | rgb/0002/15-deg-left/00096.png depth/0002/15-deg-left/00096.png 286 | rgb/0002/sunset/00148.png depth/0002/sunset/00148.png 287 | rgb/0002/sunset/00187.png depth/0002/sunset/00187.png 288 | rgb/0002/rain/00004.png depth/0002/rain/00004.png 289 | rgb/0002/rain/00203.png depth/0002/rain/00203.png 290 | rgb/0002/rain/00220.png depth/0002/rain/00220.png 291 | rgb/0002/rain/00137.png depth/0002/rain/00137.png 292 | rgb/0002/rain/00100.png depth/0002/rain/00100.png 293 | rgb/0002/morning/00157.png depth/0002/morning/00157.png 294 | rgb/0002/morning/00206.png depth/0002/morning/00206.png 295 | rgb/0002/morning/00081.png depth/0002/morning/00081.png 296 | rgb/0002/morning/00060.png depth/0002/morning/00060.png 297 | rgb/0002/morning/00100.png depth/0002/morning/00100.png 298 | rgb/0002/15-deg-right/00176.png depth/0002/15-deg-right/00176.png 299 | rgb/0002/15-deg-right/00076.png depth/0002/15-deg-right/00076.png 300 | rgb/0002/15-deg-right/00221.png depth/0002/15-deg-right/00221.png 301 | rgb/0002/15-deg-right/00045.png depth/0002/15-deg-right/00045.png 302 | rgb/0002/15-deg-right/00055.png depth/0002/15-deg-right/00055.png 303 | rgb/0002/30-deg-right/00112.png depth/0002/30-deg-right/00112.png 304 | rgb/0002/30-deg-right/00107.png depth/0002/30-deg-right/00107.png 305 | rgb/0002/30-deg-right/00069.png depth/0002/30-deg-right/00069.png 306 | rgb/0002/fog/00115.png depth/0002/fog/00115.png 307 | rgb/0002/fog/00104.png depth/0002/fog/00104.png 308 | rgb/0002/fog/00219.png depth/0002/fog/00219.png 309 | rgb/0002/fog/00058.png depth/0002/fog/00058.png 310 | rgb/0002/30-deg-left/00085.png depth/0002/30-deg-left/00085.png 311 | rgb/0002/30-deg-left/00081.png depth/0002/30-deg-left/00081.png 312 | rgb/0002/30-deg-left/00164.png depth/0002/30-deg-left/00164.png 313 | rgb/0002/30-deg-left/00232.png depth/0002/30-deg-left/00232.png 314 | rgb/0002/30-deg-left/00003.png 
depth/0002/30-deg-left/00003.png 315 | rgb/0002/30-deg-left/00094.png depth/0002/30-deg-left/00094.png 316 | rgb/0002/overcast/00098.png depth/0002/overcast/00098.png 317 | rgb/0002/overcast/00190.png depth/0002/overcast/00190.png 318 | rgb/0002/overcast/00081.png depth/0002/overcast/00081.png 319 | rgb/0002/overcast/00164.png depth/0002/overcast/00164.png 320 | rgb/0002/overcast/00029.png depth/0002/overcast/00029.png 321 | rgb/0002/overcast/00192.png depth/0002/overcast/00192.png 322 | rgb/0002/overcast/00056.png depth/0002/overcast/00056.png 323 | rgb/0002/overcast/00204.png depth/0002/overcast/00204.png 324 | rgb/0002/overcast/00035.png depth/0002/overcast/00035.png 325 | rgb/0020/clone/00640.png depth/0020/clone/00640.png 326 | rgb/0020/clone/00081.png depth/0020/clone/00081.png 327 | rgb/0020/clone/00418.png depth/0020/clone/00418.png 328 | rgb/0020/clone/00439.png depth/0020/clone/00439.png 329 | rgb/0020/clone/00741.png depth/0020/clone/00741.png 330 | rgb/0020/clone/00091.png depth/0020/clone/00091.png 331 | rgb/0020/clone/00712.png depth/0020/clone/00712.png 332 | rgb/0020/clone/00166.png depth/0020/clone/00166.png 333 | rgb/0020/clone/00198.png depth/0020/clone/00198.png 334 | rgb/0020/clone/00363.png depth/0020/clone/00363.png 335 | rgb/0020/clone/00124.png depth/0020/clone/00124.png 336 | rgb/0020/clone/00700.png depth/0020/clone/00700.png 337 | rgb/0020/clone/00748.png depth/0020/clone/00748.png 338 | rgb/0020/clone/00751.png depth/0020/clone/00751.png 339 | rgb/0020/15-deg-left/00046.png depth/0020/15-deg-left/00046.png 340 | rgb/0020/15-deg-left/00594.png depth/0020/15-deg-left/00594.png 341 | rgb/0020/15-deg-left/00732.png depth/0020/15-deg-left/00732.png 342 | rgb/0020/15-deg-left/00212.png depth/0020/15-deg-left/00212.png 343 | rgb/0020/15-deg-left/00488.png depth/0020/15-deg-left/00488.png 344 | rgb/0020/15-deg-left/00630.png depth/0020/15-deg-left/00630.png 345 | rgb/0020/15-deg-left/00601.png depth/0020/15-deg-left/00601.png 346 | rgb/0020/15-deg-left/00578.png depth/0020/15-deg-left/00578.png 347 | rgb/0020/15-deg-left/00628.png depth/0020/15-deg-left/00628.png 348 | rgb/0020/15-deg-left/00691.png depth/0020/15-deg-left/00691.png 349 | rgb/0020/15-deg-left/00453.png depth/0020/15-deg-left/00453.png 350 | rgb/0020/15-deg-left/00370.png depth/0020/15-deg-left/00370.png 351 | rgb/0020/15-deg-left/00776.png depth/0020/15-deg-left/00776.png 352 | rgb/0020/15-deg-left/00582.png depth/0020/15-deg-left/00582.png 353 | rgb/0020/15-deg-left/00168.png depth/0020/15-deg-left/00168.png 354 | rgb/0020/15-deg-left/00521.png depth/0020/15-deg-left/00521.png 355 | rgb/0020/15-deg-left/00773.png depth/0020/15-deg-left/00773.png 356 | rgb/0020/15-deg-left/00714.png depth/0020/15-deg-left/00714.png 357 | rgb/0020/15-deg-left/00481.png depth/0020/15-deg-left/00481.png 358 | rgb/0020/15-deg-left/00561.png depth/0020/15-deg-left/00561.png 359 | rgb/0020/15-deg-left/00047.png depth/0020/15-deg-left/00047.png 360 | rgb/0020/15-deg-left/00396.png depth/0020/15-deg-left/00396.png 361 | rgb/0020/sunset/00805.png depth/0020/sunset/00805.png 362 | rgb/0020/sunset/00514.png depth/0020/sunset/00514.png 363 | rgb/0020/sunset/00592.png depth/0020/sunset/00592.png 364 | rgb/0020/sunset/00557.png depth/0020/sunset/00557.png 365 | rgb/0020/sunset/00163.png depth/0020/sunset/00163.png 366 | rgb/0020/sunset/00183.png depth/0020/sunset/00183.png 367 | rgb/0020/sunset/00686.png depth/0020/sunset/00686.png 368 | rgb/0020/sunset/00715.png depth/0020/sunset/00715.png 369 | rgb/0020/sunset/00411.png 
depth/0020/sunset/00411.png 370 | rgb/0020/sunset/00768.png depth/0020/sunset/00768.png 371 | rgb/0020/sunset/00682.png depth/0020/sunset/00682.png 372 | rgb/0020/sunset/00146.png depth/0020/sunset/00146.png 373 | rgb/0020/sunset/00644.png depth/0020/sunset/00644.png 374 | rgb/0020/sunset/00338.png depth/0020/sunset/00338.png 375 | rgb/0020/sunset/00056.png depth/0020/sunset/00056.png 376 | rgb/0020/sunset/00664.png depth/0020/sunset/00664.png 377 | rgb/0020/sunset/00297.png depth/0020/sunset/00297.png 378 | rgb/0020/sunset/00093.png depth/0020/sunset/00093.png 379 | rgb/0020/sunset/00706.png depth/0020/sunset/00706.png 380 | rgb/0020/sunset/00618.png depth/0020/sunset/00618.png 381 | rgb/0020/rain/00640.png depth/0020/rain/00640.png 382 | rgb/0020/rain/00324.png depth/0020/rain/00324.png 383 | rgb/0020/rain/00139.png depth/0020/rain/00139.png 384 | rgb/0020/rain/00066.png depth/0020/rain/00066.png 385 | rgb/0020/rain/00126.png depth/0020/rain/00126.png 386 | rgb/0020/rain/00146.png depth/0020/rain/00146.png 387 | rgb/0020/rain/00344.png depth/0020/rain/00344.png 388 | rgb/0020/rain/00440.png depth/0020/rain/00440.png 389 | rgb/0020/rain/00773.png depth/0020/rain/00773.png 390 | rgb/0020/rain/00795.png depth/0020/rain/00795.png 391 | rgb/0020/rain/00777.png depth/0020/rain/00777.png 392 | rgb/0020/rain/00675.png depth/0020/rain/00675.png 393 | rgb/0020/rain/00544.png depth/0020/rain/00544.png 394 | rgb/0020/rain/00319.png depth/0020/rain/00319.png 395 | rgb/0020/rain/00012.png depth/0020/rain/00012.png 396 | rgb/0020/morning/00654.png depth/0020/morning/00654.png 397 | rgb/0020/morning/00473.png depth/0020/morning/00473.png 398 | rgb/0020/morning/00259.png depth/0020/morning/00259.png 399 | rgb/0020/morning/00082.png depth/0020/morning/00082.png 400 | rgb/0020/morning/00090.png depth/0020/morning/00090.png 401 | rgb/0020/morning/00645.png depth/0020/morning/00645.png 402 | rgb/0020/morning/00447.png depth/0020/morning/00447.png 403 | rgb/0020/morning/00336.png depth/0020/morning/00336.png 404 | rgb/0020/morning/00278.png depth/0020/morning/00278.png 405 | rgb/0020/morning/00438.png depth/0020/morning/00438.png 406 | rgb/0020/morning/00691.png depth/0020/morning/00691.png 407 | rgb/0020/morning/00171.png depth/0020/morning/00171.png 408 | rgb/0020/morning/00605.png depth/0020/morning/00605.png 409 | rgb/0020/morning/00435.png depth/0020/morning/00435.png 410 | rgb/0020/morning/00409.png depth/0020/morning/00409.png 411 | rgb/0020/morning/00169.png depth/0020/morning/00169.png 412 | rgb/0020/morning/00669.png depth/0020/morning/00669.png 413 | rgb/0020/morning/00138.png depth/0020/morning/00138.png 414 | rgb/0020/morning/00572.png depth/0020/morning/00572.png 415 | rgb/0020/morning/00598.png depth/0020/morning/00598.png 416 | rgb/0020/15-deg-right/00584.png depth/0020/15-deg-right/00584.png 417 | rgb/0020/15-deg-right/00148.png depth/0020/15-deg-right/00148.png 418 | rgb/0020/15-deg-right/00709.png depth/0020/15-deg-right/00709.png 419 | rgb/0020/15-deg-right/00223.png depth/0020/15-deg-right/00223.png 420 | rgb/0020/15-deg-right/00650.png depth/0020/15-deg-right/00650.png 421 | rgb/0020/15-deg-right/00581.png depth/0020/15-deg-right/00581.png 422 | rgb/0020/15-deg-right/00537.png depth/0020/15-deg-right/00537.png 423 | rgb/0020/15-deg-right/00505.png depth/0020/15-deg-right/00505.png 424 | rgb/0020/15-deg-right/00339.png depth/0020/15-deg-right/00339.png 425 | rgb/0020/15-deg-right/00218.png depth/0020/15-deg-right/00218.png 426 | rgb/0020/15-deg-right/00068.png 
depth/0020/15-deg-right/00068.png 427 | rgb/0020/15-deg-right/00487.png depth/0020/15-deg-right/00487.png 428 | rgb/0020/15-deg-right/00477.png depth/0020/15-deg-right/00477.png 429 | rgb/0020/15-deg-right/00273.png depth/0020/15-deg-right/00273.png 430 | rgb/0020/15-deg-right/00578.png depth/0020/15-deg-right/00578.png 431 | rgb/0020/15-deg-right/00682.png depth/0020/15-deg-right/00682.png 432 | rgb/0020/15-deg-right/00666.png depth/0020/15-deg-right/00666.png 433 | rgb/0020/15-deg-right/00767.png depth/0020/15-deg-right/00767.png 434 | rgb/0020/15-deg-right/00117.png depth/0020/15-deg-right/00117.png 435 | rgb/0020/15-deg-right/00522.png depth/0020/15-deg-right/00522.png 436 | rgb/0020/15-deg-right/00308.png depth/0020/15-deg-right/00308.png 437 | rgb/0020/15-deg-right/00127.png depth/0020/15-deg-right/00127.png 438 | rgb/0020/15-deg-right/00029.png depth/0020/15-deg-right/00029.png 439 | rgb/0020/15-deg-right/00497.png depth/0020/15-deg-right/00497.png 440 | rgb/0020/15-deg-right/00026.png depth/0020/15-deg-right/00026.png 441 | rgb/0020/15-deg-right/00265.png depth/0020/15-deg-right/00265.png 442 | rgb/0020/15-deg-right/00802.png depth/0020/15-deg-right/00802.png 443 | rgb/0020/15-deg-right/00396.png depth/0020/15-deg-right/00396.png 444 | rgb/0020/30-deg-right/00622.png depth/0020/30-deg-right/00622.png 445 | rgb/0020/30-deg-right/00415.png depth/0020/30-deg-right/00415.png 446 | rgb/0020/30-deg-right/00514.png depth/0020/30-deg-right/00514.png 447 | rgb/0020/30-deg-right/00333.png depth/0020/30-deg-right/00333.png 448 | rgb/0020/30-deg-right/00611.png depth/0020/30-deg-right/00611.png 449 | rgb/0020/30-deg-right/00223.png depth/0020/30-deg-right/00223.png 450 | rgb/0020/30-deg-right/00472.png depth/0020/30-deg-right/00472.png 451 | rgb/0020/30-deg-right/00526.png depth/0020/30-deg-right/00526.png 452 | rgb/0020/30-deg-right/00734.png depth/0020/30-deg-right/00734.png 453 | rgb/0020/30-deg-right/00484.png depth/0020/30-deg-right/00484.png 454 | rgb/0020/30-deg-right/00120.png depth/0020/30-deg-right/00120.png 455 | rgb/0020/30-deg-right/00758.png depth/0020/30-deg-right/00758.png 456 | rgb/0020/30-deg-right/00556.png depth/0020/30-deg-right/00556.png 457 | rgb/0020/30-deg-right/00014.png depth/0020/30-deg-right/00014.png 458 | rgb/0020/30-deg-right/00198.png depth/0020/30-deg-right/00198.png 459 | rgb/0020/30-deg-right/00813.png depth/0020/30-deg-right/00813.png 460 | rgb/0020/30-deg-right/00797.png depth/0020/30-deg-right/00797.png 461 | rgb/0020/30-deg-right/00748.png depth/0020/30-deg-right/00748.png 462 | rgb/0020/30-deg-right/00209.png depth/0020/30-deg-right/00209.png 463 | rgb/0020/30-deg-right/00047.png depth/0020/30-deg-right/00047.png 464 | rgb/0020/30-deg-right/00237.png depth/0020/30-deg-right/00237.png 465 | rgb/0020/30-deg-right/00101.png depth/0020/30-deg-right/00101.png 466 | rgb/0020/30-deg-right/00389.png depth/0020/30-deg-right/00389.png 467 | rgb/0020/30-deg-right/00071.png depth/0020/30-deg-right/00071.png 468 | rgb/0020/30-deg-right/00757.png depth/0020/30-deg-right/00757.png 469 | rgb/0020/30-deg-right/00096.png depth/0020/30-deg-right/00096.png 470 | rgb/0020/30-deg-right/00824.png depth/0020/30-deg-right/00824.png 471 | rgb/0020/30-deg-right/00108.png depth/0020/30-deg-right/00108.png 472 | rgb/0020/fog/00806.png depth/0020/fog/00806.png 473 | rgb/0020/fog/00369.png depth/0020/fog/00369.png 474 | rgb/0020/fog/00473.png depth/0020/fog/00473.png 475 | rgb/0020/fog/00781.png depth/0020/fog/00781.png 476 | rgb/0020/fog/00183.png depth/0020/fog/00183.png 477 | 
rgb/0020/fog/00269.png depth/0020/fog/00269.png 478 | rgb/0020/fog/00735.png depth/0020/fog/00735.png 479 | rgb/0020/fog/00273.png depth/0020/fog/00273.png 480 | rgb/0020/fog/00229.png depth/0020/fog/00229.png 481 | rgb/0020/fog/00542.png depth/0020/fog/00542.png 482 | rgb/0020/fog/00219.png depth/0020/fog/00219.png 483 | rgb/0020/fog/00406.png depth/0020/fog/00406.png 484 | rgb/0020/fog/00580.png depth/0020/fog/00580.png 485 | rgb/0020/fog/00635.png depth/0020/fog/00635.png 486 | rgb/0020/fog/00110.png depth/0020/fog/00110.png 487 | rgb/0020/fog/00427.png depth/0020/fog/00427.png 488 | rgb/0020/fog/00820.png depth/0020/fog/00820.png 489 | rgb/0020/fog/00101.png depth/0020/fog/00101.png 490 | rgb/0020/fog/00201.png depth/0020/fog/00201.png 491 | rgb/0020/fog/00536.png depth/0020/fog/00536.png 492 | rgb/0020/fog/00704.png depth/0020/fog/00704.png 493 | rgb/0020/fog/00061.png depth/0020/fog/00061.png 494 | rgb/0020/30-deg-left/00294.png depth/0020/30-deg-left/00294.png 495 | rgb/0020/30-deg-left/00384.png depth/0020/30-deg-left/00384.png 496 | rgb/0020/30-deg-left/00031.png depth/0020/30-deg-left/00031.png 497 | rgb/0020/30-deg-left/00804.png depth/0020/30-deg-left/00804.png 498 | rgb/0020/30-deg-left/00335.png depth/0020/30-deg-left/00335.png 499 | rgb/0020/30-deg-left/00476.png depth/0020/30-deg-left/00476.png 500 | rgb/0020/30-deg-left/00285.png depth/0020/30-deg-left/00285.png 501 | -------------------------------------------------------------------------------- /img/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/img/framework.png -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | from models.base_model import BaseModel 3 | 4 | 5 | def find_model_using_name(model_name): 6 | # Given the option --model [modelname], 7 | # the file "models/modelname_model.py" 8 | # will be imported. 9 | 10 | model_filename = "models." + model_name + "_model" 11 | modellib = importlib.import_module(model_filename) 12 | 13 | # In the file, the class called ModelNameModel() will 14 | # be instantiated. It has to be a subclass of BaseModel, 15 | # and it is case-insensitive. 16 | model = None 17 | target_model_name = model_name.replace('_', '') + 'model' 18 | for name, cls in modellib.__dict__.items(): 19 | if name.lower() == target_model_name.lower() \ 20 | and issubclass(cls, BaseModel): 21 | model = cls 22 | 23 | if model is None: 24 | print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." 
% (model_filename, target_model_name)) 25 | exit(0) 26 | 27 | return model 28 | 29 | 30 | def get_option_setter(model_name): 31 | model_class = find_model_using_name(model_name) 32 | return model_class.modify_commandline_options 33 | 34 | 35 | def create_model(opt): 36 | model = find_model_using_name(opt.model) 37 | instance = model() 38 | instance.initialize(opt) 39 | print("model [%s] was created" % (instance.name())) 40 | return instance 41 | -------------------------------------------------------------------------------- /models/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/models/__init__.pyc -------------------------------------------------------------------------------- /models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from collections import OrderedDict 4 | from . import networks 5 | 6 | 7 | class BaseModel(): 8 | 9 | # modify parser to add command line options, 10 | # and also change the default values if needed 11 | @staticmethod 12 | def modify_commandline_options(parser, is_train): 13 | return parser 14 | 15 | def name(self): 16 | return 'BaseModel' 17 | 18 | def initialize(self, opt): 19 | self.opt = opt 20 | self.gpu_ids = opt.gpu_ids 21 | self.isTrain = opt.isTrain 22 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') 23 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.expr_name) 24 | torch.backends.cudnn.benchmark = True 25 | self.loss_names = [] 26 | self.model_names = [] 27 | self.visual_names = [] 28 | self.image_paths = [] 29 | 30 | def set_input(self, input): 31 | self.input = input 32 | 33 | def forward(self): 34 | pass 35 | 36 | # load and print networks; create schedulers 37 | def setup(self, opt, parser=None): 38 | if self.isTrain: 39 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] 40 | 41 | if not self.isTrain or opt.continue_train: 42 | self.load_networks(opt.which_epoch) 43 | 44 | # switch models to eval mode during test time 45 | def eval(self): 46 | for name in self.model_names: 47 | if isinstance(name, str): 48 | net = getattr(self, 'net' + name) 49 | net.eval() 50 | 51 | # used in test time, wrapping `forward` in no_grad() so we don't save 52 | # intermediate steps for backprop 53 | def test(self): 54 | with torch.no_grad(): 55 | self.forward() 56 | 57 | # get image paths 58 | def get_image_paths(self): 59 | return self.image_paths 60 | 61 | def optimize_parameters(self): 62 | pass 63 | 64 | # update learning rate (called once every epoch) 65 | def update_learning_rate(self): 66 | for scheduler in self.schedulers: 67 | scheduler.step() 68 | lr = self.optimizers[0].param_groups[0]['lr'] 69 | print('learning rate = %.7f' % lr) 70 | 71 | return lr 72 | 73 | # return visualization images. train.py will display these images, and save the images to an HTML file 74 | def get_current_visuals(self): 75 | visual_ret = OrderedDict() 76 | for name in self.visual_names: 77 | if isinstance(name, str): 78 | visual_ret[name] = getattr(self, name) 79 | return visual_ret 80 | 81 | # return training losses/errors. train.py will print out these errors as debugging information 82 | def get_current_losses(self): 83 | errors_ret = OrderedDict() 84 | for name in self.loss_names: 85 | if isinstance(name, str): 86 | # float(...)
works for both scalar tensor and float number 87 | errors_ret[name] = float(getattr(self, 'loss_' + name)) 88 | return errors_ret 89 | 90 | # save models to the disk 91 | def save_networks(self, which_epoch): 92 | for name in self.model_names: 93 | if isinstance(name, str): 94 | save_filename = '%s_net_%s.pth' % (which_epoch, name) 95 | save_path = os.path.join(self.save_dir, save_filename) 96 | net = getattr(self, 'net' + name) 97 | 98 | if len(self.gpu_ids) > 0 and torch.cuda.is_available(): 99 | torch.save(net.module.cpu().state_dict(), save_path) 100 | net.cuda(self.gpu_ids[0]) 101 | else: 102 | torch.save(net.cpu().state_dict(), save_path) 103 | 104 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 105 | key = keys[i] 106 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 107 | if module.__class__.__name__.startswith('InstanceNorm') and \ 108 | (key == 'running_mean' or key == 'running_var'): 109 | if getattr(module, key) is None: 110 | state_dict.pop('.'.join(keys)) 111 | if module.__class__.__name__.startswith('InstanceNorm') and \ 112 | (key == 'num_batches_tracked'): 113 | state_dict.pop('.'.join(keys)) 114 | else: 115 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 116 | 117 | def init_with_pretrained_model(self, model_name, pretrained=""): 118 | 119 | if not pretrained == " ": 120 | net = getattr(self, 'net'+model_name) 121 | if isinstance(net, torch.nn.DataParallel): 122 | net = net.module 123 | state_dict = torch.load(pretrained, map_location=str(self.device)) 124 | del state_dict._metadata 125 | 126 | for key in list(state_dict.keys()): 127 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 128 | net.load_state_dict(state_dict) 129 | print("initialize {} with {}".format(model_name, pretrained)) 130 | 131 | # load models from the disk 132 | def load_networks(self, which_epoch): 133 | 134 | for name in self.model_names: 135 | if isinstance(name, str): 136 | load_filename = '%s_net_%s.pth' % (which_epoch, name) 137 | load_path = os.path.join(self.save_dir, load_filename) 138 | net = getattr(self, 'net' + name) 139 | if isinstance(net, torch.nn.DataParallel): 140 | net = net.module 141 | print('loading the model from %s' % load_path) 142 | # if you are using PyTorch newer than 0.4 (e.g., built from 143 | # GitHub source), you can remove str() on self.device 144 | state_dict = torch.load(load_path, map_location=str(self.device)) 145 | del state_dict._metadata 146 | 147 | # patch InstanceNorm checkpoints prior to 0.4 148 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop 149 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 150 | net.load_state_dict(state_dict) 151 | 152 | 153 | # print network information 154 | def print_networks(self, verbose): 155 | print('---------- Networks initialized -------------') 156 | for name in self.model_names: 157 | if isinstance(name, str): 158 | net = getattr(self, 'net' + name) 159 | num_params = 0 160 | for param in net.parameters(): 161 | num_params += param.numel() 162 | if verbose: 163 | print(net) 164 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 165 | print('-----------------------------------------------') 166 | 167 | # set requires_grad=False to avoid unnecessary computation 168 | def set_requires_grad(self, nets, requires_grad=False): 169 | if not isinstance(nets, list): 170 | nets = [nets] 171 | for net in nets: 172 | if net is not None: 173 |
for param in net.parameters(): 174 | param.requires_grad = requires_grad 175 | -------------------------------------------------------------------------------- /models/base_model.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/models/base_model.pyc -------------------------------------------------------------------------------- /models/fs_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from .base_model import BaseModel 4 | from . import networks 5 | from utils.image_pool import ImagePool 6 | import torch.nn.functional as F 7 | from utils import dataset_util 8 | 9 | class FSModel(BaseModel): 10 | def name(self): 11 | return 'FSModel' 12 | 13 | @staticmethod 14 | def modify_commandline_options(parser, is_train=True): 15 | 16 | parser.set_defaults(no_dropout=True) 17 | if is_train: 18 | parser.add_argument('--lambda_R_Depth', type=float, default=1.0, help='weight for reconstruction loss') 19 | parser.add_argument('--lambda_S_Depth', type=float, default=0.01, help='weight for smooth loss') 20 | 21 | parser.add_argument('--lambda_R_Img', type=float, default=1.0,help='weight for image reconstruction') 22 | 23 | parser.add_argument('--g_src_premodel', type=str, default=" ",help='pretrained G_Src model') 24 | 25 | return parser 26 | 27 | def initialize(self, opt): 28 | BaseModel.initialize(self, opt) 29 | 30 | if self.isTrain: 31 | self.loss_names = ['R_Depth_Src', 'S_Depth_Tgt', 'R_Img_Tgt'] 32 | 33 | if self.isTrain: 34 | self.visual_names = ['src_img', 'fake_tgt', 'src_real_depth', 'src_gen_depth', 'tgt_left_img', 'tgt_gen_depth', 'warp_tgt_img', 'tgt_right_img'] 35 | else: 36 | self.visual_names = ['pred', 'img'] 37 | 38 | if self.isTrain: 39 | self.model_names = ['G_Depth_S'] 40 | 41 | else: 42 | self.model_names = ['G_Depth_S'] 43 | 44 | self.netG_Depth_S = networks.init_net(networks.UNetGenerator(norm='batch'), init_type='normal', gpu_ids=opt.gpu_ids) 45 | 46 | self.netG_Src = networks.init_net(networks.ResGenerator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 47 | 48 | if self.isTrain: 49 | self.init_with_pretrained_model('G_Src', self.opt.g_src_premodel) 50 | self.netG_Src.eval() 51 | 52 | if self.isTrain: 53 | # define loss functions 54 | self.criterionDepthReg = torch.nn.L1Loss() 55 | self.criterionSmooth = networks.SmoothLoss() 56 | self.criterionImgRecon = networks.ReconLoss() 57 | 58 | self.optimizer_G_task = torch.optim.Adam(itertools.chain(self.netG_Depth_S.parameters()), 59 | lr=opt.lr_task, betas=(0.9, 0.999)) 60 | self.optimizers = [] 61 | self.optimizers.append(self.optimizer_G_task) 62 | 63 | def set_input(self, input): 64 | 65 | if self.isTrain: 66 | self.src_real_depth = input['src']['depth'].to(self.device) 67 | self.src_img = input['src']['img'].to(self.device) 68 | self.tgt_left_img = input['tgt']['left_img'].to(self.device) 69 | self.tgt_right_img = input['tgt']['right_img'].to(self.device) 70 | self.tgt_fb = input['tgt']['fb'] 71 | 72 | self.num = self.src_img.shape[0] 73 | else: 74 | self.img = input['left_img'].to(self.device) 75 | 76 | def forward(self): 77 | 78 | if self.isTrain: 79 | 80 | self.fake_tgt = self.netG_Src(self.src_img).detach() 81 | self.out = self.netG_Depth_S(torch.cat((self.fake_tgt, self.tgt_left_img), 0)) 82 | self.src_gen_depth = self.out[-1].narrow(0, 0, self.num) 83 | self.tgt_gen_depth = self.out[-1].narrow(0, 
self.num, self.num) 84 | 85 | else: 86 | self.pred = self.netG_Depth_S(self.img)[-1] 87 | 88 | def backward_G(self): 89 | 90 | lambda_R_Depth = self.opt.lambda_R_Depth 91 | lambda_R_Img = self.opt.lambda_R_Img 92 | lambda_S_Depth = self.opt.lambda_S_Depth 93 | 94 | self.loss_R_Depth_Src = 0.0 95 | real_depths = dataset_util.scale_pyramid(self.src_real_depth, 4) 96 | for (gen_depth, real_depth) in zip(self.out, real_depths): 97 | self.loss_R_Depth_Src += self.criterionDepthReg(gen_depth[:self.num,:,:,:], real_depth) * lambda_R_Depth 98 | 99 | l_imgs = dataset_util.scale_pyramid(self.tgt_left_img, 4) 100 | r_imgs = dataset_util.scale_pyramid(self.tgt_right_img, 4) 101 | self.loss_R_Img_Tgt = 0.0 102 | i = 0 103 | for (l_img, r_img, gen_depth) in zip(l_imgs, r_imgs, self.out): 104 | loss, self.warp_tgt_img = self.criterionImgRecon(l_img, r_img, gen_depth[self.num:,:,:,:], self.tgt_fb / 2**(3-i)) 105 | self.loss_R_Img_Tgt += loss * lambda_R_Img 106 | i += 1 107 | 108 | i = 0 109 | self.loss_S_Depth_Tgt = 0.0 110 | for (gen_depth, img) in zip(self.out, l_imgs): 111 | self.loss_S_Depth_Tgt += self.criterionSmooth(gen_depth[self.num:,:,:,:], img) * self.opt.lambda_S_Depth / 2**i 112 | i += 1 113 | 114 | self.loss_G_Depth = self.loss_R_Img_Tgt + self.loss_S_Depth_Tgt + self.loss_R_Depth_Src 115 | self.loss_G_Depth.backward() 116 | 117 | def optimize_parameters(self): 118 | 119 | self.forward() 120 | self.optimizer_G_task.zero_grad() 121 | self.backward_G() 122 | self.optimizer_G_task.step() 123 | -------------------------------------------------------------------------------- /models/ft_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from .base_model import BaseModel 4 | from . 
import networks 5 | from utils.image_pool import ImagePool 6 | import torch.nn.functional as F 7 | from utils import dataset_util 8 | 9 | class FTModel(BaseModel): 10 | def name(self): 11 | return 'FTModel' 12 | 13 | @staticmethod 14 | def modify_commandline_options(parser, is_train=True): 15 | 16 | parser.set_defaults(no_dropout=True) 17 | if is_train: 18 | parser.add_argument('--lambda_R_Depth', type=float, default=1.0, help='weight for reconstruction loss') 19 | parser.add_argument('--lambda_S_Depth', type=float, default=0.01, help='weight for smooth loss') 20 | 21 | parser.add_argument('--lambda_R_Img', type=float, default=1.0,help='weight for image reconstruction') 22 | 23 | parser.add_argument('--g_tgt_premodel', type=str, default=" ",help='pretrained G_Tgt model') 24 | 25 | return parser 26 | 27 | def initialize(self, opt): 28 | BaseModel.initialize(self, opt) 29 | 30 | if self.isTrain: 31 | self.loss_names = ['R_Depth_Src', 'S_Depth_Tgt', 'R_Img_Tgt'] 32 | 33 | if self.isTrain: 34 | self.visual_names = ['src_img', 'src_real_depth', 'src_gen_depth', 'tgt_left_img', 'fake_src_left', 'tgt_gen_depth', 'warp_tgt_img', 'tgt_right_img'] 35 | else: 36 | self.visual_names = ['pred', 'img'] 37 | 38 | if self.isTrain: 39 | self.model_names = ['G_Depth_T', 'G_Tgt'] 40 | 41 | else: 42 | self.model_names = ['G_Depth_T', 'G_Tgt'] 43 | 44 | self.netG_Depth_T = networks.init_net(networks.UNetGenerator(norm='batch'), init_type='normal', gpu_ids=opt.gpu_ids) 45 | 46 | self.netG_Tgt = networks.init_net(networks.ResGenerator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 47 | 48 | if self.isTrain: 49 | self.init_with_pretrained_model('G_Tgt', self.opt.g_tgt_premodel) 50 | self.netG_Tgt.eval() 51 | 52 | if self.isTrain: 53 | # define loss functions 54 | self.criterionDepthReg = torch.nn.L1Loss() 55 | self.criterionSmooth = networks.SmoothLoss() 56 | self.criterionImgRecon = networks.ReconLoss() 57 | 58 | self.optimizer_G_task = torch.optim.Adam(itertools.chain(self.netG_Depth_T.parameters()), 59 | lr=opt.lr_task, betas=(0.9, 0.999)) 60 | self.optimizers = [] 61 | self.optimizers.append(self.optimizer_G_task) 62 | 63 | def set_input(self, input): 64 | 65 | if self.isTrain: 66 | self.src_real_depth = input['src']['depth'].to(self.device) 67 | self.src_img = input['src']['img'].to(self.device) 68 | self.tgt_left_img = input['tgt']['left_img'].to(self.device) 69 | self.tgt_right_img = input['tgt']['right_img'].to(self.device) 70 | self.tgt_fb = input['tgt']['fb'] 71 | 72 | self.num = self.src_img.shape[0] 73 | else: 74 | self.img = input['left_img'].to(self.device) 75 | 76 | def forward(self): 77 | 78 | if self.isTrain: 79 | 80 | self.fake_src_left = self.netG_Tgt(self.tgt_left_img).detach() 81 | self.out = self.netG_Depth_T(torch.cat((self.src_img, self.fake_src_left), 0)) 82 | self.src_gen_depth = self.out[-1].narrow(0, 0, self.num) 83 | self.tgt_gen_depth = self.out[-1].narrow(0, self.num, self.num) 84 | 85 | else: 86 | self.img_trans = self.netG_Tgt(self.img) 87 | self.pred = self.netG_Depth_T(self.img_trans)[-1] 88 | 89 | def backward_G(self): 90 | 91 | lambda_R_Depth = self.opt.lambda_R_Depth 92 | lambda_R_Img = self.opt.lambda_R_Img 93 | lambda_S_Depth = self.opt.lambda_S_Depth 94 | 95 | self.loss_R_Depth_Src = 0.0 96 | real_depths = dataset_util.scale_pyramid(self.src_real_depth, 4) 97 | for (gen_depth, real_depth) in zip(self.out, real_depths): 98 | self.loss_R_Depth_Src += self.criterionDepthReg(gen_depth[:self.num,:,:,:], real_depth) * lambda_R_Depth 99 | 100 | l_imgs = 
dataset_util.scale_pyramid(self.tgt_left_img, 4) 101 | r_imgs = dataset_util.scale_pyramid(self.tgt_right_img, 4) 102 | self.loss_R_Img_Tgt = 0.0 103 | i = 0 104 | for (l_img, r_img, gen_depth) in zip(l_imgs, r_imgs, self.out): 105 | loss, self.warp_tgt_img = self.criterionImgRecon(l_img, r_img, gen_depth[self.num:,:,:,:], self.tgt_fb / 2**(3-i)) 106 | self.loss_R_Img_Tgt += loss * lambda_R_Img 107 | i += 1 108 | 109 | i = 0 110 | self.loss_S_Depth_Tgt = 0.0 111 | for (gen_depth, img) in zip(self.out, l_imgs): 112 | self.loss_S_Depth_Tgt += self.criterionSmooth(gen_depth[self.num:,:,:,:], img) * self.opt.lambda_S_Depth / 2**i 113 | i += 1 114 | 115 | self.loss_G_Depth = self.loss_R_Img_Tgt + self.loss_S_Depth_Tgt + self.loss_R_Depth_Src 116 | self.loss_G_Depth.backward() 117 | 118 | def optimize_parameters(self): 119 | 120 | self.forward() 121 | self.optimizer_G_task.zero_grad() 122 | self.backward_G() 123 | self.optimizer_G_task.step() 124 | -------------------------------------------------------------------------------- /models/gasda_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from .base_model import BaseModel 4 | from . import networks 5 | from utils.image_pool import ImagePool 6 | import torch.nn.functional as F 7 | from utils import dataset_util 8 | 9 | class GASDAModel(BaseModel): 10 | def name(self): 11 | return 'GASDAModel' 12 | 13 | @staticmethod 14 | def modify_commandline_options(parser, is_train=True): 15 | 16 | parser.set_defaults(no_dropout=True) 17 | if is_train: 18 | parser.add_argument('--lambda_R_Depth', type=float, default=50.0, help='weight for reconstruction loss') 19 | parser.add_argument('--lambda_C_Depth', type=float, default=50.0, help='weight for consistency') 20 | 21 | parser.add_argument('--lambda_S_Depth', type=float, default=0.01, 22 | help='weight for smooth loss') 23 | 24 | parser.add_argument('--lambda_R_Img', type=float, default=50.0,help='weight for image reconstruction') 25 | # cyclegan 26 | parser.add_argument('--lambda_Src', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)') 27 | parser.add_argument('--lambda_Tgt', type=float, default=1.0, 28 | help='weight for cycle loss (B -> A -> B)') 29 | parser.add_argument('--lambda_identity', type=float, default=30.0, 30 | help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. 
For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') 31 | 32 | parser.add_argument('--s_depth_premodel', type=str, default=" ", 33 | help='pretrained depth estimation model') 34 | parser.add_argument('--t_depth_premodel', type=str, default=" ", 35 | help='pretrained depth estimation model') 36 | 37 | parser.add_argument('--g_src_premodel', type=str, default=" ", 38 | help='pretrained G_Src model') 39 | parser.add_argument('--g_tgt_premodel', type=str, default=" ", 40 | help='pretrained G_Tgt model') 41 | parser.add_argument('--d_src_premodel', type=str, default=" ", 42 | help='pretrained D_Src model') 43 | parser.add_argument('--d_tgt_premodel', type=str, default=" ", 44 | help='pretrained D_Tgt model') 45 | 46 | parser.add_argument('--freeze_bn', action='store_true', help='freeze the bn in mde') 47 | parser.add_argument('--freeze_in', action='store_true', help='freeze the in in cyclegan') 48 | return parser 49 | 50 | def initialize(self, opt): 51 | BaseModel.initialize(self, opt) 52 | 53 | if self.isTrain: 54 | self.loss_names = ['R_Depth_Src_S', 'S_Depth_Tgt_S', 'R_Img_Tgt_S', 'C_Depth_Tgt'] 55 | self.loss_names += ['R_Depth_Src_T', 'S_Depth_Tgt_T', 'R_Img_Tgt_T'] 56 | self.loss_names += ['D_Src', 'G_Src', 'cycle_Src', 'idt_Src', 'D_Tgt', 'G_Tgt', 'cycle_Tgt', 'idt_Tgt'] 57 | 58 | if self.isTrain: 59 | visual_names_src = ['src_img', 'fake_tgt', 'src_real_depth', 'src_gen_depth', 'src_gen_depth_t', 'src_gen_depth_s'] 60 | visual_names_tgt = ['tgt_left_img', 'fake_src_left', 'tgt_gen_depth', 'warp_tgt_img_s', 'warp_tgt_img_t', 'tgt_gen_depth_s', 'tgt_gen_depth_t', 'tgt_right_img'] 61 | if self.opt.lambda_identity > 0.0: 62 | visual_names_src.append('idt_src_left') 63 | visual_names_tgt.append('idt_tgt') 64 | self.visual_names = visual_names_src + visual_names_tgt 65 | else: 66 | self.visual_names = ['pred', 'img', 'img_trans'] 67 | 68 | if self.isTrain: 69 | self.model_names = ['G_Depth_S', 'G_Depth_T'] 70 | self.model_names += ['G_Src', 'G_Tgt', 'D_Src', 'D_Tgt'] 71 | else: 72 | self.model_names = ['G_Depth_S', 'G_Depth_T', 'G_Tgt'] 73 | 74 | self.netG_Depth_S = networks.init_net(networks.UNetGenerator(norm='batch'), init_type='kaiming', gpu_ids=opt.gpu_ids) 75 | self.netG_Depth_T = networks.init_net(networks.UNetGenerator(norm='batch'), init_type='kaiming', gpu_ids=opt.gpu_ids) 76 | 77 | self.netG_Src = networks.init_net(networks.ResGenerator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 78 | self.netG_Tgt = networks.init_net(networks.ResGenerator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 79 | 80 | if self.isTrain: 81 | use_sigmoid = opt.no_lsgan 82 | 83 | self.netD_Src = networks.init_net(networks.Discriminator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 84 | self.netD_Tgt = networks.init_net(networks.Discriminator(norm='instance'), init_type='kaiming', gpu_ids=opt.gpu_ids) 85 | 86 | self.init_with_pretrained_model('G_Depth_S', self.opt.s_depth_premodel) 87 | self.init_with_pretrained_model('G_Depth_T', self.opt.t_depth_premodel) 88 | self.init_with_pretrained_model('G_Src', self.opt.g_src_premodel) 89 | self.init_with_pretrained_model('G_Tgt', self.opt.g_tgt_premodel) 90 | self.init_with_pretrained_model('D_Src', self.opt.d_src_premodel) 91 | self.init_with_pretrained_model('D_Tgt', self.opt.d_tgt_premodel) 92 | 93 | if self.isTrain: 94 | # define loss functions 95 | self.criterionDepthReg = torch.nn.L1Loss() 96 | self.criterionDepthCons = 
torch.nn.L1Loss() 97 | self.criterionSmooth = networks.SmoothLoss() 98 | self.criterionImgRecon = networks.ReconLoss() 99 | self.criterionLR = torch.nn.L1Loss() 100 | 101 | self.fake_src_pool = ImagePool(opt.pool_size) 102 | self.fake_tgt_pool = ImagePool(opt.pool_size) 103 | self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device) 104 | self.criterionCycle = torch.nn.L1Loss() 105 | self.criterionIdt = torch.nn.L1Loss() 106 | 107 | self.optimizer_G_task = torch.optim.Adam(itertools.chain(self.netG_Depth_S.parameters(), 108 | self.netG_Depth_T.parameters()), 109 | lr=opt.lr_task, betas=(0.95, 0.999)) 110 | self.optimizer_G_trans = torch.optim.Adam(itertools.chain(self.netG_Src.parameters(), 111 | self.netG_Tgt.parameters()), 112 | lr=opt.lr_trans, betas=(0.5, 0.9)) 113 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_Src.parameters(), 114 | self.netD_Tgt.parameters()), 115 | lr=opt.lr_trans, betas=(0.5, 0.9)) 116 | self.optimizers = [] 117 | self.optimizers.append(self.optimizer_G_task) 118 | self.optimizers.append(self.optimizer_G_trans) 119 | self.optimizers.append(self.optimizer_D) 120 | if opt.freeze_bn: 121 | self.netG_Depth_S.apply(networks.freeze_bn) 122 | self.netG_Depth_T.apply(networks.freeze_bn) 123 | if opt.freeze_in: 124 | self.netG_Src.apply(networks.freeze_in) 125 | self.netG_Tgt.apply(networks.freeze_in) 126 | 127 | def set_input(self, input): 128 | 129 | if self.isTrain: 130 | self.src_real_depth = input['src']['depth'].to(self.device) 131 | self.src_img = input['src']['img'].to(self.device) 132 | self.tgt_left_img = input['tgt']['left_img'].to(self.device) 133 | self.tgt_right_img = input['tgt']['right_img'].to(self.device) 134 | self.tgt_fb = input['tgt']['fb'] 135 | self.num = self.src_img.shape[0] 136 | else: 137 | self.img = input['left_img'].to(self.device) 138 | 139 | def forward(self): 140 | 141 | if self.isTrain: 142 | pass 143 | 144 | else: 145 | self.pred_s = self.netG_Depth_S(self.img)[-1] 146 | self.img_trans = self.netG_Tgt(self.img) 147 | self.pred_t = self.netG_Depth_T(self.img_trans)[-1] 148 | self.pred = 0.5 * (self.pred_s + self.pred_t) 149 | 150 | def backward_D_basic(self, netD, real, fake): 151 | # Real 152 | pred_real = netD(real.detach()) 153 | loss_D_real = self.criterionGAN(pred_real, True) 154 | # Fake 155 | pred_fake = netD(fake.detach()) 156 | loss_D_fake = self.criterionGAN(pred_fake, False) 157 | # Combined loss 158 | loss_D = (loss_D_real + loss_D_fake) * 0.5 159 | # backward 160 | loss_D.backward() 161 | return loss_D 162 | 163 | def backward_D_Src(self): 164 | fake_tgt = self.fake_tgt_pool.query(self.fake_tgt) 165 | self.loss_D_Src = self.backward_D_basic(self.netD_Src, self.tgt_left_img, fake_tgt) 166 | 167 | def backward_D_Tgt(self): 168 | fake_src_left = self.fake_src_pool.query(self.fake_src_left) 169 | self.loss_D_Tgt = self.backward_D_basic(self.netD_Tgt, self.src_img, fake_src_left) 170 | 171 | def backward_G(self): 172 | 173 | lambda_R_Depth = self.opt.lambda_R_Depth 174 | lambda_R_Img = self.opt.lambda_R_Img 175 | lambda_S_Depth = self.opt.lambda_S_Depth 176 | lambda_C_Depth = self.opt.lambda_C_Depth 177 | lambda_idt = self.opt.lambda_identity 178 | lambda_Src = self.opt.lambda_Src 179 | lambda_Tgt = self.opt.lambda_Tgt 180 | 181 | # =========================== synthetic ========================== 182 | self.fake_tgt = self.netG_Src(self.src_img) 183 | self.idt_tgt = self.netG_Tgt(self.src_img) 184 | self.rec_src = self.netG_Tgt(self.fake_tgt) 185 | self.out_s = 
self.netG_Depth_S(self.fake_tgt) 186 | self.out_t = self.netG_Depth_T(self.src_img) 187 | self.src_gen_depth_t = self.out_t[-1] 188 | self.src_gen_depth_s = self.out_s[-1] 189 | self.loss_G_Src = self.criterionGAN(self.netD_Src(self.fake_tgt), True) 190 | self.loss_cycle_Src = self.criterionCycle(self.rec_src, self.src_img) 191 | self.loss_idt_Tgt = self.criterionIdt(self.idt_tgt, self.src_img) * lambda_Src * lambda_idt 192 | self.loss_R_Depth_Src_S = 0.0 193 | real_depths = dataset_util.scale_pyramid(self.src_real_depth, 4) 194 | for (gen_depth, real_depth) in zip(self.out_s, real_depths): 195 | self.loss_R_Depth_Src_S += self.criterionDepthReg(gen_depth, real_depth) * lambda_R_Depth 196 | self.loss_R_Depth_Src_T = 0.0 197 | for (gen_depth, real_depth) in zip(self.out_t, real_depths): 198 | self.loss_R_Depth_Src_T += self.criterionDepthReg(gen_depth, real_depth) * lambda_R_Depth 199 | self.loss = self.loss_G_Src + self.loss_cycle_Src + self.loss_idt_Tgt + self.loss_R_Depth_Src_T + self.loss_R_Depth_Src_S 200 | self.loss.backward() 201 | 202 | # ============================= real ============================= 203 | self.fake_src_left = self.netG_Tgt(self.tgt_left_img) 204 | self.idt_src_left = self.netG_Src(self.tgt_left_img) 205 | self.rec_tgt_left = self.netG_Src(self.fake_src_left) 206 | self.out_s = self.netG_Depth_S(self.tgt_left_img) 207 | self.out_t = self.netG_Depth_T(self.fake_src_left) 208 | self.tgt_gen_depth_t = self.out_t[-1] 209 | self.tgt_gen_depth_s = self.out_s[-1] 210 | self.loss_G_Tgt = self.criterionGAN(self.netD_Tgt(self.fake_src_left), True) 211 | self.loss_cycle_Tgt = self.criterionCycle(self.rec_tgt_left, self.tgt_left_img) 212 | self.loss_idt_Src = self.criterionIdt(self.idt_src_left, self.tgt_left_img) * lambda_Tgt * lambda_idt 213 | # geometry consistency 214 | l_imgs = dataset_util.scale_pyramid(self.tgt_left_img, 4) 215 | r_imgs = dataset_util.scale_pyramid(self.tgt_right_img, 4) 216 | self.loss_R_Img_Tgt_S = 0.0 217 | i = 0 218 | for (l_img, r_img, gen_depth) in zip(l_imgs, r_imgs, self.out_s): 219 | loss, self.warp_tgt_img_s = self.criterionImgRecon(l_img, r_img, gen_depth, self.tgt_fb / 2**(3-i)) 220 | self.loss_R_Img_Tgt_S += loss * lambda_R_Img 221 | i += 1 222 | self.loss_R_Img_Tgt_T = 0.0 223 | i = 0 224 | for (l_img, r_img, gen_depth) in zip(l_imgs, r_imgs, self.out_t): 225 | loss, self.warp_tgt_img_t = self.criterionImgRecon(l_img, r_img, gen_depth, self.tgt_fb / 2**(3-i)) 226 | self.loss_R_Img_Tgt_T += loss * lambda_R_Img 227 | i += 1 228 | # smoothness 229 | i = 0 230 | self.loss_S_Depth_Tgt_S = 0.0 231 | for (gen_depth, img) in zip(self.out_s, l_imgs): 232 | self.loss_S_Depth_Tgt_S += self.criterionSmooth(gen_depth, img) * self.opt.lambda_S_Depth / 2**i 233 | i += 1 234 | i = 0 235 | self.loss_S_Depth_Tgt_T = 0.0 236 | for (gen_depth, img) in zip(self.out_t, l_imgs): 237 | self.loss_S_Depth_Tgt_T += self.criterionSmooth(gen_depth, img) * self.opt.lambda_S_Depth / 2**i 238 | i += 1 239 | # depth consistency 240 | self.loss_C_Depth_Tgt = 0.0 241 | for (gen_depth1, gen_depth2) in zip(self.out_s, self.out_t): 242 | self.loss_C_Depth_Tgt += self.criterionDepthCons(gen_depth1, gen_depth2) * lambda_C_Depth 243 | 244 | self.loss_G = self.loss_G_Tgt + self.loss_cycle_Tgt + self.loss_idt_Src + self.loss_R_Img_Tgt_T + self.loss_R_Img_Tgt_S + self.loss_S_Depth_Tgt_T + self.loss_S_Depth_Tgt_S + self.loss_C_Depth_Tgt 245 | self.loss_G.backward() 246 | self.tgt_gen_depth = (self.tgt_gen_depth_t + self.tgt_gen_depth_s) / 2.0 247 | self.src_gen_depth = 
(self.src_gen_depth_t + self.src_gen_depth_s) / 2.0 248 | 249 | def optimize_parameters(self): 250 | 251 | self.forward() 252 | self.set_requires_grad([self.netD_Src, self.netD_Tgt], False) 253 | self.optimizer_G_trans.zero_grad() 254 | self.optimizer_G_task.zero_grad() 255 | self.backward_G() 256 | self.optimizer_G_trans.step() 257 | self.optimizer_G_task.step() 258 | 259 | self.set_requires_grad([self.netD_Src, self.netD_Tgt], True) 260 | self.optimizer_D.zero_grad() 261 | self.backward_D_Src() 262 | self.backward_D_Tgt() 263 | self.optimizer_D.step() 264 | -------------------------------------------------------------------------------- /models/networks.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | import functools 5 | from torch.optim import lr_scheduler 6 | import torch.nn.functional as F 7 | from torchvision import models 8 | import numpy as np 9 | from torch.autograd import Function 10 | from utils.bilinear_sampler import * 11 | 12 | def freeze_bn(m): 13 | classname = m.__class__.__name__ 14 | if classname.find('BatchNorm') != -1: 15 | m.eval() 16 | m.weight.requires_grad = False 17 | m.bias.requires_grad = False 18 | 19 | def freeze_in(m): 20 | classname = m.__class__.__name__ 21 | if classname.find('InstanceNorm') != -1: 22 | m.eval() 23 | #m.weight.requires_grad = False 24 | #m.bias.requires_grad = False 25 | 26 | def unfreeze_bn(m): 27 | classname = m.__class__.__name__ 28 | if classname.find('BatchNorm') != -1: 29 | m.train() 30 | m.weight.requires_grad = True 31 | m.bias.requires_grad = True 32 | 33 | def get_nonlinearity_layer(activation_type='PReLU'): 34 | if activation_type == 'ReLU': 35 | nonlinearity_layer = nn.ReLU(True) 36 | elif activation_type == 'SELU': 37 | nonlinearity_layer = nn.SELU(True) 38 | elif activation_type == 'LeakyReLU': 39 | nonlinearity_layer = nn.LeakyReLU(0.1, True) 40 | elif activation_type == 'PReLU': 41 | nonlinearity_layer = nn.PReLU() 42 | else: 43 | raise NotImplementedError('activation layer [%s] is not found' % activation_type) 44 | return nonlinearity_layer 45 | 46 | def get_norm_layer(norm_type='instance'): 47 | if norm_type == 'batch': 48 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True) 49 | elif norm_type == 'instance': 50 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True) 51 | elif norm_type == 'none': 52 | norm_layer = None 53 | else: 54 | raise NotImplementedError('normalization layer [%s] is not found' % norm_type) 55 | return norm_layer 56 | 57 | 58 | def get_scheduler(optimizer, opt): 59 | if opt.lr_policy == 'lambda': 60 | def lambda_rule(epoch): 61 | lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1) 62 | return lr_l 63 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) 64 | elif opt.lr_policy == 'step': 65 | scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.5) 66 | elif opt.lr_policy == 'plateau': 67 | scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) 68 | else: 69 | raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy) 70 | return scheduler 71 | 72 | 73 | def init_weights(net, init_type='normal', gain=0.02): 74 | def init_func(m): 75 | classname = m.__class__.__name__ 76 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 77 | if init_type ==
'normal': 78 | init.normal_(m.weight.data, 0.0, gain) 79 | elif init_type == 'xavier': 80 | init.xavier_normal_(m.weight.data, gain=gain) 81 | elif init_type == 'kaiming': 82 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 83 | elif init_type == 'orthogonal': 84 | init.orthogonal_(m.weight.data, gain=gain) 85 | else: 86 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 87 | if hasattr(m, 'bias') and m.bias is not None: 88 | init.constant_(m.bias.data, 0.0) 89 | elif classname.find('BatchNorm2d') != -1: 90 | init.normal_(m.weight.data, 1.0, gain) 91 | init.constant_(m.bias.data, 0.0) 92 | 93 | print('initialize network with %s' % init_type) 94 | net.apply(init_func) 95 | 96 | 97 | def init_net(net, init_type='normal', gpu_ids=[]): 98 | if len(gpu_ids) > 0: 99 | assert(torch.cuda.is_available()) 100 | net.to(gpu_ids[0]) 101 | net = torch.nn.DataParallel(net, gpu_ids) 102 | init_weights(net, init_type) 103 | return net 104 | 105 | def gradient_x(img): 106 | gx = img[:,:,:-1,:] - img[:,:,1:,:] 107 | return gx 108 | 109 | def gradient_y(img): 110 | gy = img[:,:,:,:-1] - img[:,:,:,1:] 111 | return gy 112 | 113 | def get_grid(x): 114 | torchHorizontal = torch.linspace(-1.0, 1.0, x.size(3)).view(1, 1, 1, x.size(3)).expand(x.size(0), 1, x.size(2), x.size(3)) 115 | torchVertical = torch.linspace(-1.0, 1.0, x.size(2)).view(1, 1, x.size(2), 1).expand(x.size(0), 1, x.size(2), x.size(3)) 116 | grid = torch.cat([torchHorizontal, torchVertical], 1) 117 | 118 | return grid 119 | 120 | def ssim(x, y): 121 | 122 | C1 = 0.01 ** 2 123 | C2 = 0.03 ** 2 124 | 125 | mu_x = F.avg_pool2d(x, 3, 1) 126 | mu_y = F.avg_pool2d(y, 3, 1) 127 | 128 | sigma_x = F.avg_pool2d(x**2, 3, 1) - mu_x**2 129 | sigma_y = F.avg_pool2d(y**2, 3, 1) - mu_y**2 130 | sigma_xy = F.avg_pool2d(x*y, 3, 1) - mu_x*mu_y 131 | 132 | SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2) 133 | SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2) 134 | 135 | SSIM = SSIM_n / SSIM_d 136 | return torch.clamp((1-SSIM)/2, 0, 1) 137 | 138 | ############################################################################## 139 | # Classes 140 | ############################################################################## 141 | 142 | class BerHuLoss(nn.Module): 143 | def __init__(self): 144 | super(BerHuLoss, self).__init__() 145 | 146 | def forward(self, input, target): 147 | x = input - target 148 | abs_x = torch.abs(x) 149 | c = torch.max(abs_x).item() / 5 150 | mask = (abs_x <= c).float() 151 | l2_losses = (x ** 2 + c ** 2) / (2 * c) 152 | losses = mask * abs_x + (1 - mask) * l2_losses 153 | count = np.prod(input.size(), dtype=np.float32).item() 154 | 155 | return torch.sum(losses) / count 156 | 157 | class SmoothLoss(nn.Module): 158 | def __init__(self): 159 | super(SmoothLoss, self).__init__() 160 | 161 | def forward(self, depth, image): 162 | depth_grad_x = gradient_x(depth) 163 | depth_grad_y = gradient_y(depth) 164 | image_grad_x = gradient_x(image) 165 | image_grad_y = gradient_y(image) 166 | 167 | weights_x = torch.exp(-torch.mean(torch.abs(image_grad_x),1,True)) 168 | weights_y = torch.exp(-torch.mean(torch.abs(image_grad_y),1,True)) 169 | smoothness_x = depth_grad_x*weights_x 170 | smoothness_y = depth_grad_y*weights_y 171 | 172 | loss_x = torch.mean(torch.abs(smoothness_x)) 173 | loss_y = torch.mean(torch.abs(smoothness_y)) 174 | 175 | 176 | loss = loss_x + loss_y 177 | 178 | return loss 179 | 180 | class ReconLoss(nn.Module): 181 | def __init__(self, alpha=0.85): 182 | super(ReconLoss, 
self).__init__() 183 | self.alpha = alpha 184 | 185 | def forward(self, img0, img1, pred, fb, max_d=655.35): 186 | 187 | x0 = (img0 + 1.0) / 2.0 188 | x1 = (img1 + 1.0) / 2.0 189 | 190 | assert x0.shape[0] == pred.shape[0] 191 | assert pred.shape[0] == fb.shape[0] 192 | 193 | new_depth = (pred + 1.0) / 2.0 194 | new_depth *= max_d 195 | disp = 1.0 / (new_depth+1e-6) 196 | tmp = np.array(fb) 197 | for i in range(new_depth.shape[0]): 198 | disp[i,:,:,:] *= tmp[i] 199 | disp[i,:,:,:] /= disp.shape[3] # normlize to [0,1] 200 | 201 | #x0_w = warp(x1, -1.0*disp) 202 | x0_w = bilinear_sampler_1d_h(x1, -1.0*disp) 203 | 204 | ssim_ = ssim(x0, x0_w) 205 | l1 = torch.abs(x0-x0_w) 206 | loss1 = torch.mean(self.alpha * ssim_) 207 | loss2 = torch.mean((1-self.alpha) * l1) 208 | loss = loss1 + loss2 209 | 210 | recon_img = x0_w * 2.0-1.0 211 | 212 | return loss, recon_img 213 | 214 | # Defines the GAN loss which uses either LSGAN or the regular GAN. 215 | # When LSGAN is used, it is basically same as MSELoss, 216 | # but it abstracts away the need to create the target label tensor 217 | # that has the same size as the input 218 | class GANLoss(nn.Module): 219 | def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0): 220 | super(GANLoss, self).__init__() 221 | self.register_buffer('real_label', torch.tensor(target_real_label)) 222 | self.register_buffer('fake_label', torch.tensor(target_fake_label)) 223 | if use_lsgan: 224 | self.loss = nn.MSELoss() 225 | else: 226 | self.loss = nn.BCELoss() 227 | 228 | def get_target_tensor(self, input, target_is_real): 229 | if target_is_real: 230 | target_tensor = self.real_label 231 | else: 232 | target_tensor = self.fake_label 233 | return target_tensor.expand_as(input) 234 | 235 | def __call__(self, input, target_is_real): 236 | target_tensor = self.get_target_tensor(input, target_is_real) 237 | return self.loss(input, target_tensor) 238 | 239 | 240 | class GaussianNoiseLayer(nn.Module): 241 | def __init__(self): 242 | super(GaussianNoiseLayer, self).__init__() 243 | 244 | def forward(self, x): 245 | if self.training == False: 246 | return x 247 | noise = Variable((torch.randn(x.size()).cuda(x.data.get_device()) - 0.5) / 10.0) 248 | return x+noise 249 | 250 | 251 | class _InceptionBlock(nn.Module): 252 | def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), width=1, drop_rate=0, use_bias=False): 253 | super(_InceptionBlock, self).__init__() 254 | 255 | self.width = width 256 | self.drop_rate = drop_rate 257 | 258 | for i in range(width): 259 | layer = nn.Sequential( 260 | nn.ReflectionPad2d(i*2+1), 261 | nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=0, dilation=i*2+1, bias=use_bias) 262 | ) 263 | setattr(self, 'layer'+str(i), layer) 264 | 265 | self.norm1 = norm_layer(output_nc * width) 266 | self.norm2 = norm_layer(output_nc) 267 | self.nonlinearity = nonlinearity 268 | self.branch1x1 = nn.Sequential( 269 | nn.ReflectionPad2d(1), 270 | nn.Conv2d(output_nc * width, output_nc, kernel_size=3, padding=0, bias=use_bias) 271 | ) 272 | 273 | 274 | def forward(self, x): 275 | result = [] 276 | for i in range(self.width): 277 | layer = getattr(self, 'layer'+str(i)) 278 | result.append(layer(x)) 279 | output = torch.cat(result, 1) 280 | output = self.nonlinearity(self.norm1(output)) 281 | output = self.norm2(self.branch1x1(output)) 282 | if self.drop_rate > 0: 283 | output = F.dropout(output, p=self.drop_rate, training=self.training) 284 | 285 | return self.nonlinearity(output+x) 286 | 287 | 288 | class 
_EncoderBlock(nn.Module): 289 | def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False): 290 | super(_EncoderBlock, self).__init__() 291 | 292 | model = [ 293 | nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=1, bias=use_bias), 294 | norm_layer(middle_nc), 295 | nonlinearity, 296 | nn.Conv2d(middle_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias), 297 | norm_layer(output_nc), 298 | nonlinearity 299 | ] 300 | 301 | self.model = nn.Sequential(*model) 302 | 303 | def forward(self, x): 304 | return self.model(x) 305 | 306 | 307 | class _DownBlock(nn.Module): 308 | def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False): 309 | super(_DownBlock, self).__init__() 310 | 311 | model = [ 312 | nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias), 313 | norm_layer(output_nc), 314 | nonlinearity, 315 | nn.MaxPool2d(kernel_size=2, stride=2), 316 | ] 317 | 318 | self.model = nn.Sequential(*model) 319 | 320 | def forward(self, x): 321 | return self.model(x) 322 | 323 | 324 | class _ShuffleUpBlock(nn.Module): 325 | def __init__(self, input_nc, up_scale, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False): 326 | super(_ShuffleUpBlock, self).__init__() 327 | 328 | model = [ 329 | nn.Conv2d(input_nc, input_nc*up_scale**2, kernel_size=3, stride=1, padding=1, bias=use_bias), 330 | nn.PixelShuffle(up_scale), 331 | nonlinearity, 332 | nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias), 333 | norm_layer(output_nc), 334 | nonlinearity 335 | ] 336 | 337 | self.model = nn.Sequential(*model) 338 | 339 | def forward(self, x): 340 | return self.model(x) 341 | 342 | 343 | class _DecoderUpBlock(nn.Module): 344 | def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False): 345 | super(_DecoderUpBlock, self).__init__() 346 | 347 | model = [ 348 | nn.ReflectionPad2d(1), 349 | nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=0, bias=use_bias), 350 | norm_layer(middle_nc), 351 | nonlinearity, 352 | nn.ConvTranspose2d(middle_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1), 353 | norm_layer(output_nc), 354 | nonlinearity 355 | ] 356 | 357 | self.model = nn.Sequential(*model) 358 | 359 | def forward(self, x): 360 | return self.model(x) 361 | 362 | 363 | class _OutputBlock(nn.Module): 364 | def __init__(self, input_nc, output_nc, kernel_size=3, use_bias=False): 365 | super(_OutputBlock, self).__init__() 366 | model = [ 367 | nn.ReflectionPad2d(int(kernel_size/2)), 368 | nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size, padding=0, bias=use_bias), 369 | nn.Tanh() 370 | ] 371 | 372 | self.model = nn.Sequential(*model) 373 | 374 | def forward(self, x): 375 | return self.model(x) 376 | class UNetGenerator(nn.Module): 377 | def __init__(self, input_nc=3, output_nc=1, ngf=64, layers=4, norm='batch', drop_rate=0, add_noise=False, weight=0.1): 378 | super(UNetGenerator, self).__init__() 379 | 380 | self.layers = layers 381 | self.weight = weight 382 | norm_layer = get_norm_layer(norm_type=norm) 383 | nonlinearity = get_nonlinearity_layer(activation_type='PReLU') 384 | 385 | if type(norm_layer) == functools.partial: 386 | use_bias = norm_layer.func == nn.InstanceNorm2d 387 | else: 388 | use_bias = norm_layer == nn.InstanceNorm2d 389 | 390 | # encoder part 391 | self.pool = nn.AvgPool2d(kernel_size=2, stride=2) 
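        # Note added for readability (not in the original source): UNetGenerator is a
        # U-Net-style depth generator. conv1-conv4 below encode the input and are each
        # followed by the shared AvgPool2d above in forward(); self.center processes the
        # bottleneck with _InceptionBlock(s) plus one _DecoderUpBlock; deconv4-deconv2
        # decode with skip connections scaled by self.weight, and the _OutputBlock heads
        # return multi-scale predictions from output4 (coarsest) up to output1 (full input resolution).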
392 | self.conv1 = nn.Sequential( 393 | nn.ReflectionPad2d(3), 394 | nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), 395 | norm_layer(ngf), 396 | nonlinearity 397 | ) 398 | self.conv2 = _EncoderBlock(ngf, ngf*2, ngf*2, norm_layer, nonlinearity, use_bias) 399 | self.conv3 = _EncoderBlock(ngf*2, ngf*4, ngf*4, norm_layer, nonlinearity, use_bias) 400 | self.conv4 = _EncoderBlock(ngf*4, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias) 401 | 402 | for i in range(layers-4): 403 | conv = _EncoderBlock(ngf*8, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias) 404 | setattr(self, 'down'+str(i), conv.model) 405 | 406 | center=[] 407 | for i in range(7-layers): 408 | center +=[ 409 | _InceptionBlock(ngf*8, ngf*8, norm_layer, nonlinearity, 7-layers, drop_rate, use_bias) 410 | ] 411 | 412 | center += [ 413 | _DecoderUpBlock(ngf*8, ngf*8, ngf*4, norm_layer, nonlinearity, use_bias) 414 | ] 415 | if add_noise: 416 | center += [GaussianNoiseLayer()] 417 | self.center = nn.Sequential(*center) 418 | 419 | for i in range(layers-4): 420 | upconv = _DecoderUpBlock(ngf*(8+4), ngf*8, ngf*4, norm_layer, nonlinearity, use_bias) 421 | setattr(self, 'up' + str(i), upconv.model) 422 | 423 | self.deconv4 = _DecoderUpBlock(ngf*(4+4), ngf*8, ngf*2, norm_layer, nonlinearity, use_bias) 424 | self.deconv3 = _DecoderUpBlock(ngf*(2+2)+output_nc, ngf*4, ngf, norm_layer, nonlinearity, use_bias) 425 | self.deconv2 = _DecoderUpBlock(ngf*(1+1)+output_nc, ngf*2, int(ngf/2), norm_layer, nonlinearity, use_bias) 426 | 427 | self.output4 = _OutputBlock(ngf*(4+4), output_nc, 3, use_bias) 428 | self.output3 = _OutputBlock(ngf*(2+2)+output_nc, output_nc, 3, use_bias) 429 | self.output2 = _OutputBlock(ngf*(1+1)+output_nc, output_nc, 3, use_bias) 430 | self.output1 = _OutputBlock(int(ngf/2)+output_nc, output_nc, 7, use_bias) 431 | 432 | self.upsample = nn.Upsample(scale_factor=2, mode='nearest') 433 | 434 | def forward(self, input): 435 | conv1 = self.pool(self.conv1(input)) 436 | conv2 = self.pool(self.conv2.forward(conv1)) 437 | conv3 = self.pool(self.conv3.forward(conv2)) 438 | center_in = self.pool(self.conv4.forward(conv3)) 439 | 440 | middle = [center_in] 441 | for i in range(self.layers-4): 442 | model = getattr(self, 'down'+str(i)) 443 | center_in = self.pool(model.forward(center_in)) 444 | middle.append(center_in) 445 | center_out = self.center.forward(center_in) 446 | #result = [center_in] 447 | 448 | for i in range(self.layers-4): 449 | model = getattr(self, 'up'+str(i)) 450 | center_out = model.forward(torch.cat([center_out, middle[self.layers-5-i]], 1)) 451 | 452 | scale = 1.0 453 | result = [] 454 | deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * self.weight], 1)) 455 | output4 = scale * self.output4.forward(torch.cat([center_out, conv3 * self.weight], 1)) 456 | result.append(output4) 457 | deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1)) 458 | output3 = scale * self.output3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1)) 459 | result.append(output3) 460 | deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1)) 461 | output2 = scale * self.output2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1)) 462 | result.append(output2) 463 | output1 = scale * self.output1.forward(torch.cat([deconv2, self.upsample(output2)], 1)) 464 | result.append(output1) 465 | 466 | return result 467 | 468 | # Defines the generator that consists 
of Resnet blocks between a few 469 | # downsampling/upsampling operations. 470 | # Code and idea originally from Justin Johnson's architecture. 471 | # https://github.com/jcjohnson/fast-neural-style/ 472 | class ResGenerator(nn.Module): 473 | def __init__(self, input_nc=3, output_nc=3, ngf=64, norm='batch', use_dropout=False, n_blocks=9, padding_type='reflect'): 474 | assert(n_blocks >= 0) 475 | super(ResGenerator, self).__init__() 476 | norm_layer = get_norm_layer(norm_type=norm) 477 | self.input_nc = input_nc 478 | self.output_nc = output_nc 479 | self.ngf = ngf 480 | if type(norm_layer) == functools.partial: 481 | use_bias = norm_layer.func == nn.InstanceNorm2d 482 | else: 483 | use_bias = norm_layer == nn.InstanceNorm2d 484 | 485 | model = [nn.ReflectionPad2d(3), 486 | nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, 487 | bias=use_bias), 488 | norm_layer(ngf), 489 | nn.ReLU(True)] 490 | 491 | n_downsampling = 2 492 | for i in range(n_downsampling): 493 | mult = 2**i 494 | model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, 495 | stride=2, padding=1, bias=use_bias), 496 | norm_layer(ngf * mult * 2), 497 | nn.ReLU(True)] 498 | 499 | mult = 2**n_downsampling 500 | for i in range(n_blocks): 501 | model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] 502 | 503 | for i in range(n_downsampling): 504 | mult = 2**(n_downsampling - i) 505 | model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), 506 | kernel_size=3, stride=2, 507 | padding=1, output_padding=1, 508 | bias=use_bias), 509 | norm_layer(int(ngf * mult / 2)), 510 | nn.ReLU(True)] 511 | model += [nn.ReflectionPad2d(3)] 512 | model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] 513 | model += [nn.Tanh()] 514 | 515 | self.model = nn.Sequential(*model) 516 | 517 | def forward(self, input): 518 | return self.model(input) 519 | 520 | # Define a resnet block 521 | class ResnetBlock(nn.Module): 522 | def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): 523 | super(ResnetBlock, self).__init__() 524 | self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) 525 | 526 | def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): 527 | conv_block = [] 528 | p = 0 529 | if padding_type == 'reflect': 530 | conv_block += [nn.ReflectionPad2d(1)] 531 | elif padding_type == 'replicate': 532 | conv_block += [nn.ReplicationPad2d(1)] 533 | elif padding_type == 'zero': 534 | p = 1 535 | else: 536 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 537 | 538 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), 539 | norm_layer(dim), 540 | nn.ReLU(True)] 541 | if use_dropout: 542 | conv_block += [nn.Dropout(0.5)] 543 | 544 | p = 0 545 | if padding_type == 'reflect': 546 | conv_block += [nn.ReflectionPad2d(1)] 547 | elif padding_type == 'replicate': 548 | conv_block += [nn.ReplicationPad2d(1)] 549 | elif padding_type == 'zero': 550 | p = 1 551 | else: 552 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 553 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), 554 | norm_layer(dim)] 555 | 556 | return nn.Sequential(*conv_block) 557 | 558 | def forward(self, x): 559 | out = x + self.conv_block(x) 560 | return out 561 | 562 | class Discriminator(nn.Module): 563 | def __init__(self, input_nc=3, ndf=64, n_layers=3, norm='batch', use_sigmoid=False): 564 | super(Discriminator, 
self).__init__() 565 | norm_layer = get_norm_layer(norm_type=norm) 566 | if type(norm_layer) == functools.partial: 567 | use_bias = norm_layer.func == nn.InstanceNorm2d 568 | else: 569 | use_bias = norm_layer == nn.InstanceNorm2d 570 | 571 | kw = 4 572 | padw = 1 573 | sequence = [ 574 | nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), 575 | nn.LeakyReLU(0.2, True) 576 | ] 577 | 578 | nf_mult = 1 579 | nf_mult_prev = 1 580 | for n in range(1, n_layers): 581 | nf_mult_prev = nf_mult 582 | nf_mult = min(2**n, 8) 583 | sequence += [ 584 | nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, 585 | kernel_size=kw, stride=2, padding=padw, bias=use_bias), 586 | norm_layer(ndf * nf_mult), 587 | nn.LeakyReLU(0.2, True) 588 | ] 589 | 590 | nf_mult_prev = nf_mult 591 | nf_mult = min(2**n_layers, 8) 592 | sequence += [ 593 | nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, 594 | kernel_size=kw, stride=1, padding=padw, bias=use_bias), 595 | norm_layer(ndf * nf_mult), 596 | nn.LeakyReLU(0.2, True) 597 | ] 598 | 599 | sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] 600 | 601 | if use_sigmoid: 602 | sequence += [nn.Sigmoid()] 603 | 604 | self.model = nn.Sequential(*sequence) 605 | 606 | def forward(self, input): 607 | return self.model(input) 608 | -------------------------------------------------------------------------------- /models/networks.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/models/networks.pyc -------------------------------------------------------------------------------- /options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/options/__init__.py -------------------------------------------------------------------------------- /options/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/options/__init__.pyc -------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from utils import util 4 | import torch 5 | import models 6 | import data 7 | 8 | 9 | class BaseOptions(): 10 | def __init__(self): 11 | self.initialized = False 12 | 13 | def initialize(self, parser): 14 | parser.add_argument('--src_root', type=str, default='./datasets/vkitti', help='path to source dataset') 15 | parser.add_argument('--tgt_root', type=str, default='./datasets/kitti', help='path to target dataset') 16 | parser.add_argument('--src_dataset', type=str, default='vkitti', help='synthetic domain') 17 | parser.add_argument('--tgt_dataset', type=str, default='kitti', help='real domain') 18 | parser.add_argument('--batchSize', type=int, default=1, help='input batch size') 19 | parser.add_argument('--loadSize', nargs='+', type=int, default=286, help='scale images to this size') 20 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') 21 | parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 22 | parser.add_argument('--model', type=str, default='gasda', 23 | help='chooses which model to use. 
s2s, s2p, p2s, p2p') 24 | parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data') 25 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') 26 | parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') 27 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') 28 | parser.add_argument('--no_rotation', action='store_true', help='if specified, do not rotate the images for data augmentation') 29 | parser.add_argument('--no_augment', action='store_true', help='if specified, do not use data augmentation, e.g., randomly shifting gamma') 30 | parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 31 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 32 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') 33 | parser.add_argument('--how_many', type=int, default=100, help='how many test or validatation images to run') 34 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}') 35 | self.initialized = True 36 | return parser 37 | 38 | def gather_options(self): 39 | # initialize parser with basic options 40 | if not self.initialized: 41 | parser = argparse.ArgumentParser( 42 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 43 | parser = self.initialize(parser) 44 | 45 | # get the basic options 46 | opt, _ = parser.parse_known_args() 47 | 48 | # modify model-related parser options 49 | model_name = opt.model 50 | model_option_setter = models.get_option_setter(model_name) 51 | parser = model_option_setter(parser, self.isTrain) 52 | opt, _ = parser.parse_known_args() # parse again with the new defaults 53 | 54 | self.parser = parser 55 | 56 | return parser.parse_args() 57 | 58 | def print_options(self, opt): 59 | message = '' 60 | message += '----------------- Options ---------------\n' 61 | for k, v in sorted(vars(opt).items()): 62 | comment = '' 63 | default = self.parser.get_default(k) 64 | if v != default: 65 | comment = '\t[default: %s]' % str(default) 66 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 67 | message += '----------------- End -------------------' 68 | print(message) 69 | 70 | # save to the disk 71 | if self.isTrain: 72 | expr_dir = os.path.join(opt.checkpoints_dir, opt.expr_name) 73 | util.mkdirs(expr_dir) 74 | file_name = os.path.join(expr_dir, 'opt.txt') 75 | with open(file_name, 'wt') as opt_file: 76 | opt_file.write(message) 77 | opt_file.write('\n') 78 | 79 | def parse(self): 80 | 81 | opt = self.gather_options() 82 | opt.isTrain = self.isTrain 83 | 84 | opt.expr_name = opt.src_dataset + '2' + opt.tgt_dataset + '_' + opt.model 85 | # process opt.suffix 86 | if opt.suffix: 87 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' 88 | opt.expr_name = opt.expr_name + suffix 89 | 90 | self.print_options(opt) 91 | 92 | # set gpu ids 93 | str_ids = opt.gpu_ids.split(',') 94 | opt.gpu_ids = [] 95 | for str_id in str_ids: 96 | id = int(str_id) 97 | if id >= 0: 98 | opt.gpu_ids.append(id) 99 | if len(opt.gpu_ids) > 0: 100 | torch.cuda.set_device(opt.gpu_ids[0]) 101 | 102 | self.opt = opt 103 | return self.opt 104 | 
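Below is a minimal usage sketch (not part of the repository) of how these option classes are consumed; train.py and test.py do essentially this. Note that parse() derives opt.expr_name as <src_dataset>2<tgt_dataset>_<model> (plus any --suffix), e.g. vkitti2kitti_gasda, which is the folder created under --checkpoints_dir.
```
# Minimal sketch (assumption: run from the repository root with the default options).
from options.train_options import TrainOptions

if __name__ == '__main__':
    opt = TrainOptions().parse()       # parses sys.argv and prints the option table
    print(opt.isTrain)                 # True for TrainOptions, False for TestOptions
    print(opt.expr_name)               # e.g. vkitti2kitti_gasda -> ./checkpoints/vkitti2kitti_gasda/
    print(opt.gpu_ids, opt.loadSize)   # pass --gpu_ids -1 to run on CPU
```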
-------------------------------------------------------------------------------- /options/base_options.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/options/base_options.pyc -------------------------------------------------------------------------------- /options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | 4 | class TestOptions(BaseOptions): 5 | def initialize(self, parser): 6 | parser = BaseOptions.initialize(self, parser) 7 | parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') 8 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 9 | parser.add_argument('--root', type=str, default='datasets/kitti', help='data root') 10 | parser.add_argument('--test_datafile', type=str, default='test.txt', help='stores data list, in root') 11 | parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 12 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 13 | parser.add_argument('--save', action='store_true', help='save results') 14 | parser.add_argument('--test_dataset', type=str, default='kitti', help='kitti|stereo|make3d') 15 | 16 | self.isTrain = False 17 | return parser 18 | -------------------------------------------------------------------------------- /options/test_options.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/options/test_options.pyc -------------------------------------------------------------------------------- /options/train_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | 4 | class TrainOptions(BaseOptions): 5 | def initialize(self, parser): 6 | parser = BaseOptions.initialize(self, parser) 7 | parser.add_argument('--src_train_datafile', type=str, default='train.txt', help='stores data list, in src_root') 8 | parser.add_argument('--tgt_train_datafile', type=str, default='train.txt', help='stores data list, in tgt_root') 9 | parser.add_argument('--print_freq', type=int, default=32, help='frequency of showing training results on console') 10 | parser.add_argument('--save_result_freq', type=int, default=3200, help='frequency of saving the latest prediction results') 11 | parser.add_argument('--save_latest_freq', type=int, default=3200, help='frequency of saving the latest trained model') 12 | parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs') 13 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 14 | parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 15 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') 16 | parser.add_argument('--niter', type=int, default=10, help='# of iter at starting learning rate') 17 | parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') 18 | parser.add_argument('--beta1', type=float, default=0.9, help='momentum term of adam') 19 | parser.add_argument('--lr_task', type=float, default=1e-4, help='initial learning rate for adam') 20 | parser.add_argument('--lr_trans', type=float, default=5e-5, help='initial learning rate for adam') 21 | parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') 22 | parser.add_argument('--scale_pred', action='store_true', help='scale prediction according the ratio of median value') 23 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') 24 | parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy: lambda|step|plateau') 25 | parser.add_argument('--lr_decay_iters', type=int, default=10, help='multiply by a gamma every lr_decay_iters iterations') 26 | parser.add_argument('--no_val', action='store_true', help='validation') 27 | 28 | self.isTrain = True 29 | return parser 30 | -------------------------------------------------------------------------------- /options/train_options.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sshan-zhao/GASDA/3316e8b0dc6a51acbd89496e437da249352c1189/options/train_options.pyc -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import torch.nn 3 | from options.test_options import TestOptions 4 | from data import create_dataloader 5 | from models import create_model 6 | from utils import dataset_util 7 | import numpy as np 8 | import os 9 | from PIL import Image 10 | import cv2 11 | 12 | if __name__ == '__main__': 13 | opt = TestOptions().parse() 14 | data_loader = create_dataloader(opt) 15 | dataset_size = len(data_loader) 16 | print('#test images = %d' % dataset_size) 17 | 18 | model = create_model(opt) 19 | model.setup(opt) 20 | model.eval() 21 | 22 | save_dir = os.path.join('results', opt.model+'_'+opt.suffix+'_'+opt.which_epoch) 23 | os.makedirs(save_dir) 24 | num_samples = len(data_loader) 25 | rms = np.zeros(num_samples, np.float32) 26 | log_rms = np.zeros(num_samples, np.float32) 27 | abs_rel = np.zeros(num_samples, np.float32) 28 | sq_rel = np.zeros(num_samples, np.float32) 29 | a1 = np.zeros(num_samples, np.float32) 30 | a2 = np.zeros(num_samples, np.float32) 31 | a3 = np.zeros(num_samples, np.float32) 32 | MAX_DEPTH = 80 #50 33 | MIN_DEPTH = 1e-3 34 | 35 | for ind, data in enumerate(data_loader): 36 | 37 | model.set_input(data) 38 | model.test() 39 | 40 | visuals = model.get_current_visuals() 41 | 42 | gt_depth = np.squeeze(data['depth'].data.numpy()) 43 | pred_depth = np.squeeze(visuals['pred'].data.cpu().numpy()) 44 | 45 | w = gt_depth.shape[1] 46 | h = gt_depth.shape[0] 47 | 48 | mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH) 49 | crop = np.array([0.40810811 * h, 0.99189189 * h, 50 | 0.03594771 * w, 0.96405229 * w]).astype(np.int32) 51 | crop_mask = np.zeros(mask.shape) 52 | crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1 53 | mask = np.logical_and(mask, crop_mask) 54 | 55 | pred_depth = 
cv2.resize(pred_depth, (w, h), interpolation=cv2.INTER_CUBIC) 56 | pred_depth += 1.0 57 | pred_depth /= 2.0 58 | pred_depth *= 655.35 59 | 60 | # evaluate 61 | pred_depth[pred_depth<1e-3] = 1e-3 62 | pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH 63 | 64 | abs_rel[ind], sq_rel[ind], rms[ind], log_rms[ind], a1[ind], a2[ind], a3[ind] = dataset_util.compute_errors(gt_depth[mask], pred_depth[mask]) 65 | 66 | # save 67 | pred_img = Image.fromarray(pred_depth.astype(np.int32)*100, 'I') 68 | pred_img.save('%s/%05d_pred.png'%(save_dir, ind)) 69 | print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('abs_rel', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3')) 70 | print("{:10.4f}, {:10.4f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}".format(abs_rel.mean(), sq_rel.mean(), rms.mean(), log_rms.mean(), a1.mean(), a2.mean(), a3.mean())) 71 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import time 2 | import torch.nn 3 | from options.train_options import TrainOptions 4 | from data import create_dataloader 5 | from models import create_model 6 | from utils.util import SaveResults 7 | from utils import dataset_util, util 8 | import numpy as np 9 | import cv2 10 | torch.manual_seed(0) 11 | torch.backends.cudnn.deterministic = True 12 | torch.backends.cudnn.benchmark = False 13 | np.random.seed(0) 14 | 15 | if __name__ == '__main__': 16 | opt = TrainOptions().parse() 17 | train_data_loader = create_dataloader(opt) 18 | train_dataset_size = len(train_data_loader) 19 | print('#training images = %d' % train_dataset_size) 20 | 21 | model = create_model(opt) 22 | model.setup(opt) 23 | save_results = SaveResults(opt) 24 | total_steps = 0 25 | 26 | lr = opt.lr_task 27 | 28 | for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1): 29 | epoch_start_time = time.time() 30 | iter_data_time = time.time() 31 | epoch_iter = 0 32 | 33 | # training 34 | print("training stage (epoch: %s) starting...................." 
% epoch) 35 | for ind, data in enumerate(train_data_loader): 36 | iter_start_time = time.time() 37 | if total_steps % opt.print_freq == 0: 38 | t_data = iter_start_time - iter_data_time 39 | total_steps += opt.batchSize 40 | epoch_iter += opt.batchSize 41 | model.set_input(data) 42 | model.optimize_parameters() 43 | if total_steps % opt.print_freq == 0: 44 | losses = model.get_current_losses() 45 | t = (time.time() - iter_start_time) / opt.batchSize 46 | save_results.print_current_losses(epoch, epoch_iter, lr, losses, t, t_data) 47 | 48 | if total_steps % opt.save_latest_freq == 0: 49 | print('saving the latest model (epoch %d, total_steps %d)' % 50 | (epoch, total_steps)) 51 | model.save_networks('latest') 52 | 53 | if total_steps % opt.save_result_freq == 0: 54 | save_results.save_current_results(model.get_current_visuals(), epoch) 55 | 56 | iter_data_time = time.time() 57 | 58 | if epoch % opt.save_epoch_freq == 0: 59 | print('saving the model at the end of epoch %d, iters %d' % 60 | (epoch, total_steps)) 61 | model.save_networks('latest') 62 | model.save_networks(epoch) 63 | print('End of epoch %d / %d \t Time Taken: %d sec' % 64 | (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) 65 | lr = model.update_learning_rate() 66 | -------------------------------------------------------------------------------- /utils/bilinear_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # Copyright 2017 Modifications Clement Godard. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ============================================================================== 16 | 17 | import torch 18 | 19 | def bilinear_sampler_1d_h(input_images, x_offset, wrap_mode='border', name='bilinear_sampler', **kwargs): 20 | def _repeat(x, n_repeats): 21 | 22 | rep = x.view(-1,1).repeat(1, n_repeats) 23 | return rep.reshape(-1) 24 | 25 | def _interpolate(im, x, y): 26 | 27 | # handle both texture border types 28 | _edge_size = 0 29 | if _wrap_mode == 'border': 30 | _edge_size = 1 31 | pad = torch.nn.ConstantPad2d(1, 0) 32 | im = pad(im) 33 | x = x + _edge_size 34 | y = y + _edge_size 35 | elif _wrap_mode == 'edge': 36 | _edge_size = 0 37 | else: 38 | return None 39 | 40 | x = torch.clamp(x, 0.0, _width_f -1 + 2 * _edge_size) 41 | 42 | x0_f = torch.floor(x) 43 | y0_f = torch.floor(y) 44 | x1_f = x0_f + 1 45 | 46 | x0 = x0_f.int() 47 | y0 = y0_f.int() 48 | x1 = torch.clamp(x1_f, max = _width_f -1 + 2 * _edge_size).int() 49 | 50 | dim2 = (_width + 2 * _edge_size) 51 | dim1 = (_width + 2 * _edge_size) * (_height + 2 * _edge_size) 52 | base = _repeat(torch.arange(_num_batch) * dim1, _height * _width).int().cuda() 53 | base_y0 = base + y0 * dim2 54 | idx_l = base_y0 + x0 55 | idx_r = base_y0 + x1 56 | 57 | im_per = im.permute(0, 2, 3, 1) 58 | im_flat = torch.reshape(im_per, (-1, _num_channels)) 59 | 60 | pix_l = im_flat[idx_l.long(),:] 61 | pix_r = im_flat[idx_r.long(),:] 62 | 63 | weight_l = (x1_f - x).view(-1, 1) 64 | weight_r = (x - x0_f).view(-1, 1) 65 | 66 | return weight_l * pix_l + weight_r * pix_r 67 | 68 | def _transform(input_images, x_offset): 69 | 70 | x_t_flat = torch.arange(0, _width).view(1, -1).repeat(_height, 1).float().cuda() 71 | x_t_flat = x_t_flat.view(1, _height, _width).repeat(_num_batch, 1, 1).view(-1) 72 | y_t_flat = torch.arange(0, _height).view(-1, 1).repeat(1, _width).float().cuda() 73 | y_t_flat = y_t_flat.view(1, _height, _width).repeat(_num_batch, 1, 1).view(-1) 74 | 75 | x_t_flat += x_offset.view(-1) * _width_f 76 | 77 | input_transformed = _interpolate(input_images, x_t_flat, y_t_flat) 78 | 79 | output = torch.reshape(input_transformed, [_num_batch, _height, _width, _num_channels]).permute(0, 3, 1, 2) 80 | 81 | return output 82 | 83 | _num_batch = input_images.shape[0] 84 | _height = input_images.shape[2] 85 | _width = input_images.shape[3] 86 | _num_channels = input_images.shape[1] 87 | 88 | _height_f = float(_height) 89 | _width_f = float(_width) 90 | 91 | _wrap_mode = wrap_mode 92 | 93 | output = _transform(input_images, x_offset) 94 | return output 95 | -------------------------------------------------------------------------------- /utils/dataset_util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import Counter 3 | import os 4 | from PIL import Image 5 | import itertools 6 | #import png 7 | import torch.nn.functional as F 8 | 9 | def scale_pyramid(img, num_scales): 10 | scaled_imgs = [img] 11 | 12 | s = img.size() 13 | 14 | h = s[2] 15 | w = s[3] 16 | 17 | for i in range(1, num_scales): 18 | ratio = 2**i 19 | nh = h // ratio 20 | nw = w // ratio 21 | scaled_img = F.upsample(img, size=(nh, nw), mode='nearest') 22 | scaled_imgs.append(scaled_img) 23 | 24 | scaled_imgs.reverse() 25 | return scaled_imgs 26 | 27 | def compute_errors(ground_truth, predication): 28 | 29 | # accuracy 30 | threshold = np.maximum((ground_truth / predication),(predication / ground_truth)) 31 | a1 = (threshold < 1.25 ).mean() 32 | a2 = (threshold < 1.25 ** 2 ).mean() 33 | a3 = (threshold < 1.25 ** 3 
).mean() 34 | 35 | #MSE 36 | rmse = (ground_truth - predication) ** 2 37 | rmse = np.sqrt(rmse.mean()) 38 | 39 | #MSE(log) 40 | rmse_log = (np.log(ground_truth) - np.log(predication)) ** 2 41 | rmse_log = np.sqrt(rmse_log.mean()) 42 | 43 | # Abs Relative difference 44 | abs_rel = np.mean(np.abs(ground_truth - predication) / ground_truth) 45 | 46 | # Squared Relative difference 47 | sq_rel = np.mean(((ground_truth - predication) ** 2) / ground_truth) 48 | 49 | return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 50 | 51 | class KITTI: 52 | 53 | def read_calib_file(self, path): 54 | # taken from https://github.com/hunse/kitti 55 | float_chars = set("0123456789.e+- ") 56 | data = {} 57 | with open(path, 'r') as f: 58 | for line in f.readlines(): 59 | key, value = line.split(':', 1) 60 | value = value.strip() 61 | data[key] = value 62 | if float_chars.issuperset(value): 63 | # try to cast to float array 64 | try: 65 | data[key] = np.array(list(map(float, value.split(' ')))) 66 | except ValueError: 67 | # casting error: data[key] already eq. value, so pass 68 | pass 69 | 70 | return data 71 | 72 | def get_fb(self, calib_dir, cam=2): 73 | cam2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt')) 74 | P2_rect = cam2cam['P_rect_02'].reshape(3,4) 75 | P3_rect = cam2cam['P_rect_03'].reshape(3,4) 76 | 77 | # cam 2 is left of camera 0 -6cm 78 | # cam 3 is to the right +54cm 79 | b2 = P2_rect[0,3] / -P2_rect[0,0] 80 | b3 = P3_rect[0,3] / -P3_rect[0,0] 81 | baseline = b3-b2 82 | 83 | if cam==2: 84 | focal_length = P2_rect[0,0] 85 | elif cam==3: 86 | focal_length = P3_rect[0,0] 87 | 88 | return focal_length * baseline 89 | 90 | def load_velodyne_points(self, file_name): 91 | # adapted from https://github.com/hunse/kitti 92 | points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4) 93 | points[:, 3] = 1.0 # homogeneous 94 | return points 95 | 96 | def lin_interp(self, shape, xyd): 97 | # taken from https://github.com/hunse/kitti 98 | from scipy.interpolate import LinearNDInterpolator 99 | m, n = shape 100 | ij, d = xyd[:, 1::-1], xyd[:, 2] 101 | f = LinearNDInterpolator(ij, d, fill_value=0) 102 | J, I = np.meshgrid(np.arange(n), np.arange(m)) 103 | IJ = np.vstack([I.flatten(), J.flatten()]).T 104 | disparity = f(IJ).reshape(shape) 105 | return disparity 106 | 107 | def sub2ind(self, matrixSize, rowSub, colSub): 108 | m, n = matrixSize 109 | return rowSub * (n-1) + colSub - 1 110 | 111 | def get_depth(self, calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False): 112 | # load calibration files 113 | cam2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt')) 114 | velo2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_velo_to_cam.txt')) 115 | velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis])) 116 | velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0]))) 117 | 118 | # compute projection matrix velodyne->image plane 119 | R_cam2rect = np.eye(4) 120 | R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3) 121 | P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3,4) 122 | P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam) 123 | 124 | # load velodyne points and remove all behind image plane (approximation) 125 | # each row of the velodyne data is forward, left, up, reflectance 126 | velo = self.load_velodyne_points(velo_file_name) 127 | velo = velo[velo[:, 0] >= 0, :] 128 | 129 | # project the points to the camera 130 | velo_pts_im = np.dot(P_velo2im, velo.T).T 131 | velo_pts_im[:, :2] = 
velo_pts_im[:,:2] / velo_pts_im[:,2][..., np.newaxis] 132 | 133 | if vel_depth: 134 | velo_pts_im[:, 2] = velo[:, 0] 135 | 136 | # check if in bounds 137 | # use minus 1 to get the exact same value as KITTI matlab code 138 | velo_pts_im[:, 0] = np.round(velo_pts_im[:,0]) - 1 139 | velo_pts_im[:, 1] = np.round(velo_pts_im[:,1]) - 1 140 | val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0) 141 | val_inds = val_inds & (velo_pts_im[:,0] < im_shape[1]) & (velo_pts_im[:,1] < im_shape[0]) 142 | velo_pts_im = velo_pts_im[val_inds, :] 143 | 144 | # project to image 145 | depth = np.zeros((im_shape)) 146 | depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2] 147 | 148 | # find the duplicate points and choose the closest depth 149 | inds = self.sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0]) 150 | dupe_inds = [item for item, count in Counter(inds).items() if count > 1] 151 | for dd in dupe_inds: 152 | pts = np.where(inds==dd)[0] 153 | x_loc = int(velo_pts_im[pts[0], 0]) 154 | y_loc = int(velo_pts_im[pts[0], 1]) 155 | depth[y_loc, x_loc] = velo_pts_im[pts, 2].min() 156 | depth[depth<0] = 0 157 | 158 | if interp: 159 | # interpolate the depth map to fill in holes 160 | depth_interp = lin_interp(im_shape, velo_pts_im) 161 | return depth, depth_interp 162 | else: 163 | return depth 164 | -------------------------------------------------------------------------------- /utils/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | 4 | 5 | class ImagePool(): 6 | def __init__(self, pool_size): 7 | self.pool_size = pool_size 8 | if self.pool_size > 0: 9 | self.num_imgs = 0 10 | self.images = [] 11 | 12 | def query(self, images): 13 | if self.pool_size == 0: 14 | return images 15 | return_images = [] 16 | for image in images: 17 | image = torch.unsqueeze(image.data, 0) 18 | if self.num_imgs < self.pool_size: 19 | self.num_imgs = self.num_imgs + 1 20 | self.images.append(image) 21 | return_images.append(image) 22 | else: 23 | p = random.uniform(0, 1) 24 | if p > 0.5: 25 | random_id = random.randint(0, self.pool_size - 1) # randint is inclusive 26 | tmp = self.images[random_id].clone() 27 | self.images[random_id] = image 28 | return_images.append(tmp) 29 | else: 30 | return_images.append(image) 31 | return_images = torch.cat(return_images, 0) 32 | return return_images 33 | -------------------------------------------------------------------------------- /utils/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import time 4 | import numpy as np 5 | from PIL import Image 6 | import os 7 | 8 | # save image to the disk 9 | def save_images(visuals, results_dir, ind): 10 | 11 | for label, im_data in visuals.items(): 12 | 13 | img_path = os.path.join(results_dir, '%.3d_%s.png' % (ind, label)) 14 | if 'depth' in label: 15 | pass 16 | else: 17 | image_numpy = tensor2im(im_data) 18 | save_image(image_numpy, img_path, 'RGB') 19 | 20 | # Converts a Tensor into an image array (numpy) 21 | # |imtype|: the desired type of the converted numpy array 22 | def tensor2im(input_image, imtype=np.uint8): 23 | if isinstance(input_image, torch.Tensor): 24 | image_tensor = input_image.data 25 | else: 26 | return input_image 27 | image_numpy = image_tensor[0].cpu().float().numpy() 28 | if image_numpy.shape[0] == 1: 29 | image_numpy = np.tile(image_numpy, (3, 1, 1)) 30 | image_numpy = 
(np.transpose(image_numpy, (1, 2, 0)) + 1) 31 | image_numpy = image_numpy / (2.0 / 255.0) 32 | return image_numpy.astype(imtype) 33 | 34 | def tensor2depth(input_depth, imtype=np.int32): 35 | if isinstance(input_depth, torch.Tensor): 36 | depth_tensor = input_depth.data 37 | else: 38 | return input_depth 39 | depth_numpy = depth_tensor[0].cpu().float().numpy() 40 | depth_numpy += 1.0 41 | depth_numpy /= 2.0 42 | depth_numpy *= 65535.0 43 | depth_numpy = depth_numpy.reshape((depth_numpy.shape[1], depth_numpy.shape[2])) 44 | return depth_numpy.astype(imtype) 45 | 46 | def diagnose_network(net, name='network'): 47 | mean = 0.0 48 | count = 0 49 | for param in net.parameters(): 50 | if param.grad is not None: 51 | mean += torch.mean(torch.abs(param.grad.data)) 52 | count += 1 53 | if count > 0: 54 | mean = mean / count 55 | print(name) 56 | print(mean) 57 | 58 | 59 | def save_image(image_numpy, image_path, imtype): 60 | image_pil = Image.fromarray(image_numpy, imtype) 61 | image_pil.save(image_path) 62 | 63 | 64 | def print_numpy(x, val=True, shp=False): 65 | x = x.astype(np.float64) 66 | if shp: 67 | print('shape,', x.shape) 68 | if val: 69 | x = x.flatten() 70 | print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( 71 | np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) 72 | 73 | class SaveResults: 74 | def __init__(self, opt): 75 | 76 | self.img_dir = os.path.join(opt.checkpoints_dir, opt.expr_name, 'image') 77 | mkdirs(self.img_dir) 78 | self.log_name = os.path.join(opt.checkpoints_dir, opt.expr_name, 'loss_log.txt') 79 | with open(self.log_name, "a") as log_file: 80 | now = time.strftime("%c") 81 | log_file.write('================ Training Loss (%s) ================\n' % now) 82 | 83 | def save_current_results(self, visuals, epoch): 84 | 85 | for label, image in visuals.items(): 86 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) 87 | if image is None: 88 | continue 89 | if 'depth' in label: 90 | depth_numpy = tensor2depth(image) 91 | save_image(depth_numpy, img_path, 'I') 92 | else: 93 | image_numpy = tensor2im(image) 94 | save_image(image_numpy, img_path, 'RGB') 95 | 96 | 97 | # losses: same format as |losses| of plot_current_losses 98 | def print_current_losses(self, epoch, i, lr, losses, t, t_data): 99 | 100 | message = '(epoch: %d, iters: %d, lr: %e, time: %.3f, data: %.3f) ' % (epoch, i, lr, t, t_data) 101 | for k, v in losses.items(): 102 | message += '%s: %.6f ' % (k, v) 103 | 104 | print(message) 105 | with open(self.log_name, "a") as log_file: 106 | log_file.write('%s\n' % message) 107 | 108 | def mkdirs(paths): 109 | if isinstance(paths, list) and not isinstance(paths, str): 110 | for path in paths: 111 | mkdir(path) 112 | else: 113 | mkdir(paths) 114 | 115 | def mkdir(path): 116 | if not os.path.exists(path): 117 | os.makedirs(path) 118 | --------------------------------------------------------------------------------
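As a closing reference, here is a self-contained sketch (synthetic arrays, not real results) of the evaluation path used in test.py: the network output in [-1, 1] is mapped to metric depth with (pred + 1) / 2 * 655.35, clamped to [MIN_DEPTH, MAX_DEPTH], and scored on the valid pixels with utils.dataset_util.compute_errors.
```
# Sketch with made-up inputs; only the conversion and metric calls mirror test.py.
import numpy as np
from utils import dataset_util

pred = np.random.uniform(-1.0, 1.0, size=(192, 640)).astype(np.float32)   # stand-in for the network output
gt   = np.random.uniform(1.0, 80.0, size=(192, 640)).astype(np.float32)   # stand-in for LiDAR ground truth

pred_depth = (pred + 1.0) / 2.0 * 655.35            # [-1, 1] -> metres (max depth encoding 655.35)
pred_depth = np.clip(pred_depth, 1e-3, 80.0)        # same MIN_DEPTH / MAX_DEPTH as test.py

mask = (gt > 1e-3) & (gt < 80.0)                    # evaluate only valid ground-truth pixels
abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = dataset_util.compute_errors(gt[mask], pred_depth[mask])
print(abs_rel, rmse, a1)                            # a1/a2/a3 are the delta < 1.25 / 1.25^2 / 1.25^3 accuracies
```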