├── models └── __init__.py ├── LICENSE ├── train.py ├── README.md ├── utils ├── cpp_encoding.py ├── metrics.py └── pda_aug.py ├── dataloader ├── CAM_interiorNet_depth_dataLoader.py └── CAM_ScanNet_depth_dataLoader.py ├── training ├── CPP_training.py ├── PDA_training.py ├── base_model.py └── CPP_PDA_joint_training.py └── dataset └── ScanNet ├── ScanNet_testing_natural_1080.txt ├── ScanNet_testing_uniform_1080.txt └── ScanNet_testing_restricted_1080.txt /models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Yunhan Zhao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import random, time, copy 3 | import argparse 4 | import torch 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | # ======================= dataloaders ====================== 8 | from dataloader.CAM_interiorNet_depth_dataLoader import CAM_interiorNet_depth_dataLoader 9 | from dataloader.CAM_ScanNet_depth_dataLoader import CAM_ScanNet_depth_dataLoader 10 | 11 | # ================================================ comment / uncomment to choose the training model ============================================================= # 12 | from training.CPP_training import CPP_training_model as train_model # CPP only 13 | # from training.PDA_training import PDA_training_model as train_model # PDA only 14 | # from training.CPP_PDA_joint_training import CPP_PDA_joint_training_model as train_model # PDA + CPP 15 | 16 | import warnings # ignore warnings 17 | warnings.filterwarnings("ignore") 18 | print(sys.version) 19 | print('pytorch version: {}'.format(torch.__version__)) 20 | 21 | ################## set attributes for this project/experiment ################## 22 | 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument('--exp_dir', type=str, default=os.path.join(os.getcwd(), 'experiments'), 25 | help='place to store all experiments') 26 | parser.add_argument('--project_name', type=str, help='Test Project') 27 | parser.add_argument('--data_root', type=str, default='/home/yunhaz5/project/CAM/dataset/InteriorNet', 28 | help='absolute path to dir of all datasets') 29 | parser.add_argument('--training_set_name', type=str, default='interiorNet_training_natural_10800', 30 | help='which dataset to use as training set') 31 | parser.add_argument('--testing_set_name', type=str, default='interiorNet_testing_natural_1080', 32 | help='which dataset to use as testing set') 33 | 
parser.add_argument('--is_train', action='store_true', help='whether this is training phase') 34 | parser.add_argument('--batch_size', type=int, default=16, help='batch size') 35 | parser.add_argument('--eval_batch_size', type=int, default=1, help='batch size') 36 | parser.add_argument('--sampleSize', type=list, default=[240, 320] , help='size of samples in experiments') 37 | parser.add_argument('--total_epoch_num', type=int, default=200, help='total number of epoch') 38 | parser.add_argument('--device', type=str, default='cpu', help='whether running on gpu') 39 | parser.add_argument('--base_lr', type=int, default=0.001, help='basic learning rate') 40 | parser.add_argument('--num_workers', type=int, default=4, help='number of workers in dataLoaders') 41 | parser.add_argument('--eval_mode', type=int, default=-1, help='eval epoch') 42 | 43 | args = parser.parse_args() 44 | 45 | if torch.cuda.is_available(): 46 | args.device='cuda' 47 | torch.cuda.empty_cache() 48 | 49 | if 'interiorNet' in args.training_set_name: 50 | training_dataset = CAM_interiorNet_depth_dataLoader(root_dir=args.data_root, set_name=args.training_set_name, size=args.sampleSize) 51 | 52 | elif 'ScanNet' in args.training_set_name: 53 | training_dataset = CAM_ScanNet_depth_dataLoader(root_dir=args.data_root, set_name=args.training_set_name, size=args.sampleSize) 54 | 55 | else: 56 | raise RuntimeError('only support following training datasets: interiorNet and ScanNet!') 57 | 58 | training_dataloader = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers) 59 | 60 | if 'interiorNet' in args.testing_set_name: 61 | testing_dataset = CAM_interiorNet_depth_dataLoader(root_dir=args.data_root, set_name=args.testing_set_name, size=args.sampleSize) 62 | 63 | elif 'ScanNet' in args.testing_set_name: 64 | if 'ScanNet' not in args.data_root: 65 | # for the case where we want to train on InteriorNet while test on ScanNet 66 | ScanNet_data_root = 
args.data_root[:-11] + 'ScanNet' 67 | else: 68 | ScanNet_data_root = args.data_root 69 | testing_dataset = CAM_ScanNet_depth_dataLoader(root_dir=ScanNet_data_root, set_name=args.testing_set_name, size=args.sampleSize) 70 | 71 | else: 72 | raise RuntimeError('only support following testing datasets: interiorNet and ScanNet!') 73 | 74 | testing_dataloader = DataLoader(testing_dataset, batch_size=args.eval_batch_size, shuffle=False, drop_last=False, num_workers=args.num_workers) 75 | model = train_model(args, training_dataloader, testing_dataloader) 76 | 77 | if args.is_train: 78 | model.train() 79 | else: 80 | if args.eval_mode == -1: 81 | args.eval_mode = 'best' 82 | model.evaluate(mode=args.eval_mode) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Camera Pose Matters: Improving Depth Prediction by Mitigating Pose Distribution Bias 2 | 3 | This repo contains the official Pytorch implementation of: 4 | 5 | [Camera Pose Matters: Improving Depth Prediction by Mitigating Pose Distribution Bias](https://openaccess.thecvf.com/content/CVPR2021/html/Zhao_Camera_Pose_Matters_Improving_Depth_Prediction_by_Mitigating_Pose_Distribution_CVPR_2021_paper.html) 6 | 7 | [Yunhan Zhao](https://www.ics.uci.edu/~yunhaz5/), [Shu Kong](http://www.cs.cmu.edu/~shuk/), and [Charless Fowlkes](https://www.ics.uci.edu/~fowlkes/) 8 | 9 | CVPR 2021 (oral) 10 | 11 | For more details, please check our [project website](https://www.ics.uci.edu/~yunhaz5/cvpr2021/cpp.html) 12 | 13 | ### Abstract 14 | Monocular depth predictors are typically trained on large-scale training sets which are naturally biased w.r.t the distribution of camera poses. As a result, trained predictors fail to make reliable depth predictions for testing examples captured under uncommon camera poses. 
To address this issue, we propose two novel techniques that exploit the camera pose during training and prediction. First, we introduce a simple perspective-aware data augmentation that synthesizes new training examples with more diverse views by perturbing the existing ones in a geometrically consistent manner. Second, we propose a conditional model that exploits the per-image camera pose as prior knowledge by encoding it as a part of the input. We show that jointly applying the two methods improves depth prediction on images captured under uncommon and even never-before-seen camera poses. We show that our methods improve performance when applied to a range of different predictor architectures. Lastly, we show that explicitly encoding the camera pose distribution improves the generalization performance of a synthetically trained depth predictor when evaluated on real images. 15 | 16 | ### Reference 17 | If you find our work useful in your research please consider citing our paper: 18 | ``` 19 | @inproceedings{zhao2021camera, 20 | title={Camera Pose Matters: Improving Depth Prediction by Mitigating Pose Distribution Bias}, 21 | author={Zhao, Yunhan and Kong, Shu and Fowlkes, Charless}, 22 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 23 | pages={15759--15768}, 24 | year={2021} 25 | } 26 | ``` 27 | 28 | ### Contents 29 | - [Requirments](#requirements) 30 | - [Dataset](#dataset) 31 | - [Training](#training) 32 | - [Evaluation](#evaluation) 33 | - [Pretrained Models](#pretrained-models) 34 | 35 | 36 | ### Requirements 37 | 1. Python 3.6 with Ubuntu 16.04 38 | 2. Pytorch 1.1.0 39 | 3. Apex 0.1 (optional) 40 | 41 | You also need other third-party libraries, such as numpy, pillow, torchvision, and tensorboardX (optional) to run the code. We use apex when training all models but it is not strictly required to run the code. 42 | 43 | ### Dataset 44 | We use InteriorNet and ScanNet in this project. 
The detailed data file lists are located in the `dataset` folder where each file corresponds to one training/testing distribution (natural, uniform or restricted). Please download and extract the appropriate files before training.
#### Dataset Structure (e.g. interiorNet_training_natural_10800)
```
interiorNet_training_natural_10800
| rgb
| rgb0.png
| ...
| depth
| depth0.png
| ...
cam_parameter.txt
```
`cam_parameter.txt` contains the intrinsics and camera pose for each sample in the subset. Feel free to sample your own distribution and train with your own data.

### Training
All training steps use one common `train.py` file so please make sure to comment/uncomment for training with CPP, PDA, or CPP + PDA.
```bash
CUDA_VISIBLE_DEVICES= python train.py \
--data_root= \
--training_set_name=interiorNet_training_natural_10800 \
--testing_set_name=interiorNet_testing_natural_1080 \
--batch_size=12 --total_epoch_num=200 --is_train --eval_batch_size=10
```
`batch_size` and `eval_batch_size` are flexible to change given your working environment. Feel free to swap `interiorNet_training_natural_10800` and `interiorNet_testing_natural_1080` to train and test on different distributions.

### Evaluation
Evaluate the final results
```bash
CUDA_VISIBLE_DEVICES= python train.py \
--data_root= \
--training_set_name=interiorNet_training_natural_10800 \
--testing_set_name=interiorNet_testing_natural_1080 \
--eval_batch_size=10
```
If you want to evaluate with your own data, please create your own testing set with the dataset structure described above.

### Pretrained Models
Pretrained models will be uploaded soon.

### Questions
Please feel free to email me at (yunhaz5 [at] ics [dot] uci [dot] edu) if you have any questions.
-------------------------------------------------------------------------------- /utils/cpp_encoding.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | def get_extrinsic_channel(imageTensor, focal_length, p_pt, extrinsic_para, CEILING_HEIGHT, inverse_tangent=True, augmentation=None): 5 | B, H, W = imageTensor.shape[0], imageTensor.shape[2], imageTensor.shape[3] 6 | K = _get_intrinsic_matrix(focal_length, p_pt, B) 7 | 8 | # make sure to adapt to your coordinate system 9 | cam_height, roll = extrinsic_para[:, 2], extrinsic_para[:, 4] # all with size: [B] 10 | pitch = extrinsic_para[:, 3] - np.pi/2 11 | R = torch.bmm(Rotx(pitch), Rotz(roll)) # B x 3 x 3 12 | 13 | translation_v = torch.zeros((B, 3, 1), dtype=torch.float) 14 | translation_v[:, 1, 0] = -cam_height 15 | 16 | normal = torch.tensor((0, -1, 0), dtype=torch.float).reshape(-1, 1) 17 | normal_t = torch.transpose(normal, 0, 1) 18 | 19 | # convert normal and normal_t(transpose) to batches 20 | normal = normal.unsqueeze(0).expand(B, -1, 1) 21 | normal_t = normal_t.unsqueeze(0).expand(B, 1, -1) 22 | 23 | grid_y, grid_x = np.mgrid[0:H, 0:W] 24 | grid_y, grid_x = torch.tensor(grid_y, dtype=torch.float32), torch.tensor(grid_x, dtype=torch.float32) 25 | q = torch.stack((grid_x.reshape(-1), grid_y.reshape(-1), torch.ones_like(grid_x.reshape(-1))), dim=0).unsqueeze(0).expand(B, 3, H*W) 26 | 27 | # computing points intersecting ground plane 28 | scale_f = - torch.bmm(normal_t, translation_v) / torch.bmm(torch.bmm(torch.bmm(normal_t, R), torch.inverse(K)), q) 29 | p_f = torch.bmm(torch.bmm(R, torch.inverse(K)), q) 30 | p_f = p_f * scale_f.expand_as(p_f) + translation_v 31 | k_vec = torch.tensor((0, 0, 1), dtype=torch.float).reshape(-1, 1) 32 | k_vec_t = torch.transpose(k_vec, 0, 1) 33 | k_vec_t = k_vec_t.unsqueeze(0).expand(B, 1, -1) 34 | z_f = scale_f * torch.bmm(k_vec_t, q) 35 | 36 | z_f_channel = z_f.reshape(B, 1, H, W) 37 | 38 | # 
computing points intersecting celing plane 39 | scale_c = (CEILING_HEIGHT- torch.bmm(normal_t, translation_v)) / torch.bmm(torch.bmm(torch.bmm(normal_t, R), torch.inverse(K)), q) 40 | p_c = torch.bmm(torch.bmm(R, torch.inverse(K)), q) 41 | p_c = p_c * scale_c.expand_as(p_c) + translation_v 42 | z_c = scale_c * torch.bmm(k_vec_t, q) 43 | 44 | z_c_channel = z_c.reshape(B, 1, H, W) 45 | 46 | extrinsic_channel = torch.zeros(B, 1, H, W) 47 | extrinsic_channel[z_f_channel > 0.] = z_f_channel[z_f_channel > 0.] 48 | extrinsic_channel[z_c_channel > 0.] = z_c_channel[z_c_channel > 0.] 49 | 50 | if inverse_tangent: 51 | extrinsic_channel = torch.atan(extrinsic_channel) 52 | 53 | if augmentation is not None: 54 | # augmentation is a bool tensor with size B, 1 means lrflip aug and 0 means original 55 | assert extrinsic_channel.dim() == 4 56 | extrinsic_channel_aug = torch.zeros_like(extrinsic_channel) 57 | extrinsic_channel_aug[augmentation] = torch.flip(extrinsic_channel[augmentation], [1, 3]) 58 | extrinsic_channel_aug[~augmentation] = extrinsic_channel[~augmentation] 59 | extrinsic_channel = extrinsic_channel_aug 60 | 61 | return extrinsic_channel 62 | 63 | def _get_intrinsic_matrix(focal_length, p_pt, batch_size): 64 | K = torch.zeros((batch_size, 9), dtype=torch.float32) 65 | K[:, -1] = 1. 
66 | if isinstance(focal_length, (int, float)): 67 | # suggest fx = fy for all samples 68 | K[:, 0] = focal_length 69 | K[:, 4] = focal_length 70 | elif isinstance(focal_length, (list, tuple)): 71 | # suggest fx, fy for all samples 72 | K[:, 0] = focal_length[0] 73 | K[:, 4] = focal_length[1] 74 | elif torch.is_tensor(focal_length): 75 | if focal_length.dim() == 1: 76 | # suggest fx = fy for indivdual sample 77 | K[:, 0] = focal_length 78 | K[:, 4] = focal_length 79 | elif focal_length.dim() == 2: 80 | # suggest fx, fy for indivdual sample 81 | K[:, 0] = focal_length[:, 0] 82 | K[:, 4] = focal_length[:, 1] 83 | else: 84 | raise ValueError('focal length tensor has to have shape of [B, ] or [B, 2]') 85 | else: 86 | raise ValueError('focal length variable should be either int/float, list/tuple or tensor of size [B, ]/[B, 2]') 87 | 88 | if isinstance(p_pt, (list, tuple)): 89 | K[:, 2] = p_pt[1] 90 | K[:, 5] = p_pt[0] 91 | elif torch.is_tensor(p_pt): 92 | assert p_pt.dim() == 2 93 | K[:, 2] = p_pt[:, 1] 94 | K[:, 5] = p_pt[:, 0] 95 | else: 96 | raise ValueError('principle point variable should be either list/tuple or tensor of size [B, 2]') 97 | return K.reshape(batch_size, 3, 3) 98 | 99 | 100 | def Rotx(t): 101 | """ 102 | Rotation about the x-axis. 103 | np.array([[1, 0, 0], [0, c, -s], [0, s, c]]) 104 | 105 | -- input t shape B x 1 106 | -- return B x 3 x 3 107 | """ 108 | B = t.shape[0] 109 | Rx = torch.zeros((B, 9, 1), dtype=torch.float) 110 | 111 | c = torch.cos(t) 112 | s = torch.sin(t) 113 | ones = torch.ones(B) 114 | # print(t) 115 | # print(c.shape, c) 116 | # print(ones.shape, ones) 117 | 118 | Rx[:, 0, 0] = ones 119 | Rx[:, 4, 0] = c 120 | Rx[:, 5, 0] = -s 121 | Rx[:, 7, 0] = s 122 | Rx[:, 8, 0] = c 123 | 124 | Rx = Rx.reshape(B, 3, 3) 125 | 126 | return Rx 127 | 128 | 129 | def Roty(t): 130 | """ 131 | Rotation about the x-axis. 
132 | np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) 133 | 134 | -- input t shape B x 1 135 | -- return B x 3 x 3 136 | """ 137 | B = t.shape[0] 138 | Ry = torch.zeros((B, 9, 1), dtype=torch.float) 139 | 140 | c = torch.cos(t) 141 | s = torch.sin(t) 142 | ones = torch.ones(B) 143 | 144 | Ry[:, 0, 0] = c 145 | Ry[:, 2, 0] = s 146 | Ry[:, 4, 0] = ones 147 | Ry[:, 6, 0] = -s 148 | Ry[:, 8, 0] = c 149 | 150 | Ry = Ry.reshape(B, 3, 3) 151 | 152 | return Ry 153 | 154 | def Rotz(t): 155 | """ 156 | Rotation about the z-axis. 157 | np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) 158 | 159 | -- input t shape B x 1 160 | -- return B x 3 x 3 161 | """ 162 | B = t.shape[0] 163 | Rz = torch.zeros((B, 9, 1), dtype=torch.float) 164 | 165 | c = torch.cos(t) 166 | s = torch.sin(t) 167 | ones = torch.ones(B) 168 | 169 | Rz[:, 0, 0] = c 170 | Rz[:, 1, 0] = -s 171 | Rz[:, 3, 0] = s 172 | Rz[:, 4, 0] = c 173 | Rz[:, 8, 0] = ones 174 | 175 | Rz = Rz.reshape(B, 3, 3) 176 | 177 | return Rz -------------------------------------------------------------------------------- /dataloader/CAM_interiorNet_depth_dataLoader.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import random, time, copy 3 | from skimage import io, transform 4 | import numpy as np 5 | import scipy.io as sio 6 | from scipy import misc 7 | import matplotlib.pyplot as plt 8 | import PIL.Image 9 | 10 | import skimage.transform 11 | 12 | import torch 13 | from torch.utils.data import Dataset, DataLoader 14 | import torch.nn as nn 15 | import torch.optim as optim 16 | from torch.optim import lr_scheduler 17 | import torch.nn.functional as F 18 | from torch.autograd import Variable 19 | 20 | import torchvision 21 | from torchvision import datasets, models, transforms 22 | 23 | 24 | IMG_EXTENSIONS = [ 25 | '.jpg', '.JPG', '.jpeg', '.JPEG', 26 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.bin' 27 | ] 28 | 29 | def is_image_file(filename): 30 | return 
any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 31 | 32 | class CAM_interiorNet_depth_dataLoader(Dataset): 33 | def __init__(self, root_dir, set_name, size=[240, 320], downsampleDepthFactor=1, surface_normal=True): 34 | self.root_dir = root_dir 35 | self.size = size 36 | self.MIN_DEPTH_CLIP = 1.0 37 | self.MAX_DEPTH_CLIP = 10.0 38 | 39 | self.set_name = set_name # e.g., interiorNet_training_natural_10800 40 | self.include_surface_normal = surface_normal 41 | self.set_len = 0 42 | self.path2rgbFiles = [] 43 | self.downsampleDepthFactor = downsampleDepthFactor 44 | self.augmentation = True # whether to augment each batch data 45 | self.extrinsic_angle = 'radian' # radian or degree 46 | 47 | self.return_keys = ['rgb', 'depth', 'extrinsic'] 48 | if self.include_surface_normal: 49 | self.return_keys.append('surface_normal') 50 | if self.augmentation: 51 | self.return_keys.append('augmentation') 52 | self.return_values = [] 53 | 54 | self.original_focal_length = 600 55 | self.original_p_pt = [240, 320] 56 | 57 | rgbFileNameList = os.listdir(os.path.join(self.root_dir, self.set_name, 'rgb')) 58 | # for fName in sorted(rgbFileNameList): 59 | for fName in rgbFileNameList: 60 | if is_image_file(fName): 61 | path = os.path.join(self.root_dir, self.set_name, 'rgb', fName) 62 | self.path2rgbFiles.append(path) 63 | 64 | self.set_len = len(self.path2rgbFiles) 65 | 66 | # read cam parameter file 67 | camFileName = os.path.join(self.root_dir, self.set_name, 'cam_parameter.txt') 68 | self.camParamerterDict = {} 69 | with open(camFileName, 'r') as f: 70 | for l in f: 71 | # this order is entirely up to you and could be changed when create cam_parameter.txt 72 | fileName, p_x, p_y, p_z, pitch, roll, yaw = l.rstrip('\n').split(' ') 73 | # change to load as roll, pitch, yaw 74 | if self.extrinsic_angle == 'degree': 75 | self.camParamerterDict[fileName] = np.array((float(p_x), float(p_y), float(p_z), float(roll), float(pitch), float(yaw))) 76 | elif self.extrinsic_angle 
== 'radian': 77 | self.camParamerterDict[fileName] = np.array((float(p_x), float(p_y), float(p_z), np.deg2rad(float(roll)), np.deg2rad(float(pitch)), 78 | np.deg2rad(float(yaw)))) 79 | else: 80 | raise RuntimeError('choose angle representation between radian or degree') 81 | 82 | self.TF2tensor = transforms.ToTensor() 83 | self.TF2PIL = transforms.ToPILImage() 84 | self.TFNormalize = transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)) 85 | self.funcResizeTensor = nn.Upsample(size=self.size, mode='nearest', align_corners=None) 86 | self.funcResizeDepth = nn.Upsample(size=[int(self.size[0]*self.downsampleDepthFactor), 87 | int(self.size[1]*self.downsampleDepthFactor)], 88 | mode='nearest', align_corners=None) 89 | 90 | def __len__(self): 91 | return self.set_len 92 | 93 | def __getitem__(self, idx): 94 | rgbFileName = self.path2rgbFiles[idx] 95 | return_dict = {} 96 | return_dict.fromkeys(self.return_keys) 97 | return_dict = self.fetch_img_and_corresponding_labels(rgbFileName, return_dict) 98 | return_dict = self.fetch_corresponding_cam_parameters(rgbFileName, return_dict) 99 | 100 | return return_dict 101 | 102 | def get_dataset_name(self): 103 | return self.set_name 104 | 105 | def distance_2_depth(self, distance_map): 106 | H, W = distance_map.shape[0], distance_map.shape[1] 107 | y_grid, x_grid = np.mgrid[0:H, 0:W] 108 | y_vector, x_vector = y_grid.astype(np.float32).reshape(1, H*W), x_grid.astype(np.float32).reshape(1, H*W) 109 | 110 | y = (y_vector - self.original_p_pt[0]) / self.original_focal_length 111 | x = (x_vector - self.original_p_pt[1]) / self.original_focal_length 112 | 113 | depth_map = distance_map.flatten() / np.sqrt(x**2 + y**2 + 1) 114 | depth_map = depth_map.reshape(H, W) 115 | 116 | return depth_map 117 | 118 | def fetch_img_and_corresponding_labels(self, rgbFileName, return_dict): 119 | if 'training' in self.set_name and self.augmentation: 120 | if np.random.random(1) > 0.5: 121 | augmentation = True 122 | else: 123 | augmentation = False 124 | 
else: 125 | augmentation = False 126 | return_dict['augmentation'] = augmentation 127 | 128 | image = PIL.Image.open(rgbFileName).convert('RGB') 129 | image = np.array(image, dtype=np.float32) / 255. 130 | if augmentation: 131 | image = np.fliplr(image).copy() 132 | imageT = self.TF2tensor(image) 133 | try: 134 | imageT = self.TFNormalize(imageT) 135 | except RuntimeError: 136 | print('image shape missmatch error') 137 | print(rgbFileName) 138 | imageT = imageT.unsqueeze(0) # need 4D data to resize tensor 139 | imageT = self.funcResizeTensor(imageT) 140 | imageT = imageT.squeeze(0) 141 | 142 | return_dict['rgb'] = imageT 143 | 144 | fileName = rgbFileName.split('/')[-1] 145 | depthFileName = os.path.join(self.root_dir, self.set_name, 'depth', fileName) 146 | depth = PIL.Image.open(depthFileName) 147 | depth = np.array(depth, dtype=np.float32) / 1000. # [480, 640] 148 | # print(depth.min(), depth.max()) 149 | depth = self.distance_2_depth(depth) 150 | depth = np.expand_dims(depth, 2) 151 | if augmentation: 152 | depth = np.fliplr(depth).copy() 153 | depthT = self.TF2tensor(depth) 154 | depthT = self.preprocess_depth(depthT, mode='tanh') 155 | depthT = depthT.unsqueeze(0) # need 4D data to resize tensor 156 | depthT = self.funcResizeTensor(depthT) 157 | depthT = depthT.squeeze(0) 158 | 159 | return_dict['depth'] = depthT 160 | 161 | if self.include_surface_normal: 162 | normalFileName = os.path.join(self.root_dir, self.set_name, 'surface_normal', fileName) 163 | normal = PIL.Image.open(normalFileName) 164 | normal = np.array(normal, dtype=np.float32) # shape (H, W, 3), [0, 255] 165 | if augmentation: 166 | normal = np.fliplr(normal).copy() 167 | normalT = self.TF2tensor(normal) 168 | return_dict['surface_normal'] = normalT 169 | 170 | return return_dict 171 | 172 | def fetch_corresponding_cam_parameters(self, rgbFileName, return_dict): 173 | fileName = rgbFileName.split('/')[-1].split('.')[0] 174 | # print(fileName) 175 | return_dict['extrinsic'] = 
self.camParamerterDict[fileName] 176 | return return_dict 177 | 178 | 179 | def convert_distance_to_depth(self, distance, cam_paraeter): 180 | pass 181 | 182 | def preprocess_depth(self, depthT, mode='tanh'): 183 | ''' 184 | preprocess depth tensor before feed into the network 185 | mode: choose from depth [0, max_depth], disparity [0, 1], tanh [-1.0, 1.0] 186 | ''' 187 | # depthT = np.clip(depthT, self.MIN_DEPTH_CLIP, self.MAX_DEPTH_CLIP) # [0, 25.0] 188 | if 'training' in self.set_name: 189 | if mode == 'tanh': 190 | return (((depthT - self.MIN_DEPTH_CLIP) / (self.MAX_DEPTH_CLIP - self.MIN_DEPTH_CLIP)) - 0.5) * 2.0 # mask out depth over 191 | elif mode == 'depth': 192 | return depthT 193 | else: 194 | return depthT -------------------------------------------------------------------------------- /dataloader/CAM_ScanNet_depth_dataLoader.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import random, time, copy 3 | from skimage import io, transform 4 | import numpy as np 5 | import scipy.io as sio 6 | from scipy import misc 7 | import matplotlib.pyplot as plt 8 | import PIL.Image 9 | import cv2 10 | 11 | import skimage.transform 12 | 13 | import torch 14 | from torch.utils.data import Dataset, DataLoader 15 | import torch.nn as nn 16 | import torch.optim as optim 17 | from torch.optim import lr_scheduler 18 | import torch.nn.functional as F 19 | from torch.autograd import Variable 20 | 21 | import torchvision 22 | from torchvision import datasets, models, transforms 23 | 24 | 25 | IMG_EXTENSIONS = [ 26 | '.jpg', '.JPG', '.jpeg', '.JPEG', 27 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.bin' 28 | ] 29 | 30 | def is_image_file(filename): 31 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 32 | 33 | class CAM_ScanNet_depth_dataLoader(Dataset): 34 | def __init__(self, root_dir, set_name, size=[240, 320], downsampleDepthFactor=1, surface_normal=False, augmentation=True): 35 | 
self.root_dir = root_dir 36 | self.size = size 37 | self.MIN_DEPTH_CLIP = 1.0 38 | self.MAX_DEPTH_CLIP = 10.0 39 | 40 | self.set_name = set_name # e.g., ScanNet_all_uniform_1080/ 41 | self.include_surface_normal = surface_normal 42 | self.set_len = 0 43 | self.path2rgbFiles = [] 44 | self.downsampleDepthFactor = downsampleDepthFactor 45 | self.augmentation = augmentation # whether to augment each batch data 46 | self.extrinsic_angle = 'radian' # radian or degree 47 | 48 | self.return_keys = ['rgb', 'depth', 'intrinsic', 'extrinsic', 'augmentation'] 49 | if self.include_surface_normal: 50 | self.return_keys.append('surface_normal') 51 | self.return_values = [] 52 | 53 | self.color_original_size = (968, 1296) 54 | 55 | rgbFileNameList = os.listdir(os.path.join(self.root_dir, self.set_name, 'rgb')) 56 | for fName in rgbFileNameList: 57 | if is_image_file(fName): 58 | path = os.path.join(self.root_dir, self.set_name, 'rgb', fName) 59 | self.path2rgbFiles.append(path) 60 | 61 | self.set_len = len(self.path2rgbFiles) 62 | 63 | # read cam parameter file 64 | camFileName = os.path.join(self.root_dir, self.set_name, 'cam_parameter.txt') 65 | self.extrinsicParaDict = {} 66 | self.intrinsicParaDict = {} 67 | with open(camFileName, 'r') as f: 68 | for l in f: 69 | fileName, intrinsic, extrinsic = l.rstrip('\n').split('|') 70 | p_x, p_y, p_z, roll, pitch, yaw = extrinsic.split(' ') 71 | intrinsic_list = intrinsic.split(' ') 72 | f_x, p_ptx, f_y, p_pty = intrinsic_list[0], intrinsic_list[2], intrinsic_list[5], intrinsic_list[6] 73 | 74 | self.intrinsicParaDict[fileName] = np.array((float(f_x), float(f_y), float(p_ptx), float(p_pty))) 75 | 76 | # being consistent with interiorNet dataLoader 77 | if self.extrinsic_angle == 'degree': 78 | self.extrinsicParaDict[fileName] = np.array((float(p_x), float(p_y), float(p_z), float(pitch), float(roll), float(yaw))) 79 | elif self.extrinsic_angle == 'radian': 80 | self.extrinsicParaDict[fileName] = np.array((float(p_x), float(p_y), 
float(p_z), np.deg2rad(float(pitch)), np.deg2rad(float(roll)), 81 | np.deg2rad(float(yaw)))) 82 | else: 83 | raise RuntimeError('choose angle representation between radian or degree') 84 | 85 | self.TF2tensor = transforms.ToTensor() 86 | self.TF2PIL = transforms.ToPILImage() 87 | self.TFNormalize = transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)) 88 | self.funcResizeTensor = nn.Upsample(size=self.size, mode='nearest', align_corners=None) 89 | self.funcResizeDepth = nn.Upsample(size=[int(self.size[0]*self.downsampleDepthFactor), 90 | int(self.size[1]*self.downsampleDepthFactor)], 91 | mode='nearest', align_corners=None) 92 | 93 | def __len__(self): 94 | return self.set_len 95 | 96 | def __getitem__(self, idx): 97 | rgbFileName = self.path2rgbFiles[idx] 98 | return_dict = {} 99 | return_dict.fromkeys(self.return_keys) 100 | return_dict = self.fetch_img_and_corresponding_labels(rgbFileName, return_dict) 101 | return_dict = self.fetch_corresponding_cam_parameters(rgbFileName, return_dict) 102 | 103 | return return_dict 104 | 105 | def get_dataset_name(self): 106 | return self.set_name 107 | 108 | def distance_2_depth(self, distance_map): 109 | H, W = distance_map.shape[0], distance_map.shape[1] 110 | y_grid, x_grid = np.mgrid[0:H, 0:W] 111 | y_vector, x_vector = y_grid.astype(np.float32).reshape(1, H*W), x_grid.astype(np.float32).reshape(1, H*W) 112 | 113 | y = (y_vector - self.original_p_pt[0]) / self.original_focal_length 114 | x = (x_vector - self.original_p_pt[1]) / self.original_focal_length 115 | 116 | depth_map = distance_map.flatten() / np.sqrt(x**2 + y**2 + 1) 117 | depth_map = depth_map.reshape(H, W) 118 | 119 | return depth_map 120 | 121 | def fetch_img_and_corresponding_labels(self, rgbFileName, return_dict): 122 | if 'training' in self.set_name and self.augmentation: 123 | if np.random.random(1) > 0.5: 124 | augmentation = True 125 | else: 126 | augmentation = False 127 | else: 128 | augmentation = False 129 | return_dict['augmentation'] = augmentation 
130 | 131 | image = PIL.Image.open(rgbFileName).convert('RGB') 132 | image = np.array(image, dtype=np.float32) / 255. 133 | 134 | if augmentation: 135 | image = np.fliplr(image).copy() 136 | imageT = self.TF2tensor(image) 137 | try: 138 | imageT = self.TFNormalize(imageT) 139 | except RuntimeError: 140 | print('image shape missmatch error') 141 | print(rgbFileName) 142 | imageT = imageT.unsqueeze(0) # need 4D data to resize tensor 143 | imageT = self.funcResizeTensor(imageT) 144 | imageT = imageT.squeeze(0) 145 | 146 | return_dict['rgb'] = imageT 147 | 148 | fileName = rgbFileName.split('/')[-1].split('.')[0] 149 | depthFileName = os.path.join(self.root_dir, self.set_name, 'depth', fileName + '.pgm') 150 | depth = cv2.imread(depthFileName, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH).astype(np.float32) / 1000. 151 | depth = np.expand_dims(depth, 2) 152 | if augmentation: 153 | depth = np.fliplr(depth).copy() 154 | depthT = self.TF2tensor(depth) 155 | depthT = self.preprocess_depth(depthT, mode='tanh') 156 | depthT = depthT.unsqueeze(0) # need 4D data to resize tensor 157 | depthT = self.funcResizeTensor(depthT) 158 | depthT = depthT.squeeze(0) 159 | 160 | return_dict['depth'] = depthT 161 | 162 | if self.include_surface_normal: 163 | normalFileName = os.path.join(self.root_dir, self.set_name, 'surface_normal', fileName) 164 | normal = PIL.Image.open(normalFileName) 165 | normal = np.array(normal, dtype=np.float32) # shape (H, W, 3), [0, 255] 166 | if augmentation: 167 | normal = np.fliplr(normal).copy() 168 | normalT = self.TF2tensor(normal) 169 | return_dict['surface_normal'] = normalT 170 | 171 | return return_dict 172 | 173 | def fetch_corresponding_cam_parameters(self, rgbFileName, return_dict): 174 | fileName = rgbFileName.split('/')[-1].split('.')[0] 175 | 176 | return_dict['extrinsic'] = self.extrinsicParaDict[fileName] 177 | original_intrinsics = self.intrinsicParaDict[fileName] 178 | 179 | updated_intrinsic = 
self.updated_intrinsic_parameters(original_intrinsics) 180 | return_dict['intrinsic'] = updated_intrinsic 181 | 182 | return return_dict 183 | 184 | def updated_intrinsic_parameters(self, original_intrinsics): 185 | x_scale = self.color_original_size[1] / self.size[1] 186 | y_scale = self.color_original_size[0] / self.size[0] 187 | 188 | updated_f_x = original_intrinsics[0] / x_scale 189 | updated_f_y = original_intrinsics[1] / y_scale 190 | 191 | p_ptx = int(original_intrinsics[2] / x_scale) 192 | p_pty = int(original_intrinsics[3] / y_scale) 193 | 194 | return np.array((updated_f_x, updated_f_y, p_ptx, p_pty), dtype=np.float32) 195 | 196 | def preprocess_depth(self, depthT, mode='tanh'): 197 | ''' 198 | preprocess depth tensor before feed into the network 199 | mode: choose from depth [0, max_depth], disparity [0, 1], tanh [-1.0, 1.0] 200 | ''' 201 | if 'training' in self.set_name: 202 | if mode == 'tanh': 203 | return (((depthT - self.MIN_DEPTH_CLIP) / (self.MAX_DEPTH_CLIP - self.MIN_DEPTH_CLIP)) - 0.5) * 2.0 # mask out depth over 204 | elif mode == 'depth': 205 | return depthT 206 | else: 207 | return depthT -------------------------------------------------------------------------------- /utils/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import math 4 | 5 | 6 | 7 | def log10(x): 8 | """Convert a new tensor with the base-10 logarithm of the elements of x. 
""" 9 | return torch.log(x) / math.log(10) 10 | 11 | class Result(object): 12 | def __init__(self, mask_min, mask_max): 13 | self.irmse, self.imae = 0, 0 14 | self.mse, self.rmse, self.mae = 0, 0, 0 15 | self.absrel, self.lg10 = 0, 0 16 | self.delta1, self.delta2, self.delta3 = 0, 0, 0 17 | self.data_time, self.gpu_time = 0, 0 18 | self.mask_min = mask_min 19 | self.mask_max = mask_max 20 | 21 | def set_to_worst(self): 22 | self.irmse, self.imae = np.inf, np.inf 23 | self.mse, self.rmse, self.mae = np.inf, np.inf, np.inf 24 | self.absrel, self.lg10 = np.inf, np.inf 25 | self.delta1, self.delta2, self.delta3 = 0, 0, 0 26 | self.data_time, self.gpu_time = 0, 0 27 | 28 | def update(self, irmse, imae, mse, rmse, mae, absrel, lg10, delta1, delta2, delta3, gpu_time, data_time): 29 | self.irmse, self.imae = irmse, imae 30 | self.mse, self.rmse, self.mae = mse, rmse, mae 31 | self.absrel, self.lg10 = absrel, lg10 32 | self.delta1, self.delta2, self.delta3 = delta1, delta2, delta3 33 | self.data_time, self.gpu_time = data_time, gpu_time 34 | 35 | def evaluate(self, output, target): 36 | 37 | # not quite sure whether this is useful 38 | # target[target < self.mask_min] = self.mask_min 39 | # target[target > self.mask_max] = self.mask_max 40 | 41 | # valid_mask = np.logical_and(target > self.mask_min, target < self.mask_max) 42 | valid_mask = np.logical_and(target >= self.mask_min, target <= self.mask_max) 43 | output = output[valid_mask] 44 | target = target[valid_mask] 45 | 46 | abs_diff = (output - target).abs() 47 | diff = (output - target) 48 | 49 | self.mse = float((torch.pow(abs_diff, 2)).mean()) 50 | self.rmse = math.sqrt(self.mse) 51 | self.rmselog = math.sqrt(float(((torch.log(target) - torch.log(output)) ** 2).mean())) 52 | 53 | self.mae = float(abs_diff.mean()) 54 | self.lg10 = float((log10(output) - log10(target)).abs().mean()) 55 | self.absrel = float((abs_diff / target).mean()) 56 | self.sqrel = float(((diff ** 2) / target).mean()) 57 | 58 | maxRatio = 
torch.max(output / target, target / output) 59 | self.delta1 = float((maxRatio < 1.25).float().mean()) 60 | self.delta2 = float((maxRatio < 1.25 ** 2).float().mean()) 61 | self.delta3 = float((maxRatio < 1.25 ** 3).float().mean()) 62 | self.data_time = 0 63 | self.gpu_time = 0 64 | 65 | inv_output = 1 / output 66 | inv_target = 1 / target 67 | abs_inv_diff = (inv_output - inv_target).abs() 68 | self.irmse = math.sqrt((torch.pow(abs_inv_diff, 2)).mean()) 69 | self.imae = float(abs_inv_diff.mean()) 70 | 71 | def individual_results(self, output_all, target_all): 72 | B = target_all.shape[0] 73 | individual_results_t = torch.zeros((B, 7), dtype=torch.float32) 74 | for i in range(B): 75 | target = target_all[i] 76 | output = output_all[i] 77 | valid_mask = np.logical_and(target >= self.mask_min, target <= self.mask_max) 78 | output = output[valid_mask] 79 | target = target[valid_mask] 80 | 81 | abs_diff = (output - target).abs() 82 | diff = (output - target) 83 | 84 | mse = float((torch.pow(abs_diff, 2)).mean()) 85 | rmse = math.sqrt(mse) 86 | rmselog = math.sqrt(float(((torch.log(target) - torch.log(output)) ** 2).mean())) 87 | 88 | mae = float(abs_diff.mean()) 89 | # lg10 = float((log10(output) - log10(target)).abs().mean()) 90 | absrel = float((abs_diff / target).mean()) 91 | sqrel = float(((diff ** 2) / target).mean()) 92 | 93 | maxRatio = torch.max(output / target, target / output) 94 | delta1 = float((maxRatio < 1.25).float().mean()) 95 | delta2 = float((maxRatio < 1.25 ** 2).float().mean()) 96 | delta3 = float((maxRatio < 1.25 ** 3).float().mean()) 97 | # self.data_time = 0 98 | # self.gpu_time = 0 99 | 100 | # inv_output = 1 / output 101 | # inv_target = 1 / target 102 | # abs_inv_diff = (inv_output - inv_target).abs() 103 | # self.irmse = math.sqrt((torch.pow(abs_inv_diff, 2)).mean()) 104 | # self.imae = float(abs_inv_diff.mean()) 105 | 106 | individual_results_t[i, 0] = absrel 107 | individual_results_t[i, 1] = sqrel 108 | individual_results_t[i, 2] = rmse 109 
| individual_results_t[i, 3] = rmselog 110 | 111 | individual_results_t[i, 4] = delta1 112 | individual_results_t[i, 5] = delta2 113 | individual_results_t[i, 6] = delta3 114 | 115 | return individual_results_t 116 | 117 | 118 | 119 | class Result_withIdx(object): 120 | def __init__(self, mask_min, mask_max): 121 | self.irmse, self.imae = 0, 0 122 | self.mse, self.rmse, self.mae = 0, 0, 0 123 | self.absrel, self.lg10 = 0, 0 124 | self.delta1, self.delta2, self.delta3 = 0, 0, 0 125 | self.data_time, self.gpu_time = 0, 0 126 | self.mask_min = mask_min 127 | self.mask_max = mask_max 128 | 129 | def set_to_worst(self): 130 | self.irmse, self.imae = np.inf, np.inf 131 | self.mse, self.rmse, self.mae = np.inf, np.inf, np.inf 132 | self.absrel, self.lg10 = np.inf, np.inf 133 | self.delta1, self.delta2, self.delta3 = 0, 0, 0 134 | self.data_time, self.gpu_time = 0, 0 135 | 136 | def update(self, irmse, imae, mse, rmse, mae, absrel, lg10, delta1, delta2, delta3, gpu_time, data_time): 137 | self.irmse, self.imae = irmse, imae 138 | self.mse, self.rmse, self.mae = mse, rmse, mae 139 | self.absrel, self.lg10 = absrel, lg10 140 | self.delta1, self.delta2, self.delta3 = delta1, delta2, delta3 141 | self.data_time, self.gpu_time = data_time, gpu_time 142 | 143 | def evaluate(self, output, target, idx_tensor): 144 | # idx_tensor should have the same size as output and target 145 | 146 | valid_mask = np.logical_and(target > self.mask_min, target < self.mask_max) 147 | # print(valid_mask.shape, type(valid_mask)) 148 | # print(valid_mask) 149 | # print(valid_mask.shape, idx_tensor.shape) 150 | final_mask = valid_mask & idx_tensor 151 | # print(final_mask.shape) 152 | output = output[final_mask] 153 | target = target[final_mask] 154 | 155 | abs_diff = (output - target).abs() 156 | diff = (output - target) 157 | 158 | self.mse = float((torch.pow(abs_diff, 2)).mean()) 159 | self.rmse = math.sqrt(self.mse) 160 | self.rmselog = math.sqrt(float(((torch.log(target) - torch.log(output)) ** 
2).mean())) 161 | 162 | self.mae = float(abs_diff.mean()) 163 | self.lg10 = float((log10(output) - log10(target)).abs().mean()) 164 | self.absrel = float((abs_diff / target).mean()) 165 | self.sqrel = float(((diff ** 2) / target).mean()) 166 | 167 | maxRatio = torch.max(output / target, target / output) 168 | self.delta1 = float((maxRatio < 1.25).float().mean()) 169 | self.delta2 = float((maxRatio < 1.25 ** 2).float().mean()) 170 | self.delta3 = float((maxRatio < 1.25 ** 3).float().mean()) 171 | self.data_time = 0 172 | self.gpu_time = 0 173 | 174 | inv_output = 1 / output 175 | inv_target = 1 / target 176 | abs_inv_diff = (inv_output - inv_target).abs() 177 | self.irmse = math.sqrt((torch.pow(abs_inv_diff, 2)).mean()) 178 | self.imae = float(abs_inv_diff.mean()) 179 | 180 | 181 | def miou(pred, target, n_classes=12): 182 | ious = [] 183 | pred = pred.view(-1) 184 | target = target.view(-1) 185 | 186 | # Ignore IoU for background class ("0") 187 | for cls in range(0, n_classes): # This goes from 1:n_classes-1 -> class "0" is ignored 188 | pred_inds = pred == cls 189 | target_inds = target == cls 190 | intersection = (pred_inds[target_inds]).long().sum().data.cpu()[0] # Cast to long to prevent overflows 191 | union = pred_inds.long().sum().data.cpu()[0] + target_inds.long().sum().data.cpu()[0] - intersection 192 | if union == 0: ious.append(float('nan')) # If there is no ground truth, do not include in evaluation 193 | else:ious.append(float(intersection) / float(max(union, 1))) 194 | return np.array(ious) 195 | 196 | 197 | def im2col_sliding_broadcasting(A, BSZ, stepsize=1): 198 | # Parameters 199 | M,N = A.shape[0],A.shape[1] 200 | col_extent = N - BSZ[1] + 1 201 | row_extent = M - BSZ[0] + 1 202 | 203 | # Get Starting block indices 204 | start_idx = np.arange(BSZ[0])[:,None]*N + np.arange(BSZ[1]) 205 | 206 | # Get offsetted indices across the height and width of input array 207 | offset_idx = np.arange(row_extent)[:,None]*N + np.arange(col_extent) 208 | 209 | # 
Get all actual indices & index into input array for final output 210 | return np.take (A,start_idx.ravel()[:,None] + offset_idx.ravel()[::stepsize]) 211 | 212 | 213 | def rgb2ycbcr(im): 214 | cbcr = np.empty_like(im) 215 | r = im[:,:,0] 216 | g = im[:,:,1] 217 | b = im[:,:,2] 218 | # Y 219 | cbcr[:,:,0] = .299 * r + .587 * g + .114 * b 220 | # Cb 221 | cbcr[:,:,1] = 128 - .169 * r - .331 * g + .5 * b 222 | # Cr 223 | cbcr[:,:,2] = 128 + .5 * r - .419 * g - .081 * b 224 | return cbcr # np.uint8(cbcr) 225 | 226 | def ycbcr2rgb(im): 227 | rgb = np.empty_like(im) 228 | y = im[:,:,0] 229 | cb = im[:,:,1] - 128 230 | cr = im[:,:,2] - 128 231 | # R 232 | rgb[:,:,0] = y + 1.402 * cr 233 | # G 234 | rgb[:,:,1] = y - .34414 * cb - .71414 * cr 235 | # B 236 | rgb[:,:,2] = y + 1.772 * cb 237 | return rgb # np.uint8(rgb) 238 | 239 | 240 | def img_greyscale(img): 241 | return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2] 242 | -------------------------------------------------------------------------------- /training/CPP_training.py: -------------------------------------------------------------------------------- 1 | import os, time, sys 2 | import torch 3 | from torch.utils.data import Dataset, DataLoader 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | from torch.optim import lr_scheduler 7 | import torch.nn.functional as F 8 | from torch.autograd import Variable 9 | 10 | import torchvision 11 | from torchvision import datasets, models, transforms 12 | from torchvision.utils import make_grid 13 | 14 | from models.T2Net import _UNetGenerator, init_weights 15 | 16 | from utils.metrics import * 17 | from utils.cpp_encoding import get_extrinsic_channel 18 | 19 | from training.base_model import set_requires_grad, base_model 20 | 21 | try: 22 | from apex import amp 23 | except ImportError: 24 | raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run with apex.") 25 | 26 | class CPP_training_model(base_model): 27 | def 
__init__(self, args, training_dataloader, testing_dataloader): 28 | super(CPP_training_model, self).__init__(args) 29 | self._initialize_training() 30 | 31 | self.training_dataloader = training_dataloader 32 | self.testing_dataloader = testing_dataloader 33 | self.MIN_DEPTH_CLIP = 1.0 34 | self.MAX_DEPTH_CLIP = 10.0 35 | 36 | self.EVAL_DEPTH_MIN = 1.0 37 | self.EVAL_DEPTH_MAX = 10.0 38 | 39 | self.CEILING_HEIGHT = 3.0 40 | 41 | self.save_evaluate_steps = 10 42 | 43 | self.depthEstModel = _UNetGenerator(input_nc = 4, output_nc = 1) 44 | self.model_name = ['depthEstModel'] 45 | 46 | if self.is_train: 47 | self.depth_optimizer = optim.Adam(self.depthEstModel.parameters(), lr=self.base_lr, betas=(0.5, 0.999)) 48 | self.optim_name = ['depth_optimizer'] 49 | self._get_scheduler() 50 | self.L1Loss = nn.L1Loss() 51 | self._initialize_networks(['depthEstModel']) 52 | 53 | # apex can only be applied to CUDA models 54 | if self.use_apex: 55 | self._init_apex(Num_losses=2) 56 | 57 | self.EVAL_best_loss = float('inf') 58 | self.EVAL_best_model_epoch = 0 59 | self.EVAL_all_results = {} 60 | 61 | self._check_parallel() 62 | 63 | def _get_project_name(self): 64 | return 'CPP_training_model' 65 | 66 | def _initialize_networks(self, model_name): 67 | for name in model_name: 68 | getattr(self, name).train().to(self.device) 69 | init_weights(getattr(self, name), net_name=name, init_type='normal', gain=0.02) 70 | 71 | def compute_depth_loss(self, rgb, gt_depth, mask=None): 72 | predicted_depth = self.depthEstModel(rgb.detach())[-1] 73 | 74 | if mask is not None: 75 | loss = self.L1Loss(predicted_depth[mask], gt_depth[mask]) 76 | 77 | else: 78 | loss = self.L1Loss(predicted_depth, gt_depth) 79 | 80 | return loss 81 | 82 | def train(self): 83 | phase = 'train' 84 | since = time.time() 85 | best_loss = float('inf') 86 | 87 | self.train_display_freq = len(self.training_dataloader) 88 | 89 | tensorboardX_iter_count = 0 90 | for epoch in range(self.total_epoch_num): 91 | print('\nEpoch 
{}/{}'.format(epoch+1, self.total_epoch_num)) 92 | print('-' * 10) 93 | fn = open(self.train_log,'a') 94 | fn.write('\nEpoch {}/{}\n'.format(epoch+1, self.total_epoch_num)) 95 | fn.write('--'*5+'\n') 96 | fn.close() 97 | 98 | self._set_models_train(['depthEstModel']) 99 | 100 | # Iterate over data. 101 | iterCount = 0 102 | 103 | for sample_dict in self.training_dataloader: 104 | imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth'] 105 | extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float 106 | if "intrinsic" in sample_dict.keys(): 107 | # for ScanNet only 108 | intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py 109 | focal_length = intrinsic_para[:, :2] 110 | p_pt = intrinsic_para[:, 2:] 111 | else: 112 | # for interiorNet 113 | focal_length = 300 114 | p_pt = (120, 160) 115 | 116 | extrinsic_channel = get_extrinsic_channel(imageTensor, focal_length, p_pt, extrinsic_para, self.CEILING_HEIGHT, augmentation=sample_dict['augmentation']) 117 | 118 | imageTensor_C = torch.cat((imageTensor, extrinsic_channel), dim=1) 119 | imageTensor_C = imageTensor_C.to(self.device) 120 | depthGTTensor = depthGTTensor.to(self.device) # [B_size, 1, 240, 320] 121 | valid_mask = (depthGTTensor >= -1.) & (depthGTTensor <= 1.) 122 | 123 | with torch.set_grad_enabled(phase=='train'): 124 | 125 | total_loss = 0. 
126 | ############# train the depthEstimator 127 | self.depth_optimizer.zero_grad() 128 | depth_loss = self.compute_depth_loss(imageTensor_C, depthGTTensor, valid_mask) 129 | total_loss += depth_loss 130 | 131 | if self.use_apex: 132 | with amp.scale_loss(total_loss, self.depth_optimizer) as total_loss_scaled: 133 | total_loss_scaled.backward() 134 | else: 135 | total_loss.backward() 136 | 137 | self.depth_optimizer.step() 138 | 139 | iterCount += 1 140 | if iterCount % 20 == 0: 141 | loss_name = ['total_loss', 'depth_loss'] 142 | loss_value = [total_loss, depth_loss] 143 | self.print_and_write_loss_summary(iterCount, len(self.training_dataloader), loss_name, loss_value, self.train_log) 144 | 145 | # take step in optimizer 146 | for scheduler in self.scheduler_list: 147 | scheduler.step() 148 | for optim in self.optim_name: 149 | lr = getattr(self, optim).param_groups[0]['lr'] 150 | lr_update = 'Epoch {}/{} finished: {} learning rate = {:7f}'.format(epoch+1, self.total_epoch_num, optim, lr) 151 | print(lr_update) 152 | 153 | fn = open(self.train_log,'a') 154 | fn.write(lr_update + '\n') 155 | fn.close() 156 | 157 | if (epoch+1) % self.save_evaluate_steps == 0: 158 | self.save_models(self.model_name, mode=epoch+1) 159 | _ = self.evaluate(epoch+1) 160 | 161 | time_elapsed = time.time() - since 162 | print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) 163 | 164 | fn = open(self.train_log,'a') 165 | fn.write('Training complete in {:.0f}m {:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60)) 166 | fn.close() 167 | 168 | best_model_summary = 'Overall best model is epoch {}'.format(self.EVAL_best_model_epoch) 169 | print(best_model_summary) 170 | print(self.EVAL_all_results[str(self.EVAL_best_model_epoch)]) 171 | fn = open(self.evaluate_log, 'a') 172 | fn.write(best_model_summary + '\n') 173 | fn.write(self.EVAL_all_results[str(self.EVAL_best_model_epoch)] + '\n') 174 | fn.close() 175 | 176 | def evaluate(self, mode): 177 | ''' 
178 | mode choose from or best 179 | is the number of epoch, represents the number of epoch, used for in training evaluation 180 | 'best' is used for after training mode 181 | ''' 182 | 183 | set_name = 'test' 184 | eval_model_list = ['depthEstModel'] 185 | 186 | if isinstance(mode, int) and self.is_train: 187 | self._set_models_eval(eval_model_list) 188 | if self.EVAL_best_loss == float('inf'): 189 | fn = open(self.evaluate_log, 'w') 190 | else: 191 | fn = open(self.evaluate_log, 'a') 192 | 193 | fn.write('Evaluating with mode: {} | dataset: {} \n'.format(mode, self.testing_set_name)) 194 | fn.write('\tEvaluation range min: {} | max: {} \n'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX)) 195 | fn.close() 196 | 197 | else: 198 | self._load_models(eval_model_list, mode) 199 | 200 | print('Evaluating with mode: {} | dataset: {}'.format(mode, self.testing_set_name)) 201 | print('\tEvaluation range min: {} | max: {}'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX)) 202 | 203 | total_loss = 0. 
204 | count = 0 205 | 206 | predTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu') 207 | grndTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu') 208 | imgTensor = torch.zeros((1, 3, self.H, self.W)).to('cpu') 209 | extTensor = torch.zeros((1, 6)).to('cpu') 210 | idx = 0 211 | 212 | with torch.no_grad(): 213 | for sample_dict in self.testing_dataloader: 214 | imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth'] 215 | extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float 216 | 217 | if "intrinsic" in sample_dict.keys(): 218 | # for ScanNet only 219 | intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py 220 | focal_length = intrinsic_para[:, :2] 221 | p_pt = intrinsic_para[:, 2:] 222 | else: 223 | # for interiorNet 224 | focal_length = 300 225 | p_pt = (120, 160) 226 | 227 | extrinsic_channel = get_extrinsic_channel(imageTensor, focal_length, p_pt, extrinsic_para, self.CEILING_HEIGHT) 228 | imageTensor_C = torch.cat((imageTensor, extrinsic_channel), dim=1) 229 | valid_mask = np.logical_and(depthGTTensor >= self.EVAL_DEPTH_MIN, depthGTTensor <= self.EVAL_DEPTH_MAX) 230 | 231 | idx += imageTensor.shape[0] 232 | print('epoch {}: have processed {} number samples in {} set'.format(mode, str(idx), set_name)) 233 | imageTensor_C = imageTensor_C.to(self.device) 234 | depthGTTensor = depthGTTensor.to(self.device) # real depth 235 | 236 | if self.is_train and self.use_apex: 237 | with amp.disable_casts(): 238 | predDepth = self.depthEstModel(imageTensor_C)[-1].detach().to('cpu') 239 | else: 240 | predDepth = self.depthEstModel(imageTensor_C)[-1].detach().to('cpu') 241 | 242 | # recover real depth 243 | predDepth = ((predDepth + 1.0) * 0.5 * (self.MAX_DEPTH_CLIP - self.MIN_DEPTH_CLIP)) + self.MIN_DEPTH_CLIP 244 | 245 | depthGTTensor = depthGTTensor.detach().to('cpu') 246 | predTensor = torch.cat((predTensor, predDepth), dim=0) 247 | grndTensor = torch.cat((grndTensor, depthGTTensor), dim=0) 248 | 
imgTensor = torch.cat((imgTensor, imageTensor.to('cpu')), dim=0) 249 | extTensor = torch.cat((extTensor, extrinsic_para), dim=0) 250 | 251 | if isinstance(mode, int) and self.is_train: 252 | eval_depth_loss = self.L1Loss(predDepth[valid_mask], depthGTTensor[valid_mask]) 253 | total_loss += eval_depth_loss.detach().cpu() 254 | 255 | count += 1 256 | 257 | if isinstance(mode, int) and self.is_train: 258 | validation_loss = (total_loss / count) 259 | 260 | results_nyu = Result(mask_min=self.EVAL_DEPTH_MIN, mask_max=self.EVAL_DEPTH_MAX) 261 | results_nyu.evaluate(predTensor[1:], grndTensor[1:]) 262 | individual_results = results_nyu.individual_results(predTensor[1:], grndTensor[1:]) 263 | 264 | result1 = '\tabs_rel:{:.3f}, sq_rel:{:.3f}, rmse:{:.3f}, rmse_log:{:.3f}, mae:{:.3f} '.format( 265 | results_nyu.absrel,results_nyu.sqrel,results_nyu.rmse,results_nyu.rmselog,results_nyu.mae) 266 | result2 = '\t[<1.25]:{:.3f}, [<1.25^2]:{:.3f}, [<1.25^3]::{:.3f}'.format(results_nyu.delta1,results_nyu.delta2,results_nyu.delta3) 267 | 268 | print(result1) 269 | print(result2) 270 | 271 | if isinstance(mode, int) and self.is_train: 272 | self.EVAL_all_results[str(mode)] = result1 + '\t' + result2 273 | 274 | if validation_loss.item() < self.EVAL_best_loss: 275 | self.EVAL_best_loss = validation_loss.item() 276 | self.EVAL_best_model_epoch = mode 277 | self.save_models(self.model_name, mode='best') 278 | 279 | best_model_summary = '\tCurrent eval loss {:.4f}, current best loss {:.4f}, current best model {}\n'.format(validation_loss.item(), self.EVAL_best_loss, self.EVAL_best_model_epoch) 280 | print(best_model_summary) 281 | 282 | fn = open(self.evaluate_log, 'a') 283 | fn.write(result1 + '\n') 284 | fn.write(result2 + '\n') 285 | fn.write(best_model_summary + '\n') 286 | fn.close() 287 | 288 | return_dict = {} 289 | return_dict['rgb'] = imgTensor[1:] 290 | return_dict['depth_pred'] = predTensor[1:] 291 | return_dict['depth_gt'] = grndTensor[1:] 292 | return_dict['extrinsic'] = 
extTensor[1:] 293 | return_dict['ind_results'] = individual_results 294 | 295 | return return_dict -------------------------------------------------------------------------------- /training/PDA_training.py: -------------------------------------------------------------------------------- 1 | import os, time, sys 2 | import torch 3 | from torch.utils.data import Dataset, DataLoader 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | from torch.optim import lr_scheduler 7 | import torch.nn.functional as F 8 | from torch.autograd import Variable 9 | 10 | import torchvision 11 | from torchvision import datasets, models, transforms 12 | from torchvision.utils import make_grid 13 | 14 | from models.T2Net import _UNetGenerator, init_weights 15 | 16 | from utils.metrics import * 17 | from utils.pda_aug import warp_image_depth_with_pose_augmentation 18 | 19 | from training.base_model import set_requires_grad, base_model 20 | 21 | try: 22 | from apex import amp 23 | except ImportError: 24 | raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run with apex.") 25 | 26 | class PDA_training_model(base_model): 27 | def __init__(self, args, training_dataloader, testing_dataloader): 28 | super(PDA_training_model, self).__init__(args) 29 | self._initialize_training() 30 | 31 | self.training_dataloader = training_dataloader 32 | self.testing_dataloader = testing_dataloader 33 | self.MIN_DEPTH_CLIP = 1.0 34 | self.MAX_DEPTH_CLIP = 10.0 35 | 36 | self.EVAL_DEPTH_MIN = 1.0 37 | self.EVAL_DEPTH_MAX = 10.0 38 | 39 | self.CEILING_HEIGHT = 3.0 40 | 41 | self.save_evaluate_steps = 10 42 | 43 | self.depthEstModel = _UNetGenerator(input_nc = 3, output_nc = 1) 44 | self.model_name = ['depthEstModel'] 45 | 46 | if self.is_train: 47 | self.depth_optimizer = optim.Adam(self.depthEstModel.parameters(), lr=self.base_lr, betas=(0.5, 0.999)) 48 | self.optim_name = ['depth_optimizer'] 49 | self._get_scheduler() 50 | self.L1Loss = nn.L1Loss() 51 | 
self._initialize_networks(['depthEstModel']) 52 | 53 | # apex can only be applied to CUDA models 54 | if self.use_apex: 55 | self._init_apex(Num_losses=2) 56 | 57 | self.EVAL_best_loss = float('inf') 58 | self.EVAL_best_model_epoch = 0 59 | self.EVAL_all_results = {} 60 | 61 | self._check_parallel() 62 | 63 | def _get_project_name(self): 64 | return 'PDA_training_model' 65 | 66 | def _initialize_networks(self, model_name): 67 | for name in model_name: 68 | getattr(self, name).train().to(self.device) 69 | init_weights(getattr(self, name), net_name=name, init_type='normal', gain=0.02) 70 | 71 | def compute_depth_loss(self, rgb, gt_depth, mask=None): 72 | predicted_depth = self.depthEstModel(rgb.detach())[-1] 73 | 74 | if mask is not None: 75 | loss = self.L1Loss(predicted_depth[mask], gt_depth[mask]) 76 | 77 | else: 78 | loss = self.L1Loss(predicted_depth, gt_depth) 79 | 80 | return loss 81 | 82 | def train(self): 83 | phase = 'train' 84 | since = time.time() 85 | best_loss = float('inf') 86 | 87 | self.train_display_freq = len(self.training_dataloader) 88 | 89 | tensorboardX_iter_count = 0 90 | for epoch in range(self.total_epoch_num): 91 | print('\nEpoch {}/{}'.format(epoch+1, self.total_epoch_num)) 92 | print('-' * 10) 93 | fn = open(self.train_log,'a') 94 | fn.write('\nEpoch {}/{}\n'.format(epoch+1, self.total_epoch_num)) 95 | fn.write('--'*5+'\n') 96 | fn.close() 97 | 98 | self._set_models_train(['depthEstModel']) 99 | 100 | # Iterate over data. 
101 | iterCount = 0 102 | 103 | for sample_dict in self.training_dataloader: 104 | imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth'] 105 | extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float 106 | if "intrinsic" in sample_dict.keys(): 107 | # for ScanNet only 108 | intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py 109 | focal_length = intrinsic_para[:, :2] 110 | p_pt = intrinsic_para[:, 2:] 111 | else: 112 | # for interiorNet 113 | focal_length = 300 114 | p_pt = (120, 160) 115 | 116 | warp_return_dict = warp_image_depth_with_pose_augmentation(imageTensor, depthGTTensor, extrinsic_para, focal_length, p_pt, self.training_set_name, augmentation=sample_dict['augmentation']) 117 | imageTensorWarped, depthGTTensorWarped = warp_return_dict['image_warped'], warp_return_dict['depth_warped'] 118 | 119 | imageTensor = imageTensor.to(self.device) 120 | depthGTTensor = depthGTTensor.to(self.device) # [B_size, 1, 240, 320] 121 | valid_mask = (depthGTTensor >= -1.) & (depthGTTensor <= 1.) 122 | 123 | imageTensorWarped = imageTensorWarped.to(self.device) 124 | depthGTTensorWarped = depthGTTensorWarped.to(self.device) # [B_size, 1, 240, 320] 125 | warped_valid_mask = (depthGTTensorWarped >= -1.) & (depthGTTensorWarped <= 1.) 126 | 127 | with torch.set_grad_enabled(phase=='train'): 128 | 129 | total_loss = 0. 
130 | ############# train the depthEstimator 131 | self.depth_optimizer.zero_grad() 132 | depth_loss = self.compute_depth_loss(imageTensor, depthGTTensor, valid_mask) 133 | total_loss += depth_loss 134 | 135 | warped_depth_loss = self.compute_depth_loss(imageTensorWarped, depthGTTensorWarped, warped_valid_mask) 136 | total_loss += warped_depth_loss 137 | 138 | if self.use_apex: 139 | with amp.scale_loss(total_loss, self.depth_optimizer) as total_loss_scaled: 140 | total_loss_scaled.backward() 141 | else: 142 | total_loss.backward() 143 | 144 | self.depth_optimizer.step() 145 | 146 | iterCount += 1 147 | if iterCount % 20 == 0: 148 | loss_name = ['total_loss', 'depth_loss', 'warped_depth_loss'] 149 | loss_value = [total_loss, depth_loss, warped_depth_loss] 150 | self.print_and_write_loss_summary(iterCount, len(self.training_dataloader), loss_name, loss_value, self.train_log) 151 | 152 | # take step in optimizer 153 | for scheduler in self.scheduler_list: 154 | scheduler.step() 155 | for optim in self.optim_name: 156 | lr = getattr(self, optim).param_groups[0]['lr'] 157 | lr_update = 'Epoch {}/{} finished: {} learning rate = {:7f}'.format(epoch+1, self.total_epoch_num, optim, lr) 158 | print(lr_update) 159 | 160 | fn = open(self.train_log,'a') 161 | fn.write(lr_update + '\n') 162 | fn.close() 163 | 164 | if (epoch+1) % self.save_evaluate_steps == 0: 165 | self.save_models(self.model_name, mode=epoch+1) 166 | _ = self.evaluate(epoch+1) 167 | 168 | time_elapsed = time.time() - since 169 | print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) 170 | 171 | fn = open(self.train_log,'a') 172 | fn.write('Training complete in {:.0f}m {:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60)) 173 | fn.close() 174 | 175 | best_model_summary = 'Overall best model is epoch {}'.format(self.EVAL_best_model_epoch) 176 | print(best_model_summary) 177 | print(self.EVAL_all_results[str(self.EVAL_best_model_epoch)]) 178 | fn = open(self.evaluate_log, 
'a') 179 | fn.write(best_model_summary + '\n') 180 | fn.write(self.EVAL_all_results[str(self.EVAL_best_model_epoch)] + '\n') 181 | fn.close() 182 | 183 | def evaluate(self, mode): 184 | ''' 185 | mode choose from or best 186 | is the number of epoch, represents the number of epoch, used for in training evaluation 187 | 'best' is used for after training mode 188 | ''' 189 | 190 | set_name = 'test' 191 | eval_model_list = ['depthEstModel'] 192 | 193 | if isinstance(mode, int) and self.is_train: 194 | self._set_models_eval(eval_model_list) 195 | if self.EVAL_best_loss == float('inf'): 196 | fn = open(self.evaluate_log, 'w') 197 | else: 198 | fn = open(self.evaluate_log, 'a') 199 | 200 | fn.write('Evaluating with mode: {} | dataset: {} \n'.format(mode, self.testing_set_name)) 201 | fn.write('\tEvaluation range min: {} | max: {} \n'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX)) 202 | fn.close() 203 | 204 | else: 205 | self._load_models(eval_model_list, mode) 206 | 207 | print('Evaluating with mode: {} | dataset: {}'.format(mode, self.testing_set_name)) 208 | print('\tEvaluation range min: {} | max: {}'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX)) 209 | 210 | total_loss = 0. 
211 | count = 0 212 | 213 | predTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu') 214 | grndTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu') 215 | imgTensor = torch.zeros((1, 3, self.H, self.W)).to('cpu') 216 | extTensor = torch.zeros((1, 6)).to('cpu') 217 | idx = 0 218 | 219 | # tensorboardX_iter_count = 0 220 | with torch.no_grad(): 221 | for sample_dict in self.testing_dataloader: 222 | imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth'] 223 | extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float 224 | 225 | if "intrinsic" in sample_dict.keys(): 226 | # for ScanNet only 227 | intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py 228 | focal_length = intrinsic_para[:, :2] 229 | p_pt = intrinsic_para[:, 2:] 230 | else: 231 | # for interiorNet 232 | focal_length = 300 233 | p_pt = (120, 160) 234 | 235 | valid_mask = np.logical_and(depthGTTensor >= self.EVAL_DEPTH_MIN, depthGTTensor <= self.EVAL_DEPTH_MAX) 236 | 237 | idx += imageTensor.shape[0] 238 | print('epoch {}: have processed {} number samples in {} set'.format(mode, str(idx), set_name)) 239 | imageTensor = imageTensor.to(self.device) 240 | depthGTTensor = depthGTTensor.to(self.device) # real depth 241 | 242 | if self.is_train and self.use_apex: 243 | with amp.disable_casts(): 244 | predDepth = self.depthEstModel(imageTensor)[-1].detach().to('cpu') 245 | else: 246 | predDepth = self.depthEstModel(imageTensor)[-1].detach().to('cpu') 247 | 248 | # recover real depth 249 | predDepth = ((predDepth + 1.0) * 0.5 * (self.MAX_DEPTH_CLIP - self.MIN_DEPTH_CLIP)) + self.MIN_DEPTH_CLIP 250 | 251 | depthGTTensor = depthGTTensor.detach().to('cpu') 252 | predTensor = torch.cat((predTensor, predDepth), dim=0) 253 | grndTensor = torch.cat((grndTensor, depthGTTensor), dim=0) 254 | imgTensor = torch.cat((imgTensor, imageTensor.to('cpu')), dim=0) 255 | extTensor = torch.cat((extTensor, extrinsic_para), dim=0) 256 | 257 | if isinstance(mode, 
int) and self.is_train: 258 | eval_depth_loss = self.L1Loss(predDepth[valid_mask], depthGTTensor[valid_mask]) 259 | total_loss += eval_depth_loss.detach().cpu() 260 | 261 | count += 1 262 | 263 | if isinstance(mode, int) and self.is_train: 264 | validation_loss = (total_loss / count) 265 | 266 | results_nyu = Result(mask_min=self.EVAL_DEPTH_MIN, mask_max=self.EVAL_DEPTH_MAX) 267 | results_nyu.evaluate(predTensor[1:], grndTensor[1:]) 268 | individual_results = results_nyu.individual_results(predTensor[1:], grndTensor[1:]) 269 | 270 | result1 = '\tabs_rel:{:.3f}, sq_rel:{:.3f}, rmse:{:.3f}, rmse_log:{:.3f}, mae:{:.3f} '.format( 271 | results_nyu.absrel,results_nyu.sqrel,results_nyu.rmse,results_nyu.rmselog,results_nyu.mae) 272 | result2 = '\t[<1.25]:{:.3f}, [<1.25^2]:{:.3f}, [<1.25^3]::{:.3f}'.format(results_nyu.delta1,results_nyu.delta2,results_nyu.delta3) 273 | 274 | print(result1) 275 | print(result2) 276 | 277 | if isinstance(mode, int) and self.is_train: 278 | self.EVAL_all_results[str(mode)] = result1 + '\t' + result2 279 | 280 | if validation_loss.item() < self.EVAL_best_loss: 281 | self.EVAL_best_loss = validation_loss.item() 282 | self.EVAL_best_model_epoch = mode 283 | self.save_models(self.model_name, mode='best') 284 | 285 | best_model_summary = '\tCurrent eval loss {:.4f}, current best loss {:.4f}, current best model {}\n'.format(validation_loss.item(), self.EVAL_best_loss, self.EVAL_best_model_epoch) 286 | print(best_model_summary) 287 | 288 | fn = open(self.evaluate_log, 'a') 289 | fn.write(result1 + '\n') 290 | fn.write(result2 + '\n') 291 | fn.write(best_model_summary + '\n') 292 | fn.close() 293 | 294 | return_dict = {} 295 | return_dict['rgb'] = imgTensor[1:] 296 | return_dict['depth_pred'] = predTensor[1:] 297 | return_dict['depth_gt'] = grndTensor[1:] 298 | return_dict['extrinsic'] = extTensor[1:] 299 | return_dict['ind_results'] = individual_results 300 | 301 | return return_dict 
-------------------------------------------------------------------------------- /utils/pda_aug.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn.functional as F 4 | 5 | def _get_intrinsic_matrix(focal_length, p_pt, batch_size): 6 | K = torch.zeros((batch_size, 9), dtype=torch.float32) 7 | K[:, -1] = 1. 8 | if isinstance(focal_length, (int, float)): 9 | # suggest fx = fy for all samples 10 | K[:, 0] = focal_length 11 | K[:, 4] = focal_length 12 | elif isinstance(focal_length, (list, tuple)): 13 | # suggest fx, fy for all samples 14 | K[:, 0] = focal_length[0] 15 | K[:, 4] = focal_length[1] 16 | elif torch.is_tensor(focal_length): 17 | if focal_length.dim() == 1: 18 | # suggest fx = fy for indivdual sample 19 | K[:, 0] = focal_length 20 | K[:, 4] = focal_length 21 | elif focal_length.dim() == 2: 22 | # suggest fx, fy for indivdual sample 23 | K[:, 0] = focal_length[:, 0] 24 | K[:, 4] = focal_length[:, 1] 25 | else: 26 | raise ValueError('focal length tensor has to have shape of [B, ] or [B, 2]') 27 | else: 28 | raise ValueError('focal length variable should be either int/float, list/tuple or tensor of size [B, ]/[B, 2]') 29 | 30 | if isinstance(p_pt, (list, tuple)): 31 | K[:, 2] = p_pt[1] 32 | K[:, 5] = p_pt[0] 33 | elif torch.is_tensor(p_pt): 34 | assert p_pt.dim() == 2 35 | K[:, 2] = p_pt[:, 1] 36 | K[:, 5] = p_pt[:, 0] 37 | else: 38 | raise ValueError('principle point variable should be either list/tuple or tensor of size [B, 2]') 39 | return K.reshape(batch_size, 3, 3) 40 | 41 | def _get_inverse_intrinsic_matrix(K): 42 | ''' 43 | K is a tensor with shape [B, 3, 3] 44 | ''' 45 | K_inv = torch.zeros_like(K, dtype=torch.float32) 46 | K_inv[:, 0, 0] = 1. / K[:, 0, 0] 47 | K_inv[:, 1, 1] = 1. / K[:, 1, 1] 48 | K_inv[:, 0, 2] = -K[:, 0, 2] / K[:, 0, 0] 49 | K_inv[:, 1, 2] = -K[:, 1, 2] / K[:, 1, 1] 50 | K_inv[:, -1, -1] = 1. 
51 | return K_inv 52 | 53 | def _convert_depth_for_projection(depthGT, dataset, method, is_train=True, MAX_DEPTH_CLIP_GIVEN=None, MIN_DEPTH_CLIP_GIVEN=None): 54 | ''' 55 | default setting: dataset: interiorNet, method: vanilla 56 | ''' 57 | # setting MAX_DEPTH_CLIP, MIN_DEPTH_CLIP 58 | if dataset == 'interiorNet': 59 | MAX_DEPTH_CLIP=10.0 60 | MIN_DEPTH_CLIP=1.0 61 | elif dataset == 'ScanNet': 62 | MAX_DEPTH_CLIP=10.0 63 | MIN_DEPTH_CLIP=1.0 64 | else: 65 | raise RuntimeError('Current only support interiorNet | ScanNet!') 66 | 67 | # overwrite if MAX_DEPTH_CLIP or MIN_DEPTH_CLIP is given 68 | if MAX_DEPTH_CLIP_GIVEN is not None: 69 | MAX_DEPTH_CLIP = MAX_DEPTH_CLIP_GIVEN 70 | if MIN_DEPTH_CLIP_GIVEN is not None: 71 | MIN_DEPTH_CLIP = MIN_DEPTH_CLIP_GIVEN 72 | 73 | if method == 'vanilla': 74 | depthGT = ((depthGT * 0.5) + 0.5) * (MAX_DEPTH_CLIP - MIN_DEPTH_CLIP) + MIN_DEPTH_CLIP 75 | else: 76 | raise RuntimeError('Current only support vanilla') 77 | 78 | return depthGT 79 | 80 | def _rescale_depth_for_training(depthGT, dataset, method, is_train=True, MAX_DEPTH_CLIP_GIVEN=None, MIN_DEPTH_CLIP_GIVEN=None): 81 | # setting MAX_DEPTH_CLIP, MIN_DEPTH_CLIP 82 | if dataset == 'interiorNet': 83 | MAX_DEPTH_CLIP=10.0 84 | MIN_DEPTH_CLIP=1.0 85 | elif dataset == 'ScanNet': 86 | MAX_DEPTH_CLIP=10.0 87 | MIN_DEPTH_CLIP=1.0 88 | else: 89 | raise RuntimeError('Current only support interiorNet | ScanNet!') 90 | 91 | # overwrite if MAX_DEPTH_CLIP or MIN_DEPTH_CLIP is given 92 | if MAX_DEPTH_CLIP_GIVEN is not None: 93 | MAX_DEPTH_CLIP = MAX_DEPTH_CLIP_GIVEN 94 | if MIN_DEPTH_CLIP_GIVEN is not None: 95 | MIN_DEPTH_CLIP = MIN_DEPTH_CLIP_GIVEN 96 | 97 | if method == 'vanilla': 98 | depthGT = (((depthGT - MIN_DEPTH_CLIP) / (MAX_DEPTH_CLIP - MIN_DEPTH_CLIP)) - 0.5) * 2.0 99 | else: 100 | raise RuntimeError('Current only support vanilla!') 101 | 102 | return depthGT 103 | 104 | def _compute_distance_map(pc, H, W): 105 | B = pc.shape[0] 106 | return torch.sqrt(pc[:, 0]**2 + pc[:, 1]**2 + 
pc[:, 2]**2).reshape(B, 1, H, W) 107 | 108 | def _distance_2_depth(distance_map, K_inv): 109 | B, H, W = distance_map.shape[0], distance_map.shape[2], distance_map.shape[3] 110 | 111 | grid_y, grid_x = np.mgrid[0:H, 0:W] 112 | grid_y, grid_x = torch.tensor(grid_y, dtype=torch.float32), torch.tensor(grid_x, dtype=torch.float32) 113 | q = torch.stack((grid_x.reshape(-1), grid_y.reshape(-1), torch.ones_like(grid_x.reshape(-1))), dim=0).unsqueeze(0).expand(B, 3, H*W) 114 | 115 | pc = torch.bmm(K_inv, q) 116 | 117 | denom = torch.sqrt(pc[:, 0]**2 + pc[:, 1]**2 + 1) # [B, N] 118 | depth_map = distance_map.reshape(B, -1) / denom 119 | depth_map = depth_map.reshape(B, 1, H, W) 120 | 121 | return depth_map 122 | 123 | def warp_image_depth_with_pose_augmentation(image, depthGT, pose, focal_length, p_pt, training_dataset_name, 124 | pose_perturbed=None, MAX_DEPTH_CLIP=10.0, MIN_DEPTH_CLIP=1.0, method='vanilla', 125 | is_train=True, include_depth_warp=True, augmentation=None, pose_sample_mode='uniform'): 126 | ''' 127 | depthGT is the depth tensor with shape [B, 1, H, W] -- need real depth ! 
128 | ''' 129 | return_dict = {} 130 | # first flip back augmented sample since the pose is still for the original sample 131 | if augmentation is not None: 132 | image[augmentation] = torch.flip(image[augmentation], [1, 3]) 133 | depthGT[augmentation] = torch.flip(depthGT[augmentation], [1, 3]) 134 | 135 | dataset = training_dataset_name.split('_')[0] 136 | # depth is preprocess in the range [-1., 1.], we need abs scale depth for reprojection 137 | depthGT = _convert_depth_for_projection(depthGT, dataset, method, is_train) 138 | depthGT[depthGT < 1e-6] = 1e6 # to filter close to 0 depth 139 | 140 | B, H, W = image.shape[0], image.shape[2], image.shape[3] 141 | K = _get_intrinsic_matrix(focal_length, p_pt, B) 142 | K_inv = _get_inverse_intrinsic_matrix(K) 143 | 144 | translation_sampler, euler_sampler = sample_pose_perturbance(pose, dataset, mode=pose_sample_mode) 145 | pc = image_to_pointcloud(depthGT, K_inv, homogeneous_coord=True) 146 | dist_map = _compute_distance_map(pc, H, W) 147 | 148 | if pose_perturbed is None: 149 | # random perturb pose if the new pose is not given 150 | pose_perturbed = pose.clone() 151 | pose_perturbed[:, :3] += translation_sampler.squeeze(1) 152 | pose_perturbed[:, 3:] += euler_sampler.squeeze(1) 153 | 154 | pc_perturbed_depth, pc_perturbed_image = _transform_pc_with_poses(pc, pose, pose_perturbed) 155 | pixel_coords_perturbed = pointcloud_to_pixel_coords(pc_perturbed_image, K, image) 156 | image_warped = F.grid_sample(image, pixel_coords_perturbed, padding_mode="border") 157 | return_dict['image_warped'] = image_warped 158 | if include_depth_warp: 159 | # depth_warped is the augmented depth we used in all training 160 | dist_warped = F.grid_sample(dist_map, pixel_coords_perturbed) 161 | depth_warped = _distance_2_depth(dist_warped, K_inv) # no translation in our case, convert depth to distance to get rid of grid artifacts 162 | depth_warped[depth_warped > 1e3] = 0. 
163 | 164 | depth_reproject = pointcloud_to_depth_maps(pc_perturbed_depth, K, image) 165 | depth_reproject[depth_reproject > 1e3] = 0. 166 | 167 | # now convert the reproject depth back to [-1., 1.] to continue training 168 | depth_reproject = _rescale_depth_for_training(depth_reproject, dataset, method, is_train) 169 | if include_depth_warp: 170 | depth_warped = _rescale_depth_for_training(depth_warped, dataset, method, is_train) 171 | return_dict['depth_warped'] = depth_warped 172 | 173 | return_dict['depth_reproject'] = depth_reproject 174 | return_dict['pose_perturbed'] = pose_perturbed 175 | 176 | # make lrflip for augmented samples 177 | if augmentation is not None: 178 | return_dict['image_warped'][augmentation] = torch.flip(return_dict['image_warped'][augmentation], [1, 3]) 179 | return_dict['depth_reproject'][augmentation] = torch.flip(return_dict['depth_reproject'][augmentation], [1, 3]) 180 | if include_depth_warp: 181 | return_dict['depth_warped'][augmentation] = torch.flip(return_dict['depth_warped'][augmentation], [1, 3]) 182 | 183 | return return_dict 184 | 185 | def sample_pose_perturbance(pose, dataset, upper=0.1, lower=-0.1, mode='uniform'): 186 | ''' 187 | input pose: tensor with shape [B, 6] 188 | return pose_perturbed: pose with shape [B, 6] 189 | ''' 190 | B = pose.shape[0] 191 | if mode == 'uniform': 192 | translation_sampler = torch.rand((B, 1, 3)) * 0. 
193 | euler_sampler = torch.zeros((B, 1, 3), dtype=torch.float32).uniform_(lower, upper) 194 | # add more sampling mode as you like~ 195 | return translation_sampler, euler_sampler 196 | 197 | def image_to_pointcloud(depth, K_inv, homogeneous_coord=False): 198 | assert depth.dim() == 4 199 | assert depth.size(1) == 1 200 | 201 | B, H, W = depth.shape[0], depth.shape[2], depth.shape[3] 202 | depth_v = depth.reshape(B, 1, -1) 203 | 204 | grid_y, grid_x = np.mgrid[0:H, 0:W] 205 | grid_y, grid_x = torch.tensor(grid_y, dtype=torch.float32), torch.tensor(grid_x, dtype=torch.float32) 206 | q = torch.stack((grid_x.reshape(-1), grid_y.reshape(-1), torch.ones_like(grid_x.reshape(-1))), dim=0).unsqueeze(0).expand(B, 3, H*W) 207 | 208 | pc = torch.bmm(K_inv, q) * depth_v 209 | if homogeneous_coord: 210 | pc = torch.cat((pc, torch.ones((B, 1, depth_v.shape[-1]), dtype=pc.dtype)), dim=1) 211 | return pc 212 | 213 | def pointcloud_to_pixel_coords(pc, K, image, normalization=True, eps=1e-8): 214 | B, H, W = image.shape[0], image.shape[2], image.shape[3] 215 | pc = pc[:, :3, :] 216 | pc = pc / (pc[:, -1, :].unsqueeze(1) + eps) 217 | p_coords = torch.bmm(K, pc) 218 | p_coords = p_coords[:, :2, :] 219 | if normalization: 220 | p_coords_n = torch.zeros_like(p_coords, dtype=torch.float32) 221 | p_coords_n[:, 0, :] = p_coords[:, 0, :] / (W - 1.) 222 | p_coords_n[:, 1, :] = p_coords[:, 1, :] / (H - 1.) 223 | p_coords_n = (p_coords_n - 0.5) * 2. 
224 | u_proj_mask = ((p_coords_n[:, 0, :] > 1) + (p_coords_n[:, 0, :] < -1)) 225 | p_coords_n[:, 0, :][u_proj_mask] = 2 226 | v_proj_mask = ((p_coords_n[:, 1, :] > 1) + (p_coords_n[:, 1, :] < -1)) 227 | p_coords_n[:, 1, :][v_proj_mask] = 2 228 | 229 | p_coords_n = p_coords_n.reshape(B, 2, H, W).permute(0, 2, 3, 1) 230 | return p_coords_n 231 | else: 232 | return p_coords 233 | 234 | def pointcloud_to_depth_maps(pc, K, image, eps=1e-7): 235 | B, H, W = image.shape[0], image.shape[2], image.shape[3] 236 | pc = pc[:, :3, :] 237 | zt = pc[:, 2, :] 238 | pc = pc / (pc[:, -1, :].unsqueeze(1) + eps) 239 | p_coords = torch.bmm(K, pc) 240 | p_coords = p_coords[:, :2, :] 241 | xt, yt = p_coords[:, 0, :].type(torch.long), p_coords[:, 1, :].type(torch.long) 242 | keep = (yt < H) & (yt >= 0) & (xt < W) & (xt >= 0) 243 | depth_map = torch.zeros((B, H, W), dtype=torch.float32) 244 | depth_map[:, yt[keep], xt[keep]] = zt[keep] 245 | return depth_map.unsqueeze(1) 246 | 247 | def _transform_pc_with_poses(pc, pose1, pose2): 248 | B = pose1.shape[0] 249 | 250 | R_1 = torch.bmm(Rotz(pose1[:, 5]), torch.bmm(Roty(pose1[:, 4]), Rotx(pose1[:, 3]))) 251 | t_1 = pose1[:, :3].reshape(-1, 3, 1) 252 | 253 | 254 | R_2 = torch.bmm(Rotz(pose2[:, 5]), torch.bmm(Roty(pose2[:, 4]), Rotx(pose2[:, 3]))) 255 | t_2 = pose2[:, :3].reshape(-1, 3, 1) 256 | 257 | R0 = torch.eye(3).unsqueeze(0).expand(B, 3, 3) 258 | R0[:, 0, 0] = -1 # handedness is different than our camera model 259 | R_1 = torch.bmm(R_1, R0) 260 | R_2 = torch.bmm(R_2, R0) 261 | 262 | cam_coord = pc[:, :3, :] 263 | 264 | cam_coord_depth = R_2.transpose(1, 2)@R_1@cam_coord + R_2.transpose(1, 2)@(t_2-t_1) 265 | cam_coord_rgb = R_1.transpose(1, 2)@R_2@cam_coord + R_1.transpose(1, 2)@(t_1-t_2) 266 | 267 | return cam_coord_depth, cam_coord_rgb 268 | 269 | def Rotx(t): 270 | """ 271 | Rotation about the x-axis. 
272 | np.array([[1, 0, 0], [0, c, -s], [0, s, c]]) 273 | 274 | -- input t shape B x 1 275 | -- return B x 3 x 3 276 | """ 277 | B = t.shape[0] 278 | Rx = torch.zeros((B, 9, 1), dtype=torch.float) 279 | 280 | c = torch.cos(t) 281 | s = torch.sin(t) 282 | ones = torch.ones(B) 283 | 284 | Rx[:, 0, 0] = ones 285 | Rx[:, 4, 0] = c 286 | Rx[:, 5, 0] = -s 287 | Rx[:, 7, 0] = s 288 | Rx[:, 8, 0] = c 289 | 290 | Rx = Rx.reshape(B, 3, 3) 291 | 292 | return Rx 293 | 294 | 295 | def Roty(t): 296 | """ 297 | Rotation about the x-axis. 298 | np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) 299 | 300 | -- input t shape B x 1 301 | -- return B x 3 x 3 302 | """ 303 | B = t.shape[0] 304 | Ry = torch.zeros((B, 9, 1), dtype=torch.float) 305 | 306 | c = torch.cos(t) 307 | s = torch.sin(t) 308 | ones = torch.ones(B) 309 | 310 | Ry[:, 0, 0] = c 311 | Ry[:, 2, 0] = s 312 | Ry[:, 4, 0] = ones 313 | Ry[:, 6, 0] = -s 314 | Ry[:, 8, 0] = c 315 | 316 | Ry = Ry.reshape(B, 3, 3) 317 | 318 | return Ry 319 | 320 | def Rotz(t): 321 | """ 322 | Rotation about the z-axis. 
# -------------------------------------------------------------------------- #
# training/base_model.py -- shared training utilities and the base model class.
# -------------------------------------------------------------------------- #
import os, copy
import datetime
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict

import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import make_grid
# from tensorboardX import SummaryWriter

from utils.metrics import *

try:
    from apex import amp
except ImportError:
    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run with apex.")

import torch.multiprocessing as mp

def set_requires_grad(nets, requires_grad=False):
    """Set requires_grad=False for all the networks to avoid unnecessary computations
    Parameters:
        nets (network list)   -- a list of networks
        requires_grad (bool)  -- whether the networks require gradients or not
    """
    if not isinstance(nets, list):
        nets = [nets]
    for net in nets:
        if net is not None:
            for param in net.parameters():
                param.requires_grad = requires_grad

def apply_scheduler(optimizer, lr_policy, num_epoch=None, total_num_epoch=None):
    """Attach a learning-rate scheduler to *optimizer*.

    lr_policy: 'linear' (constant lr for the first num_epoch epochs, then a
        linear decay towards 0 over the remaining epochs; the last epoch is
        not exactly 0), 'step' (halve every 10 epochs) or 'plateau'.
    num_epoch / total_num_epoch: only used by the 'linear' policy.
    Raises NotImplementedError for any other policy.
    """
    if lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 - num_epoch) / float(total_num_epoch - num_epoch + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    elif lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # BUG FIX: the NotImplementedError used to be *returned*, so callers
        # silently received the exception object instead of a scheduler.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % lr_policy)
    return scheduler
class base_model(nn.Module):
    '''
    Shared base class for the training models: experiment bookkeeping and
    logging, scheduler / apex / DataParallel setup and checkpoint I/O.
    '''
    def __init__(self, args):
        super(base_model, self).__init__()
        self.device = args.device
        self.is_train = args.is_train
        self.training_set_name = args.training_set_name
        self.testing_set_name = args.testing_set_name
        self.project_name = args.project_name
        self.exp_dir = args.exp_dir

        self.use_tensorboardX = False
        self.use_apex = True

        self.sampleSize = args.sampleSize  # patch size for training the model. Default: [240, 320]
        self.H, self.W = self.sampleSize[0], self.sampleSize[1]
        self.batch_size = args.batch_size
        self.total_epoch_num = args.total_epoch_num  # total number of epoch in training
        self.base_lr = args.base_lr  # base learning rate

    def _initialize_training(self):
        '''Create the experiment directory and the train/evaluate log files.'''
        if self.project_name is not None:
            self.save_dir = os.path.join(self.exp_dir, self.project_name)
        else:
            # derive a project name from the subclass default + training set
            self.project_name = self._get_project_name()
            self.project_name = self.project_name + '_' + self.training_set_name
            self.save_dir = os.path.join(self.exp_dir, self.project_name)

        project_name_info = 'project name: {}'.format(self.project_name)
        save_dir_info = 'save dir: {}'.format(self.save_dir)
        dataset_info = 'training: {}\ntesting: {}'.format(self.training_set_name, self.testing_set_name)
        if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)

        if self.is_train:
            today = datetime.datetime.today()
            self.train_log = os.path.join(self.save_dir, 'train_{}.log'.format(today.strftime('%m-%d-%Y')))

            fn = open(self.train_log, 'w')
            fn.write(project_name_info + '\n')
            fn.write(save_dir_info + '\n')
            fn.write(dataset_info + '\n')
            fn.close()

            self.evaluate_log = os.path.join(self.save_dir, 'evaluate_{}.log'.format(today.strftime('%m-%d-%Y')))
        else:
            self.evaluate_log = os.path.join(self.save_dir, 'evaluate_sep.log')

        print(project_name_info)
        print(save_dir_info)
        print(dataset_info)

        if self.use_tensorboardX:
            # NOTE(review): the SummaryWriter import is commented out at the
            # top of this file -- enabling use_tensorboardX raises NameError.
            self.tensorboard_train_dir = os.path.join(self.save_dir, 'tensorboardX_train_logs')
            self.train_SummaryWriter = SummaryWriter(self.tensorboard_train_dir)
            self.tensorboard_eval_dir = os.path.join(self.save_dir, 'tensorboardX_eval_logs')
            self.eval_SummaryWriter = SummaryWriter(self.tensorboard_eval_dir)
            self.tensorboard_num_display_per_epoch = 5
            self.val_display_freq = 10

    def _initialize_networks(self):
        '''Move every model in self.model_dict to the device and init its weights.'''
        for name, model in self.model_dict.items():
            model.train().to(self.device)
            init_weights(model, net_name=name, init_type='normal', gain=0.02)

    def _get_scheduler(self, optim_type='linear'):
        '''
        Attach a scheduler to every optimizer listed in self.optim_name.
        if type is None  -> all optim use the default ('linear') scheduler
        if type is str   -> all optim use this type of scheduler
        if type is list  -> each optim uses its own scheduler
        '''
        # BUG FIX: the docstring promised None -> default, but None used to
        # fall through to the RuntimeError below.
        if optim_type is None:
            optim_type = 'linear'
        self.scheduler_list = []
        if isinstance(optim_type, str):
            for name in self.optim_name:
                self.scheduler_list.append(apply_scheduler(getattr(self, name), lr_policy=optim_type, num_epoch=0.6*self.total_epoch_num,
                                                           total_num_epoch=self.total_epoch_num))
        elif isinstance(optim_type, list):
            for name, optim in zip(self.optim_name, optim_type):
                self.scheduler_list.append(apply_scheduler(getattr(self, name), lr_policy=optim, num_epoch=0.6*self.total_epoch_num,
                                                           total_num_epoch=self.total_epoch_num))
        else:
            raise RuntimeError("optim type should be either string or list!")

    def _init_apex(self, Num_losses):
        '''Wrap all registered models/optimizers with apex amp at opt level O1.'''
        model_list = []
        optim_list = []
        for m in self.model_name:
            model_list.append(getattr(self, m))
        for o in self.optim_name:
            optim_list.append(getattr(self, o))

        # NOTE(review): amp.initialize patches the models/optimizers in place;
        # the returned lists are deliberately not written back to self.
        model_list, optim_list = amp.initialize(model_list, optim_list, opt_level="O1", num_losses=Num_losses)

    def _check_parallel(self):
        '''Wrap every registered model in nn.DataParallel when >1 GPU is visible.'''
        if torch.cuda.device_count() > 1:
            for name in self.model_name:
                setattr(self, name, nn.DataParallel(getattr(self, name)))
setattr(self, name, nn.DataParallel(getattr(self, name))) 156 | 157 | def _check_distribute(self): 158 | if torch.cuda.device_count() > 1: 159 | # world size is number of process participat in the job 160 | # torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') 161 | # mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) 162 | if use_apex: 163 | setattr(self, name, apex.parallel.DistributedDataParallel(getattr(self, name))) 164 | else: 165 | for name in self.model_name: 166 | setattr(self, name, nn.DistributedDataParallel(getattr(self, name))) 167 | 168 | def _set_models_train(self, model_name): 169 | for name in model_name: 170 | getattr(self, name).train() 171 | 172 | def _set_models_eval(self, model_name): 173 | for name in model_name: 174 | getattr(self, name).eval() 175 | 176 | def _set_models_float(self, model_name): 177 | for name in model_name: 178 | for layers in getattr(self, name).modules(): 179 | layers.float() 180 | 181 | def save_models(self, model_list, mode): 182 | ''' 183 | mode include best, latest, or a number (epoch) 184 | save as non-dataparallel state_dict 185 | ''' 186 | for model_name in model_list: 187 | if mode == 'latest': 188 | path_to_save_paramOnly = os.path.join(self.save_dir, 'latest_{}.pth'.format(model_name)) 189 | elif mode == 'best': 190 | path_to_save_paramOnly = os.path.join(self.save_dir, 'best_{}.pth'.format(model_name)) 191 | elif isinstance(mode, int): 192 | path_to_save_paramOnly = os.path.join(self.save_dir, 'epoch-{}_{}.pth'.format(str(mode), model_name)) 193 | 194 | try: 195 | state_dict = getattr(self, model_name).module.state_dict() 196 | except AttributeError: 197 | state_dict = getattr(self, model_name).state_dict() 198 | 199 | model_weights = copy.deepcopy(state_dict) 200 | torch.save(model_weights, path_to_save_paramOnly) 201 | 202 | def _load_models(self, model_list, mode, isTrain=False, model_path=None): 203 | if model_path is None: 204 | model_path = 
self.save_dir 205 | 206 | for model_name in model_list: 207 | if mode == 'latest': 208 | path = os.path.join(model_path, 'latest_{}.pth'.format(model_name)) 209 | elif mode == 'best': 210 | path = os.path.join(model_path, 'best_{}.pth'.format(model_name)) 211 | elif isinstance(mode, int): 212 | path = os.path.join(model_path, 'epoch-{}_{}.pth'.format(str(mode), model_name)) 213 | else: 214 | raise RuntimeError("Mode not implemented") 215 | 216 | state_dict = torch.load(path) 217 | 218 | try: 219 | getattr(self, model_name).load_state_dict(state_dict) 220 | except RuntimeError as e: 221 | print(e.message) 222 | # in the case of parallel model loading non-parallel state_dict || add module to all keys 223 | new_state_dict = OrderedDict() 224 | for k, v in state_dict.items(): 225 | # print(k) 226 | # print('module.' + k) 227 | name = 'module.' + k # add `module.` 228 | new_state_dict[name] = v 229 | 230 | getattr(self, model_name).load_state_dict(new_state_dict) 231 | 232 | if isTrain: 233 | getattr(self, model_name).to(self.device).train() 234 | else: 235 | getattr(self, model_name).to(self.device).eval() 236 | 237 | def _load_models_with_different_name(self, model_list, name_list, mode, isTrain=False, model_path=None): 238 | if model_path is None: 239 | model_path = self.save_dir 240 | 241 | for model_name, name_here in zip(model_list, name_list): 242 | if mode == 'latest': 243 | path = os.path.join(model_path, 'latest_{}.pth'.format(model_name)) 244 | elif mode == 'best': 245 | path = os.path.join(model_path, 'best_{}.pth'.format(model_name)) 246 | elif isinstance(mode, int): 247 | path = os.path.join(model_path, 'epoch-{}_{}.pth'.format(str(mode), model_name)) 248 | else: 249 | raise RuntimeError("Mode not implemented") 250 | 251 | state_dict = torch.load(path) 252 | 253 | try: 254 | getattr(self, name_here).load_state_dict(state_dict) 255 | except RuntimeError: 256 | # in the case of parallel model loading non-parallel state_dict || add module to all keys 257 | 
new_state_dict = OrderedDict() 258 | for k, v in state_dict.items(): 259 | # print(k) 260 | # print('module.' + k) 261 | name = 'module.' + k # add `module.` 262 | new_state_dict[name] = v 263 | 264 | getattr(self, name_here).load_state_dict(new_state_dict) 265 | 266 | if isTrain: 267 | getattr(self, name_here).to(self.device).train() 268 | else: 269 | getattr(self, name_here).to(self.device).eval() 270 | 271 | def print_and_write_loss_summary(self, iterCount, totalCount, name_list, value_list, log_file): 272 | if len(name_list) != len(value_list): 273 | min_len = min(len(name_list), len(value_list)) 274 | name_list = name_list[:min_len] 275 | value_list = value_list[:min_len] 276 | 277 | loss_summary = '\t{}/{}'.format(iterCount, totalCount) 278 | for loss_name, loss_value in zip(name_list, value_list): 279 | loss_summary += ' {}: {:.4f}'.format(loss_name, loss_value) 280 | 281 | fn = open(log_file, 'a') 282 | print(loss_summary) 283 | fn.write(loss_summary + '\n') 284 | fn.close() 285 | 286 | # def save_tensor2np(self, tensor, name, epoch, path=None): 287 | # if path == None: 288 | # path = self.save_dir 289 | # # expect a 4D tensor 290 | # # count = 0 291 | # # for i in range(tensor.size(0)): 292 | # generated_sample = tensor.detach().cpu().numpy() 293 | # # section = 'rgb' 294 | # # if not rgb: 295 | # # section = 'depth' 296 | # generated_sample_save_path = os.path.join(path, 'tensor2np', 'Epoch-%s_%s.npy' % (epoch, name)) 297 | # if not os.path.exists(os.path.join(path, 'tensor2np')): 298 | # os.makedirs(os.path.join(path, 'tensor2np')) # # ./expriments/project_name/generated_samples/val/section/1_depth_generated.npy 299 | 300 | # # print(img_target.dtype, img_target.shape) torch.float32 torch.Size([3, 240, 320]) 301 | # np.save(generated_sample_save_path, generated_sample) 302 | 303 | def write_2_tensorboardX(self, writer, input_tensor, name, mode, count, nrow=None, normalize=True, value_range=(-1.0, 1.0)): 304 | # assume dict:{'name': data} 305 | if mode == 
# -------------------------------------------------------------------------- #
# training/CPP_PDA_joint_training.py -- joint CPP + PDA depth training model.
# -------------------------------------------------------------------------- #
import os, time, sys
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.autograd import Variable

import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import make_grid

from models.T2Net import _UNetGenerator, init_weights

from utils.metrics import *
from utils.cpp_encoding import get_extrinsic_channel
from utils.pda_aug import warp_image_depth_with_pose_augmentation

from training.base_model import set_requires_grad, base_model

try:
    from apex import amp
except ImportError:
    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run with apex.")

class CPP_PDA_joint_training_model(base_model):
    '''
    Joint training of the camera-pose-encoding channel (CPP) and pose-based
    data augmentation (PDA) on top of a UNet depth estimator.
    '''
    def __init__(self, args, training_dataloader, testing_dataloader):
        super(CPP_PDA_joint_training_model, self).__init__(args)
        self._initialize_training()

        self.training_dataloader = training_dataloader
        self.testing_dataloader = testing_dataloader

        # depth normalization range (metric)
        self.MIN_DEPTH_CLIP = 1.0
        self.MAX_DEPTH_CLIP = 10.0

        # depth range used during evaluation (metric)
        self.EVAL_DEPTH_MIN = 1.0
        self.EVAL_DEPTH_MAX = 10.0

        self.CEILING_HEIGHT = 3.0  # predefined ceiling height for the CPP channel

        self.save_evaluate_steps = 10

        # 3 RGB channels + 1 camera-pose-encoding channel in, 1 depth channel out
        self.depthEstModel = _UNetGenerator(input_nc = 4, output_nc = 1)
        self.model_name = ['depthEstModel']

        if self.is_train:
            self.depth_optimizer = optim.Adam(self.depthEstModel.parameters(), lr=self.base_lr, betas=(0.5, 0.999))
            self.optim_name = ['depth_optimizer']
            self._get_scheduler()
            self.L1Loss = nn.L1Loss()
            self._initialize_networks(['depthEstModel'])

            # apex can only be applied to CUDA models
            if self.use_apex:
                self._init_apex(Num_losses=2)

        self.EVAL_best_loss = float('inf')
        self.EVAL_best_model_epoch = 0
        self.EVAL_all_results = {}

        self._check_parallel()

    def _get_project_name(self):
        '''Default project-name prefix used when args.project_name is None.'''
        return 'CPP_PDA_joint_training_model'

    def _initialize_networks(self, model_name):
        '''Move the listed models to the device in train mode and init weights.'''
        for name in model_name:
            getattr(self, name).train().to(self.device)
            init_weights(getattr(self, name), net_name=name, init_type='normal', gain=0.02)

    def compute_depth_loss(self, rgb, gt_depth, mask=None):
        '''
        L1 loss between the depth estimator's final output and gt_depth,
        optionally restricted to mask; the rgb input is detached so the loss
        only trains the depth estimator.
        '''
        predicted_depth = self.depthEstModel(rgb.detach())[-1]
        if mask is None:
            return self.L1Loss(predicted_depth, gt_depth)
        return self.L1Loss(predicted_depth[mask], gt_depth[mask])

    def train(self):
        phase = 'train'
        since = time.time()
        best_loss = float('inf')

        self.train_display_freq = len(self.training_dataloader)

        tensorboardX_iter_count = 0
        for epoch in range(self.total_epoch_num):
            # per-epoch banner, echoed to stdout and the train log
            print('\nEpoch {}/{}'.format(epoch + 1, self.total_epoch_num))
            print('-' * 10)
            fn = open(self.train_log, 'a')
            fn.write('\nEpoch {}/{}\n'.format(epoch + 1, self.total_epoch_num))
            fn.write('--'*5+'\n')
fn.close() 98 | 99 | self._set_models_train(['depthEstModel']) 100 | 101 | # Iterate over data. 102 | iterCount = 0 103 | 104 | for sample_dict in self.training_dataloader: 105 | imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth'] 106 | extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float 107 | if "intrinsic" in sample_dict.keys(): 108 | # for ScanNet only 109 | intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py 110 | focal_length = intrinsic_para[:, :2] 111 | p_pt = intrinsic_para[:, 2:] 112 | else: 113 | # for interiorNet, fixed intrincis for all data 114 | focal_length = 300 115 | p_pt = (120, 160) 116 | 117 | if 'restricted' in self.training_set_name: 118 | encoding_pitch_noise_tensor = torch.zeros(extrinsic_para.shape[0], dtype=torch.float32).uniform_(-self.ENCODING_PITCH_NOISE, self.ENCODING_PITCH_NOISE) 119 | encoding_height_noise_tensor = torch.zeros(extrinsic_para.shape[0], dtype=torch.float32).uniform_(-self.ENCODING_HEIGHT_NOISE, self.ENCODING_HEIGHT_NOISE) 120 | extrinsic_para[:, 2] += encoding_height_noise_tensor 121 | extrinsic_para[:, 3] += encoding_pitch_noise_tensor 122 | # print('noise injection~') 123 | if 'ScanNet' in self.training_set_name: 124 | encoding_roll_noise_tensor = torch.zeros(extrinsic_para.shape[0], dtype=torch.float32).uniform_(-self.ENCODING_ROLL_NOISE, self.ENCODING_ROLL_NOISE) 125 | extrinsic_para[:, 4] += encoding_roll_noise_tensor 126 | 127 | # PDA step 128 | warp_return_dict = warp_image_depth_with_pose_augmentation(imageTensor, depthGTTensor, extrinsic_para, focal_length, p_pt, self.training_set_name, augmentation=sample_dict['augmentation']) 129 | imageTensorWarped, depthGTTensorWarped, extrinsic_para_W = warp_return_dict['image_warped'], warp_return_dict['depth_warped'], warp_return_dict['pose_perturbed'] 130 | 131 | # CPP step 132 | extrinsic_channel = get_extrinsic_channel(imageTensor, focal_length, p_pt, extrinsic_para, self.CEILING_HEIGHT, 
augmentation=sample_dict['augmentation']) 133 | extrinsic_channel_W = get_extrinsic_channel(imageTensorWarped, focal_length, p_pt, extrinsic_para_W, self.CEILING_HEIGHT, augmentation=sample_dict['augmentation']) 134 | 135 | imageTensor_C = torch.cat((imageTensor, extrinsic_channel), dim=1) 136 | imageTensor_C = imageTensor_C.to(self.device) 137 | depthGTTensor = depthGTTensor.to(self.device) # [B_size, 1, 240, 320] 138 | valid_mask = (depthGTTensor >= -1.) & (depthGTTensor <= 1.) 139 | 140 | imageTensorWarped_C = torch.cat((imageTensorWarped, extrinsic_channel_W), dim=1) 141 | imageTensorWarped_C = imageTensorWarped_C.to(self.device) 142 | depthGTTensorWarped = depthGTTensorWarped.to(self.device) # [B_size, 1, 240, 320] 143 | warped_valid_mask = (depthGTTensorWarped >= -1.) & (depthGTTensorWarped <= 1.) 144 | 145 | with torch.set_grad_enabled(phase=='train'): 146 | 147 | total_loss = 0. 148 | ############# train the depthEstimator 149 | self.depth_optimizer.zero_grad() 150 | depth_loss = self.compute_depth_loss(imageTensor_C, depthGTTensor, valid_mask) 151 | total_loss += depth_loss 152 | 153 | warped_depth_loss = self.compute_depth_loss(imageTensorWarped_C, depthGTTensorWarped, warped_valid_mask) 154 | total_loss += warped_depth_loss 155 | 156 | if self.use_apex: 157 | with amp.scale_loss(total_loss, self.depth_optimizer) as total_loss_scaled: 158 | total_loss_scaled.backward() 159 | else: 160 | total_loss.backward() 161 | 162 | self.depth_optimizer.step() 163 | 164 | iterCount += 1 165 | if iterCount % 20 == 0: 166 | loss_name = ['total_loss', 'depth_loss', 'warped_depth_loss'] 167 | loss_value = [total_loss, depth_loss, warped_depth_loss] 168 | self.print_and_write_loss_summary(iterCount, len(self.training_dataloader), loss_name, loss_value, self.train_log) 169 | 170 | # take step in optimizer 171 | for scheduler in self.scheduler_list: 172 | scheduler.step() 173 | # print learning rate 174 | for optim in self.optim_name: 175 | lr = getattr(self, 
    def evaluate(self, mode):
        '''
        Run depth-estimation evaluation over the testing dataloader.

        mode: choose from <int> or 'best'
            <int> is the epoch number, used for in-training evaluation
            'best' is used for after-training mode (loads the saved best model)

        Returns a dict with keys 'rgb', 'depth_pred', 'depth_gt', 'extrinsic'
        (each a tensor with the dummy first row stripped) and 'ind_results'
        (per-sample metrics from Result.individual_results).
        '''

        set_name = 'test'
        eval_model_list = ['depthEstModel']

        if isinstance(mode, int) and self.is_train:
            # In-training evaluation: switch model to eval mode and append to
            # the evaluate log ('w' only on the very first evaluation, when
            # EVAL_best_loss is still the +inf sentinel).
            self._set_models_eval(eval_model_list)
            if self.EVAL_best_loss == float('inf'):
                fn = open(self.evaluate_log, 'w')
            else:
                fn = open(self.evaluate_log, 'a')

            fn.write('Evaluating with mode: {} | dataset: {} \n'.format(mode, self.testing_set_name))
            fn.write('\tEvaluation range min: {} | max: {} \n'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX))
            fn.close()

        else:
            # Post-training evaluation: load the requested checkpoint ('best' or epoch).
            self._load_models(eval_model_list, mode)

        print('Evaluating with mode: {} | dataset: {}'.format(mode, self.testing_set_name))
        print('\tEvaluation range min: {} | max: {}'.format(self.EVAL_DEPTH_MIN, self.EVAL_DEPTH_MAX))

        total_loss = 0.
        count = 0

        # Dummy first rows so torch.cat can be used to accumulate batches;
        # they are stripped with [1:] before computing metrics / returning.
        predTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu')
        grndTensor = torch.zeros((1, 1, self.H, self.W)).to('cpu')
        imgTensor = torch.zeros((1, 3, self.H, self.W)).to('cpu')
        extTensor = torch.zeros((1, 6)).to('cpu')
        idx = 0

        # tensorboardX_iter_count = 0
        with torch.no_grad():
            for sample_dict in self.testing_dataloader:
                imageTensor, depthGTTensor = sample_dict['rgb'], sample_dict['depth']
                extrinsic_para = sample_dict['extrinsic'].float() # otherwise mismatch data type double and float

                if "intrinsic" in sample_dict.keys():
                    # for ScanNet only: per-sample intrinsics
                    intrinsic_para = sample_dict['intrinsic'].float() # fx, fy, px, py
                    focal_length = intrinsic_para[:, :2]
                    p_pt = intrinsic_para[:, 2:]
                else:
                    # for interiorNet: fixed intrinsics for all data
                    focal_length = 300
                    p_pt = (120, 160)

                # CPP: append the camera-pose encoding channel to the RGB input.
                # NOTE: unlike training, no pose noise/augmentation is applied here.
                extrinsic_channel = get_extrinsic_channel(imageTensor, focal_length, p_pt, extrinsic_para, self.CEILING_HEIGHT)
                imageTensor_C = torch.cat((imageTensor, extrinsic_channel), dim=1)
                # Mask of pixels inside the metric evaluation depth range.
                # NOTE(review): assumes the dataloader's 'depth' here is metric
                # (EVAL_DEPTH_MIN/MAX units), not [-1, 1]-normalized as in the
                # training loop — confirm against the testing dataloader.
                valid_mask = np.logical_and(depthGTTensor >= self.EVAL_DEPTH_MIN, depthGTTensor <= self.EVAL_DEPTH_MAX)

                idx += imageTensor.shape[0]
                print('epoch {}: have processed {} number samples in {} set'.format(mode, str(idx), set_name))
                imageTensor_C = imageTensor_C.to(self.device)
                depthGTTensor = depthGTTensor.to(self.device) # real depth

                if self.is_train and self.use_apex:
                    # Run the forward pass in full precision when apex AMP is active;
                    # [-1] selects the final output of the multi-scale prediction list.
                    with amp.disable_casts():
                        predDepth = self.depthEstModel(imageTensor_C)[-1].detach().to('cpu')
                else:
                    predDepth = self.depthEstModel(imageTensor_C)[-1].detach().to('cpu')

                # recover real depth: map network output from [-1, 1] back to
                # [MIN_DEPTH_CLIP, MAX_DEPTH_CLIP] meters.
                predDepth = ((predDepth + 1.0) * 0.5 * (self.MAX_DEPTH_CLIP - self.MIN_DEPTH_CLIP)) + self.MIN_DEPTH_CLIP

                depthGTTensor = depthGTTensor.detach().to('cpu')
                predTensor = torch.cat((predTensor, predDepth), dim=0)
                grndTensor = torch.cat((grndTensor, depthGTTensor), dim=0)
                imgTensor = torch.cat((imgTensor, imageTensor.to('cpu')), dim=0)
                extTensor = torch.cat((extTensor, extrinsic_para), dim=0)

                if isinstance(mode, int) and self.is_train:
                    # Validation loss (for best-model selection) is L1 over the
                    # in-range pixels of the current batch only.
                    eval_depth_loss = self.L1Loss(predDepth[valid_mask], depthGTTensor[valid_mask])
                    total_loss += eval_depth_loss.detach().cpu()

                count += 1

        if isinstance(mode, int) and self.is_train:
            # Mean per-batch L1 loss across the test set.
            validation_loss = (total_loss / count)

        # Aggregate standard depth metrics (abs_rel, rmse, deltas, ...) over all samples.
        results_nyu = Result(mask_min=self.EVAL_DEPTH_MIN, mask_max=self.EVAL_DEPTH_MAX)
        results_nyu.evaluate(predTensor[1:], grndTensor[1:])
        individual_results = results_nyu.individual_results(predTensor[1:], grndTensor[1:])

        result1 = '\tabs_rel:{:.3f}, sq_rel:{:.3f}, rmse:{:.3f}, rmse_log:{:.3f}, mae:{:.3f} '.format(
            results_nyu.absrel,results_nyu.sqrel,results_nyu.rmse,results_nyu.rmselog,results_nyu.mae)
        result2 = '\t[<1.25]:{:.3f}, [<1.25^2]:{:.3f}, [<1.25^3]::{:.3f}'.format(results_nyu.delta1,results_nyu.delta2,results_nyu.delta3)

        print(result1)
        print(result2)

        if isinstance(mode, int) and self.is_train:
            # Record metrics for this epoch and update the running best model
            # whenever the validation loss improves.
            self.EVAL_all_results[str(mode)] = result1 + '\t' + result2

            if validation_loss.item() < self.EVAL_best_loss:
                self.EVAL_best_loss = validation_loss.item()
                self.EVAL_best_model_epoch = mode
                self.save_models(self.model_name, mode='best')

            best_model_summary = '\tCurrent eval loss {:.4f}, current best loss {:.4f}, current best model {}\n'.format(validation_loss.item(), self.EVAL_best_loss, self.EVAL_best_model_epoch)
            print(best_model_summary)

            fn = open(self.evaluate_log, 'a')
            fn.write(result1 + '\n')
            fn.write(result2 + '\n')
            fn.write(best_model_summary + '\n')
            fn.close()

        # Strip the dummy first row from every accumulator before returning.
        return_dict = {}
        return_dict['rgb'] = imgTensor[1:]
        return_dict['depth_pred'] = predTensor[1:]
        return_dict['depth_gt'] = grndTensor[1:]
        return_dict['extrinsic'] = extTensor[1:]
        return_dict['ind_results'] = individual_results

        return return_dict
scene0112_02_frame_000473 50 | scene0303_00_frame_000835 51 | scene0293_00_frame_000404 52 | scene0265_00_frame_000885 53 | scene0331_01_frame_003084 54 | scene0207_01_frame_001837 55 | scene0260_00_frame_001117 56 | scene0099_00_frame_000093 57 | scene0306_01_frame_000700 58 | scene0394_01_frame_001101 59 | scene0206_02_frame_001696 60 | scene0143_01_frame_001653 61 | scene0446_01_frame_001073 62 | scene0099_00_frame_000009 63 | scene0134_00_frame_000637 64 | scene0331_01_frame_003628 65 | scene0419_01_frame_001358 66 | scene0099_01_frame_000872 67 | scene0312_01_frame_001119 68 | scene0042_01_frame_001064 69 | scene0268_00_frame_000608 70 | scene0306_01_frame_000665 71 | scene0116_02_frame_001187 72 | scene0116_02_frame_001795 73 | scene0472_00_frame_000883 74 | scene0331_01_frame_002689 75 | scene0111_01_frame_000099 76 | scene0241_02_frame_000286 77 | scene0072_00_frame_000656 78 | scene0116_02_frame_001439 79 | scene0207_01_frame_001915 80 | scene0408_00_frame_001738 81 | scene0112_00_frame_000761 82 | scene0419_01_frame_001311 83 | scene0422_00_frame_000983 84 | scene0072_00_frame_000750 85 | scene0303_00_frame_001359 86 | scene0099_01_frame_000807 87 | scene0404_00_frame_000058 88 | scene0331_01_frame_005246 89 | scene0099_01_frame_000870 90 | scene0268_00_frame_000612 91 | scene0170_02_frame_000339 92 | scene0029_00_frame_001825 93 | scene0099_00_frame_000315 94 | scene0440_02_frame_000330 95 | scene0419_01_frame_002170 96 | scene0395_00_frame_001092 97 | scene0465_00_frame_002166 98 | scene0465_00_frame_005618 99 | scene0383_02_frame_001220 100 | scene0111_01_frame_002128 101 | scene0465_00_frame_005692 102 | scene0272_00_frame_000991 103 | scene0465_00_frame_003202 104 | scene0260_00_frame_000780 105 | scene0051_01_frame_001126 106 | scene0134_02_frame_000077 107 | scene0104_00_frame_000883 108 | scene0241_02_frame_000294 109 | scene0292_00_frame_000202 110 | scene0134_02_frame_000062 111 | scene0042_01_frame_000806 112 | scene0422_00_frame_000994 113 | 
scene0219_00_frame_000494 114 | scene0394_01_frame_001086 115 | scene0134_02_frame_000617 116 | scene0280_02_frame_001314 117 | scene0219_00_frame_000793 118 | scene0332_02_frame_000471 119 | scene0422_00_frame_000874 120 | scene0390_00_frame_001071 121 | scene0144_00_frame_000909 122 | scene0465_00_frame_004423 123 | scene0312_01_frame_000686 124 | scene0207_01_frame_001346 125 | scene0195_00_frame_000423 126 | scene0421_00_frame_000474 127 | scene0383_02_frame_001161 128 | scene0312_01_frame_002020 129 | scene0206_02_frame_001198 130 | scene0435_00_frame_002049 131 | scene0465_00_frame_006115 132 | scene0329_00_frame_001366 133 | scene0072_02_frame_000599 134 | scene0453_01_frame_000646 135 | scene0116_02_frame_001634 136 | scene0112_00_frame_000491 137 | scene0404_00_frame_003944 138 | scene0378_00_frame_000770 139 | scene0092_00_frame_001435 140 | scene0331_01_frame_002146 141 | scene0073_03_frame_000732 142 | scene0312_01_frame_002004 143 | scene0331_01_frame_004613 144 | scene0435_00_frame_003103 145 | scene0382_00_frame_000471 146 | scene0126_02_frame_000048 147 | scene0340_00_frame_001437 148 | scene0179_00_frame_000376 149 | scene0312_01_frame_000449 150 | scene0435_00_frame_002723 151 | scene0394_01_frame_000811 152 | scene0122_01_frame_000138 153 | scene0297_00_frame_001123 154 | scene0198_00_frame_000572 155 | scene0440_02_frame_000896 156 | scene0360_00_frame_001451 157 | scene0203_01_frame_000841 158 | scene0016_01_frame_001653 159 | scene0371_00_frame_000383 160 | scene0084_01_frame_000440 161 | scene0394_01_frame_001498 162 | scene0383_02_frame_000837 163 | scene0072_00_frame_001010 164 | scene0465_00_frame_000036 165 | scene0265_00_frame_001281 166 | scene0303_00_frame_000792 167 | scene0170_02_frame_000355 168 | scene0329_00_frame_000163 169 | scene0029_00_frame_000441 170 | scene0087_01_frame_000500 171 | scene0422_00_frame_000919 172 | scene0383_02_frame_001374 173 | scene0378_00_frame_000264 174 | scene0287_00_frame_000505 175 | 
scene0195_00_frame_000466 176 | scene0143_01_frame_001004 177 | scene0440_02_frame_000899 178 | scene0116_02_frame_000326 179 | scene0465_00_frame_000294 180 | scene0294_00_frame_000589 181 | scene0134_00_frame_000457 182 | scene0219_00_frame_000754 183 | scene0042_01_frame_000631 184 | scene0122_01_frame_000121 185 | scene0029_00_frame_001385 186 | scene0170_02_frame_000684 187 | scene0219_00_frame_000895 188 | scene0191_00_frame_000813 189 | scene0101_01_frame_001640 190 | scene0383_02_frame_001169 191 | scene0268_00_frame_000312 192 | scene0042_01_frame_001054 193 | scene0435_00_frame_002022 194 | scene0331_01_frame_005792 195 | scene0112_02_frame_000029 196 | scene0134_00_frame_000753 197 | scene0112_01_frame_000518 198 | scene0404_00_frame_003584 199 | scene0116_02_frame_000069 200 | scene0096_01_frame_001169 201 | scene0435_02_frame_001316 202 | scene0404_00_frame_002711 203 | scene0419_01_frame_000178 204 | scene0470_01_frame_000351 205 | scene0143_01_frame_000883 206 | scene0144_00_frame_000726 207 | scene0418_02_frame_001615 208 | scene0435_03_frame_001570 209 | scene0354_00_frame_000319 210 | scene0465_00_frame_004042 211 | scene0265_00_frame_000267 212 | scene0144_00_frame_001012 213 | scene0087_01_frame_000337 214 | scene0084_01_frame_001156 215 | scene0421_00_frame_001208 216 | scene0294_00_frame_000172 217 | scene0419_01_frame_000795 218 | scene0418_02_frame_001616 219 | scene0294_00_frame_001965 220 | scene0331_01_frame_005479 221 | scene0272_00_frame_000605 222 | scene0134_00_frame_000730 223 | scene0332_02_frame_000576 224 | scene0297_00_frame_000810 225 | scene0099_01_frame_000740 226 | scene0126_02_frame_000291 227 | scene0096_01_frame_000006 228 | scene0313_01_frame_000279 229 | scene0041_00_frame_000916 230 | scene0475_00_frame_000766 231 | scene0350_01_frame_001202 232 | scene0465_00_frame_002488 233 | scene0088_00_frame_000647 234 | scene0421_02_frame_000953 235 | scene0051_01_frame_000050 236 | scene0435_00_frame_000883 237 | 
scene0221_01_frame_000606 238 | scene0122_01_frame_000759 239 | scene0465_00_frame_004921 240 | scene0041_00_frame_001215 241 | scene0370_01_frame_001120 242 | scene0017_02_frame_000099 243 | scene0312_01_frame_001822 244 | scene0131_00_frame_000316 245 | scene0100_02_frame_000208 246 | scene0096_02_frame_001072 247 | scene0312_01_frame_000171 248 | scene0087_02_frame_000713 249 | scene0475_00_frame_000731 250 | scene0331_01_frame_004993 251 | scene0221_01_frame_000700 252 | scene0465_00_frame_005934 253 | scene0292_00_frame_000274 254 | scene0051_01_frame_001829 255 | scene0113_01_frame_000074 256 | scene0331_01_frame_004985 257 | scene0104_00_frame_000864 258 | scene0084_01_frame_001200 259 | scene0440_02_frame_000406 260 | scene0075_00_frame_000549 261 | scene0084_01_frame_001173 262 | scene0260_00_frame_001417 263 | scene0029_00_frame_001808 264 | scene0152_01_frame_000902 265 | scene0422_00_frame_000676 266 | scene0371_00_frame_000308 267 | scene0109_00_frame_001230 268 | scene0312_01_frame_000745 269 | scene0419_01_frame_001607 270 | scene0376_00_frame_000554 271 | scene0111_01_frame_001219 272 | scene0472_00_frame_000713 273 | scene0203_01_frame_000461 274 | scene0202_00_frame_000787 275 | scene0100_02_frame_000153 276 | scene0435_00_frame_002762 277 | scene0088_00_frame_000135 278 | scene0112_02_frame_000003 279 | scene0191_00_frame_000888 280 | scene0303_00_frame_000710 281 | scene0088_00_frame_000176 282 | scene0303_00_frame_001473 283 | scene0286_02_frame_000791 284 | scene0087_01_frame_000585 285 | scene0100_02_frame_000958 286 | scene0410_01_frame_001364 287 | scene0087_01_frame_000608 288 | scene0238_00_frame_000485 289 | scene0312_01_frame_001796 290 | scene0418_02_frame_000018 291 | scene0313_01_frame_000769 292 | scene0313_01_frame_000431 293 | scene0435_00_frame_002536 294 | scene0260_00_frame_000649 295 | scene0144_00_frame_000593 296 | scene0198_00_frame_000971 297 | scene0100_02_frame_000679 298 | scene0371_00_frame_000218 299 | 
scene0272_00_frame_001372 300 | scene0041_00_frame_001367 301 | scene0134_00_frame_000094 302 | scene0017_02_frame_000337 303 | scene0342_00_frame_000304 304 | scene0453_01_frame_000687 305 | scene0170_02_frame_000424 306 | scene0073_03_frame_001135 307 | scene0383_02_frame_001123 308 | scene0286_02_frame_000810 309 | scene0382_00_frame_000171 310 | scene0409_01_frame_001089 311 | scene0303_00_frame_000871 312 | scene0331_01_frame_003404 313 | scene0394_01_frame_001461 314 | scene0390_00_frame_001625 315 | scene0331_01_frame_001080 316 | scene0312_01_frame_001842 317 | scene0051_01_frame_001327 318 | scene0393_01_frame_000369 319 | scene0472_00_frame_001521 320 | scene0265_00_frame_000810 321 | scene0292_00_frame_000325 322 | scene0084_01_frame_000037 323 | scene0347_00_frame_000552 324 | scene0470_01_frame_000385 325 | scene0421_02_frame_000978 326 | scene0404_00_frame_000272 327 | scene0465_00_frame_005043 328 | scene0191_00_frame_000785 329 | scene0442_00_frame_001154 330 | scene0329_00_frame_000546 331 | scene0146_00_frame_000058 332 | scene0435_00_frame_003060 333 | scene0264_00_frame_001232 334 | scene0303_02_frame_000199 335 | scene0017_02_frame_000294 336 | scene0134_00_frame_000693 337 | scene0332_02_frame_000569 338 | scene0265_00_frame_000270 339 | scene0350_01_frame_000007 340 | scene0016_01_frame_000607 341 | scene0219_00_frame_000158 342 | scene0016_01_frame_000949 343 | scene0317_00_frame_000693 344 | scene0465_00_frame_001591 345 | scene0344_00_frame_000766 346 | scene0221_00_frame_000409 347 | scene0453_01_frame_000267 348 | scene0116_02_frame_001824 349 | scene0350_00_frame_001109 350 | scene0041_00_frame_002040 351 | scene0390_00_frame_000500 352 | scene0420_01_frame_001092 353 | scene0143_01_frame_001070 354 | scene0347_00_frame_000165 355 | scene0022_00_frame_001440 356 | scene0041_00_frame_001709 357 | scene0180_00_frame_000709 358 | scene0041_00_frame_001200 359 | scene0340_00_frame_001170 360 | scene0162_00_frame_000570 361 | 
scene0221_00_frame_000555 362 | scene0041_00_frame_002994 363 | scene0209_00_frame_000741 364 | scene0286_02_frame_000596 365 | scene0078_00_frame_000325 366 | scene0306_01_frame_000140 367 | scene0286_02_frame_001006 368 | scene0088_00_frame_000088 369 | scene0143_01_frame_000003 370 | scene0421_00_frame_001943 371 | scene0254_01_frame_001132 372 | scene0472_00_frame_000682 373 | scene0111_01_frame_002036 374 | scene0418_02_frame_000179 375 | scene0286_02_frame_000481 376 | scene0138_00_frame_000975 377 | scene0166_02_frame_001734 378 | scene0111_01_frame_001376 379 | scene0101_01_frame_001462 380 | scene0347_00_frame_000896 381 | scene0362_03_frame_000199 382 | scene0259_00_frame_000009 383 | scene0006_00_frame_001418 384 | scene0393_01_frame_001126 385 | scene0418_02_frame_000492 386 | scene0332_02_frame_001322 387 | scene0191_00_frame_001014 388 | scene0440_02_frame_000547 389 | scene0041_00_frame_001660 390 | scene0266_00_frame_001454 391 | scene0096_01_frame_000541 392 | scene0393_01_frame_001089 393 | scene0162_00_frame_000103 394 | scene0297_00_frame_000294 395 | scene0394_01_frame_000987 396 | scene0073_03_frame_000912 397 | scene0280_02_frame_000012 398 | scene0001_01_frame_000226 399 | scene0191_00_frame_000045 400 | scene0435_00_frame_002978 401 | scene0297_00_frame_000493 402 | scene0166_02_frame_000320 403 | scene0362_03_frame_000113 404 | scene0048_00_frame_000176 405 | scene0221_01_frame_000307 406 | scene0111_01_frame_000727 407 | scene0420_01_frame_000289 408 | scene0350_00_frame_001080 409 | scene0078_00_frame_000548 410 | scene0078_00_frame_000601 411 | scene0286_02_frame_000261 412 | scene0144_00_frame_000495 413 | scene0297_00_frame_000491 414 | scene0016_01_frame_000314 415 | scene0350_01_frame_001032 416 | scene0041_00_frame_001117 417 | scene0435_00_frame_002074 418 | scene0435_00_frame_001799 419 | scene0376_00_frame_000368 420 | scene0344_00_frame_000670 421 | scene0006_00_frame_001808 422 | scene0378_00_frame_001142 423 | 
scene0048_00_frame_000231 424 | scene0087_01_frame_000216 425 | scene0410_01_frame_001370 426 | scene0342_00_frame_000119 427 | scene0278_00_frame_000577 428 | scene0144_00_frame_001164 429 | scene0162_00_frame_000679 430 | scene0419_01_frame_001517 431 | scene0280_02_frame_000501 432 | scene0198_00_frame_000323 433 | scene0291_02_frame_000561 434 | scene0391_00_frame_000095 435 | scene0418_02_frame_000496 436 | scene0179_00_frame_001051 437 | scene0143_01_frame_000012 438 | scene0207_01_frame_001465 439 | scene0092_03_frame_000296 440 | scene0070_00_frame_000201 441 | scene0422_00_frame_001909 442 | scene0435_00_frame_001798 443 | scene0435_02_frame_000069 444 | scene0419_01_frame_002002 445 | scene0418_02_frame_001929 446 | scene0134_00_frame_000919 447 | scene0146_00_frame_000099 448 | scene0041_00_frame_000052 449 | scene0435_03_frame_001248 450 | scene0209_00_frame_000504 451 | scene0453_01_frame_001870 452 | scene0376_00_frame_000213 453 | scene0378_00_frame_000956 454 | scene0394_01_frame_001673 455 | scene0166_02_frame_002061 456 | scene0144_00_frame_001115 457 | scene0209_00_frame_000839 458 | scene0354_00_frame_001112 459 | scene0306_01_frame_000875 460 | scene0092_03_frame_002419 461 | scene0350_01_frame_001516 462 | scene0472_00_frame_000538 463 | scene0307_00_frame_001726 464 | scene0022_00_frame_000985 465 | scene0041_00_frame_000733 466 | scene0016_01_frame_001488 467 | scene0001_01_frame_001271 468 | scene0166_02_frame_001825 469 | scene0010_01_frame_000382 470 | scene0041_00_frame_003001 471 | scene0056_00_frame_000949 472 | scene0422_00_frame_001944 473 | scene0442_00_frame_000907 474 | scene0112_01_frame_000626 475 | scene0100_02_frame_000120 476 | scene0088_00_frame_000483 477 | scene0096_02_frame_001329 478 | scene0317_00_frame_000538 479 | scene0022_00_frame_000931 480 | scene0041_00_frame_001065 481 | scene0313_01_frame_000422 482 | scene0075_00_frame_000079 483 | scene0084_01_frame_000612 484 | scene0344_00_frame_000031 485 | 
scene0329_00_frame_001260 486 | scene0001_01_frame_000673 487 | scene0221_00_frame_000073 488 | scene0166_02_frame_000306 489 | scene0051_01_frame_001192 490 | scene0111_01_frame_002039 491 | scene0092_03_frame_001999 492 | scene0104_00_frame_000377 493 | scene0207_01_frame_001146 494 | scene0004_00_frame_000241 495 | scene0084_01_frame_000629 496 | scene0291_02_frame_001181 497 | scene0096_02_frame_001166 498 | scene0291_02_frame_001136 499 | scene0006_00_frame_000806 500 | scene0084_01_frame_000248 501 | scene0099_01_frame_000892 502 | scene0354_00_frame_000201 503 | scene0006_00_frame_000411 504 | scene0096_01_frame_000530 505 | scene0041_00_frame_002710 506 | scene0410_01_frame_001329 507 | scene0088_00_frame_000226 508 | scene0092_03_frame_002128 509 | scene0084_01_frame_001426 510 | scene0294_00_frame_001401 511 | scene0143_01_frame_001326 512 | scene0272_00_frame_001359 513 | scene0350_01_frame_001089 514 | scene0087_02_frame_000921 515 | scene0286_02_frame_000684 516 | scene0294_00_frame_001251 517 | scene0166_02_frame_001552 518 | scene0332_02_frame_000230 519 | scene0422_00_frame_000000 520 | scene0453_01_frame_001441 521 | scene0041_00_frame_001180 522 | scene0418_02_frame_000520 523 | scene0017_02_frame_000118 524 | scene0294_00_frame_000176 525 | scene0202_00_frame_000934 526 | scene0383_02_frame_000582 527 | scene0241_02_frame_000057 528 | scene0352_00_frame_000222 529 | scene0440_02_frame_000650 530 | scene0394_01_frame_001175 531 | scene0092_00_frame_001848 532 | scene0078_00_frame_000016 533 | scene0350_01_frame_001327 534 | scene0435_02_frame_000759 535 | scene0418_02_frame_000146 536 | scene0146_00_frame_000451 537 | scene0016_01_frame_000711 538 | scene0166_02_frame_000940 539 | scene0294_00_frame_001961 540 | scene0180_00_frame_000447 541 | scene0344_00_frame_000258 542 | scene0294_00_frame_000148 543 | scene0209_00_frame_001256 544 | scene0419_01_frame_000671 545 | scene0087_02_frame_001115 546 | scene0166_02_frame_000087 547 | 
scene0041_00_frame_000822 548 | scene0419_01_frame_001587 549 | scene0041_00_frame_002220 550 | scene0266_00_frame_002068 551 | scene0096_02_frame_001371 552 | scene0266_00_frame_001614 553 | scene0352_00_frame_000015 554 | scene0440_02_frame_000093 555 | scene0088_00_frame_000036 556 | scene0409_01_frame_000728 557 | scene0266_00_frame_000532 558 | scene0390_00_frame_000192 559 | scene0268_00_frame_000385 560 | scene0092_03_frame_002672 561 | scene0435_03_frame_000039 562 | scene0419_01_frame_001703 563 | scene0453_01_frame_002305 564 | scene0307_00_frame_000866 565 | scene0411_00_frame_000259 566 | scene0340_00_frame_000734 567 | scene0278_00_frame_000691 568 | scene0126_02_frame_001933 569 | scene0390_00_frame_000953 570 | scene0404_00_frame_000978 571 | scene0123_00_frame_000377 572 | scene0010_01_frame_000116 573 | scene0278_00_frame_000646 574 | scene0340_00_frame_000982 575 | scene0180_00_frame_001161 576 | scene0420_01_frame_001139 577 | scene0332_02_frame_000624 578 | scene0472_00_frame_001452 579 | scene0435_02_frame_000478 580 | scene0370_01_frame_001508 581 | scene0422_00_frame_000324 582 | scene0411_00_frame_000393 583 | scene0297_00_frame_000198 584 | scene0221_00_frame_000061 585 | scene0170_02_frame_000584 586 | scene0254_01_frame_000129 587 | scene0378_00_frame_000139 588 | scene0131_00_frame_000992 589 | scene0264_00_frame_000407 590 | scene0001_01_frame_000848 591 | scene0274_01_frame_000866 592 | scene0422_00_frame_001264 593 | scene0191_00_frame_000089 594 | scene0465_00_frame_001578 595 | scene0422_00_frame_002205 596 | scene0070_00_frame_000829 597 | scene0195_00_frame_000118 598 | scene0096_01_frame_000022 599 | scene0370_01_frame_001315 600 | scene0411_00_frame_000310 601 | scene0162_00_frame_000132 602 | scene0254_01_frame_000029 603 | scene0096_01_frame_000206 604 | scene0041_00_frame_002202 605 | scene0394_01_frame_001664 606 | scene0435_03_frame_000335 607 | scene0104_00_frame_000607 608 | scene0092_03_frame_000764 609 | 
scene0391_00_frame_000592 610 | scene0087_02_frame_001174 611 | scene0092_00_frame_002146 612 | scene0001_01_frame_001033 613 | scene0453_01_frame_000595 614 | scene0286_02_frame_000061 615 | scene0465_00_frame_002600 616 | scene0180_00_frame_000932 617 | scene0370_01_frame_000773 618 | scene0418_02_frame_000900 619 | scene0001_01_frame_000822 620 | scene0209_00_frame_001018 621 | scene0123_00_frame_000080 622 | scene0001_01_frame_001236 623 | scene0238_00_frame_000286 624 | scene0180_00_frame_000041 625 | scene0092_03_frame_001670 626 | scene0274_01_frame_000786 627 | scene0134_00_frame_000405 628 | scene0029_00_frame_000564 629 | scene0123_00_frame_000353 630 | scene0041_00_frame_000339 631 | scene0266_00_frame_000663 632 | scene0474_02_frame_001944 633 | scene0280_02_frame_001331 634 | scene0420_01_frame_000099 635 | scene0143_01_frame_001055 636 | scene0303_02_frame_000565 637 | scene0126_02_frame_001913 638 | scene0350_00_frame_001380 639 | scene0395_00_frame_001002 640 | scene0221_00_frame_000246 641 | scene0352_00_frame_000561 642 | scene0109_00_frame_000818 643 | scene0370_01_frame_001007 644 | scene0195_00_frame_001263 645 | scene0180_00_frame_000698 646 | scene0010_01_frame_001253 647 | scene0041_00_frame_002184 648 | scene0332_02_frame_001013 649 | scene0340_00_frame_000500 650 | scene0344_00_frame_000449 651 | scene0268_00_frame_000024 652 | scene0286_02_frame_000988 653 | scene0453_01_frame_000487 654 | scene0411_00_frame_000518 655 | scene0420_01_frame_000942 656 | scene0472_00_frame_000043 657 | scene0442_00_frame_000508 658 | scene0340_00_frame_000170 659 | scene0418_02_frame_002294 660 | scene0297_00_frame_000184 661 | scene0022_00_frame_000448 662 | scene0029_00_frame_001575 663 | scene0084_01_frame_000265 664 | scene0280_02_frame_000294 665 | scene0383_02_frame_000244 666 | scene0393_01_frame_001169 667 | scene0474_02_frame_000592 668 | scene0241_02_frame_001459 669 | scene0286_02_frame_001612 670 | scene0383_02_frame_001478 671 | 
scene0179_00_frame_000648 672 | scene0418_02_frame_000379 673 | scene0370_01_frame_001619 674 | scene0409_01_frame_000134 675 | scene0280_02_frame_001515 676 | scene0350_01_frame_000390 677 | scene0395_00_frame_000854 678 | scene0075_00_frame_000402 679 | scene0453_01_frame_000891 680 | scene0393_01_frame_000442 681 | scene0332_02_frame_000643 682 | scene0350_01_frame_001154 683 | scene0453_01_frame_000074 684 | scene0350_00_frame_001244 685 | scene0048_00_frame_001007 686 | scene0241_02_frame_001348 687 | scene0162_00_frame_000369 688 | scene0006_00_frame_001895 689 | scene0294_00_frame_002074 690 | scene0422_00_frame_001170 691 | scene0421_02_frame_001015 692 | scene0209_00_frame_000120 693 | scene0307_00_frame_000962 694 | scene0362_03_frame_000357 695 | scene0378_00_frame_000340 696 | scene0004_00_frame_000518 697 | scene0087_02_frame_000882 698 | scene0453_01_frame_000200 699 | scene0092_03_frame_002055 700 | scene0123_00_frame_000252 701 | scene0420_01_frame_001002 702 | scene0266_00_frame_001715 703 | scene0209_00_frame_000103 704 | scene0266_00_frame_001936 705 | scene0440_02_frame_000661 706 | scene0421_00_frame_000988 707 | scene0166_02_frame_000280 708 | scene0022_00_frame_001106 709 | scene0332_02_frame_000384 710 | scene0041_00_frame_000670 711 | scene0435_02_frame_000474 712 | scene0006_00_frame_001162 713 | scene0209_00_frame_001281 714 | scene0087_02_frame_000842 715 | scene0209_00_frame_001544 716 | scene0048_00_frame_000410 717 | scene0435_03_frame_000356 718 | scene0207_01_frame_000740 719 | scene0254_01_frame_000489 720 | scene0393_01_frame_001164 721 | scene0418_02_frame_001835 722 | scene0162_00_frame_000439 723 | scene0041_00_frame_000714 724 | scene0179_00_frame_000887 725 | scene0376_00_frame_000749 726 | scene0352_00_frame_000218 727 | scene0408_00_frame_001124 728 | scene0409_01_frame_000789 729 | scene0041_00_frame_000711 730 | scene0042_01_frame_000224 731 | scene0207_01_frame_001511 732 | scene0331_01_frame_004898 733 | 
scene0136_01_frame_000453 734 | scene0411_00_frame_000114 735 | scene0454_00_frame_000818 736 | scene0435_02_frame_001467 737 | scene0266_00_frame_001069 738 | scene0101_01_frame_001172 739 | scene0274_01_frame_000580 740 | scene0410_01_frame_001313 741 | scene0041_00_frame_003039 742 | scene0180_00_frame_000499 743 | scene0022_00_frame_000129 744 | scene0092_03_frame_001198 745 | scene0052_02_frame_000013 746 | scene0138_00_frame_000270 747 | scene0138_00_frame_001051 748 | scene0126_02_frame_000243 749 | scene0092_03_frame_002654 750 | scene0017_02_frame_000426 751 | scene0421_00_frame_000776 752 | scene0454_00_frame_000739 753 | scene0378_00_frame_001463 754 | scene0435_03_frame_000156 755 | scene0041_00_frame_003038 756 | scene0390_00_frame_000760 757 | scene0207_01_frame_000286 758 | scene0048_00_frame_000879 759 | scene0393_01_frame_001184 760 | scene0418_02_frame_001103 761 | scene0209_00_frame_001515 762 | scene0418_02_frame_000973 763 | scene0191_00_frame_000134 764 | scene0179_00_frame_000572 765 | scene0111_01_frame_000774 766 | scene0394_01_frame_000671 767 | scene0022_00_frame_000734 768 | scene0418_02_frame_000300 769 | scene0001_01_frame_001245 770 | scene0166_02_frame_000876 771 | scene0198_00_frame_000634 772 | scene0134_00_frame_000486 773 | scene0209_00_frame_000094 774 | scene0383_02_frame_000625 775 | scene0092_03_frame_000882 776 | scene0370_01_frame_000625 777 | scene0092_03_frame_001592 778 | scene0421_02_frame_001887 779 | scene0409_01_frame_000144 780 | scene0041_00_frame_000208 781 | scene0362_03_frame_000603 782 | scene0331_01_frame_004532 783 | scene0347_00_frame_000698 784 | scene0393_01_frame_000692 785 | scene0138_00_frame_001875 786 | scene0001_01_frame_000890 787 | scene0418_02_frame_001488 788 | scene0096_01_frame_001031 789 | scene0209_00_frame_001481 790 | scene0360_00_frame_000633 791 | scene0294_00_frame_002321 792 | scene0453_01_frame_001956 793 | scene0291_02_frame_000396 794 | scene0383_02_frame_001571 795 | 
scene0422_00_frame_002187 796 | scene0274_01_frame_001052 797 | scene0016_01_frame_000008 798 | scene0042_01_frame_000413 799 | scene0001_01_frame_001218 800 | scene0092_03_frame_001034 801 | scene0303_00_frame_000935 802 | scene0411_02_frame_000536 803 | scene0435_02_frame_001067 804 | scene0126_02_frame_000913 805 | scene0101_01_frame_001848 806 | scene0421_00_frame_000980 807 | scene0101_01_frame_001380 808 | scene0092_00_frame_002081 809 | scene0274_01_frame_000570 810 | scene0241_02_frame_000710 811 | scene0404_00_frame_000467 812 | scene0101_01_frame_000239 813 | scene0048_00_frame_000874 814 | scene0052_02_frame_000566 815 | scene0087_02_frame_001347 816 | scene0143_01_frame_001972 817 | scene0465_00_frame_000243 818 | scene0138_00_frame_001083 819 | scene0290_00_frame_002299 820 | scene0391_00_frame_000337 821 | scene0144_00_frame_001281 822 | scene0166_02_frame_002778 823 | scene0056_00_frame_000855 824 | scene0378_00_frame_001010 825 | scene0260_00_frame_001266 826 | scene0254_01_frame_000329 827 | scene0422_00_frame_001227 828 | scene0056_00_frame_001439 829 | scene0418_02_frame_002811 830 | scene0041_00_frame_000217 831 | scene0274_01_frame_000148 832 | scene0350_00_frame_000841 833 | scene0362_03_frame_000378 834 | scene0418_02_frame_001158 835 | scene0360_00_frame_000199 836 | scene0473_01_frame_000397 837 | scene0056_01_frame_000603 838 | scene0340_00_frame_000706 839 | scene0290_00_frame_000373 840 | scene0022_00_frame_001873 841 | scene0209_00_frame_001910 842 | scene0092_03_frame_001772 843 | scene0303_00_frame_001066 844 | scene0092_00_frame_001948 845 | scene0221_01_frame_000448 846 | scene0290_00_frame_001741 847 | scene0084_01_frame_000903 848 | scene0307_00_frame_000257 849 | scene0395_00_frame_001877 850 | scene0101_01_frame_000674 851 | scene0418_02_frame_002526 852 | scene0198_00_frame_000146 853 | scene0207_01_frame_000305 854 | scene0454_00_frame_000852 855 | scene0207_01_frame_000432 856 | scene0116_02_frame_000732 857 | 
scene0362_03_frame_000025 858 | scene0092_00_frame_000648 859 | scene0221_00_frame_000091 860 | scene0290_00_frame_002133 861 | scene0435_02_frame_000031 862 | scene0394_01_frame_001753 863 | scene0280_02_frame_000770 864 | scene0293_00_frame_000716 865 | scene0104_00_frame_000453 866 | scene0022_00_frame_001573 867 | scene0421_02_frame_001044 868 | scene0084_01_frame_000832 869 | scene0092_00_frame_000308 870 | scene0465_00_frame_001188 871 | scene0419_01_frame_000463 872 | scene0352_00_frame_000241 873 | scene0241_02_frame_001305 874 | scene0294_00_frame_002190 875 | scene0350_01_frame_000323 876 | scene0274_01_frame_001126 877 | scene0111_01_frame_002065 878 | scene0056_00_frame_000399 879 | scene0411_02_frame_000896 880 | scene0390_00_frame_000027 881 | scene0411_02_frame_000924 882 | scene0395_00_frame_000126 883 | scene0421_00_frame_001602 884 | scene0092_03_frame_000514 885 | scene0092_03_frame_000546 886 | scene0286_03_frame_000476 887 | scene0422_00_frame_000415 888 | scene0383_02_frame_000064 889 | scene0266_00_frame_000932 890 | scene0419_01_frame_001040 891 | scene0411_02_frame_001218 892 | scene0465_00_frame_002749 893 | scene0073_03_frame_000071 894 | scene0286_03_frame_000587 895 | scene0117_00_frame_000602 896 | scene0070_00_frame_000047 897 | scene0362_03_frame_000587 898 | scene0340_00_frame_000307 899 | scene0307_00_frame_001369 900 | scene0264_00_frame_000549 901 | scene0136_01_frame_000650 902 | scene0056_01_frame_000095 903 | scene0101_01_frame_001022 904 | scene0126_02_frame_000373 905 | scene0070_00_frame_001286 906 | scene0395_00_frame_000257 907 | scene0138_00_frame_000022 908 | scene0331_01_frame_001737 909 | scene0340_00_frame_000896 910 | scene0410_01_frame_001387 911 | scene0104_00_frame_000042 912 | scene0435_03_frame_000544 913 | scene0203_01_frame_000294 914 | scene0411_02_frame_000370 915 | scene0088_00_frame_000830 916 | scene0126_02_frame_000701 917 | scene0138_00_frame_000074 918 | scene0421_02_frame_001035 919 | 
scene0191_00_frame_000252 920 | scene0382_00_frame_000090 921 | scene0143_01_frame_002219 922 | scene0051_01_frame_000761 923 | scene0391_00_frame_000810 924 | scene0206_02_frame_000980 925 | scene0136_01_frame_000700 926 | scene0409_01_frame_000455 927 | scene0411_02_frame_000390 928 | scene0092_03_frame_001174 929 | scene0411_00_frame_000030 930 | scene0274_01_frame_000536 931 | scene0017_02_frame_000854 932 | scene0071_00_frame_000351 933 | scene0404_00_frame_003531 934 | scene0408_00_frame_001592 935 | scene0274_01_frame_001401 936 | scene0144_00_frame_000053 937 | scene0408_00_frame_000851 938 | scene0001_01_frame_000084 939 | scene0109_00_frame_000062 940 | scene0352_00_frame_000889 941 | scene0294_00_frame_002948 942 | scene0143_01_frame_000106 943 | scene0435_03_frame_001014 944 | scene0421_00_frame_000855 945 | scene0071_00_frame_000513 946 | scene0408_01_frame_001137 947 | scene0408_00_frame_000598 948 | scene0136_01_frame_000326 949 | scene0419_01_frame_002618 950 | scene0465_00_frame_002266 951 | scene0112_01_frame_000148 952 | scene0408_00_frame_000325 953 | scene0052_02_frame_000147 954 | scene0221_01_frame_000157 955 | scene0454_00_frame_000434 956 | scene0331_01_frame_000245 957 | scene0166_02_frame_001898 958 | scene0073_03_frame_000785 959 | scene0221_01_frame_000153 960 | scene0352_00_frame_000168 961 | scene0347_00_frame_000854 962 | scene0404_00_frame_004152 963 | scene0146_00_frame_001052 964 | scene0331_01_frame_000414 965 | scene0435_00_frame_000139 966 | scene0465_00_frame_004124 967 | scene0411_02_frame_000870 968 | scene0352_00_frame_000442 969 | scene0454_00_frame_000190 970 | scene0051_01_frame_001765 971 | scene0352_00_frame_000439 972 | scene0454_00_frame_000090 973 | scene0395_00_frame_001613 974 | scene0146_00_frame_001083 975 | scene0390_00_frame_000421 976 | scene0435_03_frame_001722 977 | scene0203_01_frame_000357 978 | scene0411_02_frame_000852 979 | scene0052_02_frame_000128 980 | scene0290_00_frame_000893 981 | 
scene0390_00_frame_000453 982 | scene0051_01_frame_001381 983 | scene0435_03_frame_001012 984 | scene0112_01_frame_000154 985 | scene0122_01_frame_000561 986 | scene0146_00_frame_001071 987 | scene0146_00_frame_001058 988 | scene0411_02_frame_001140 989 | scene0006_00_frame_000257 990 | scene0056_01_frame_000546 991 | scene0303_00_frame_000299 992 | scene0408_00_frame_000199 993 | scene0350_00_frame_001301 994 | scene0004_00_frame_000811 995 | scene0290_00_frame_000561 996 | scene0017_02_frame_000486 997 | scene0383_02_frame_000127 998 | scene0268_00_frame_000427 999 | scene0331_01_frame_005387 1000 | scene0166_02_frame_000392 1001 | scene0101_01_frame_000109 1002 | scene0435_03_frame_000234 1003 | scene0408_01_frame_000985 1004 | scene0084_01_frame_000094 1005 | scene0111_01_frame_000250 1006 | scene0395_00_frame_002285 1007 | scene0056_00_frame_001520 1008 | scene0099_00_frame_000675 1009 | scene0331_01_frame_000607 1010 | scene0393_01_frame_000242 1011 | scene0435_00_frame_002431 1012 | scene0331_01_frame_003185 1013 | scene0070_00_frame_001244 1014 | scene0290_00_frame_000958 1015 | scene0307_00_frame_001032 1016 | scene0378_00_frame_001328 1017 | scene0290_00_frame_000019 1018 | scene0307_00_frame_000563 1019 | scene0419_01_frame_002255 1020 | scene0138_00_frame_001986 1021 | scene0393_01_frame_000515 1022 | scene0421_02_frame_001075 1023 | scene0280_02_frame_000431 1024 | scene0070_00_frame_001194 1025 | scene0104_00_frame_000502 1026 | scene0126_02_frame_001355 1027 | scene0092_03_frame_001370 1028 | scene0286_03_frame_000640 1029 | scene0111_01_frame_000393 1030 | scene0350_00_frame_001295 1031 | scene0265_00_frame_000196 1032 | scene0331_01_frame_003231 1033 | scene0017_02_frame_000504 1034 | scene0331_01_frame_005907 1035 | scene0138_00_frame_001776 1036 | scene0290_00_frame_000008 1037 | scene0280_02_frame_000833 1038 | scene0111_01_frame_001435 1039 | scene0419_01_frame_002640 1040 | scene0421_02_frame_001946 1041 | scene0138_00_frame_001788 1042 | 
scene0166_02_frame_001092 1043 | scene0166_02_frame_002603 1044 | scene0297_00_frame_000660 1045 | scene0465_00_frame_002285 1046 | scene0096_01_frame_000611 1047 | scene0138_00_frame_001794 1048 | scene0116_02_frame_000973 1049 | scene0111_01_frame_001486 1050 | scene0331_01_frame_000641 1051 | scene0096_01_frame_000979 1052 | scene0070_00_frame_001260 1053 | scene0096_01_frame_000982 1054 | scene0101_01_frame_000211 1055 | scene0306_01_frame_000899 1056 | scene0166_02_frame_001986 1057 | scene0404_00_frame_002184 1058 | scene0408_01_frame_000693 1059 | scene0006_00_frame_001996 1060 | scene0408_00_frame_000901 1061 | scene0096_02_frame_000727 1062 | scene0331_01_frame_004254 1063 | scene0303_00_frame_001131 1064 | scene0331_01_frame_004403 1065 | scene0096_01_frame_001448 1066 | scene0395_00_frame_002112 1067 | scene0474_02_frame_000448 1068 | scene0410_01_frame_000681 1069 | scene0303_00_frame_001133 1070 | scene0474_02_frame_001294 1071 | scene0474_02_frame_000872 1072 | scene0465_00_frame_000171 1073 | scene0408_00_frame_001819 1074 | scene0410_01_frame_000725 1075 | scene0421_02_frame_001202 1076 | scene0474_02_frame_000888 1077 | scene0474_02_frame_002063 1078 | scene0360_00_frame_000705 1079 | scene0360_00_frame_000726 1080 | scene0404_00_frame_002437 1081 | -------------------------------------------------------------------------------- /dataset/ScanNet/ScanNet_testing_uniform_1080.txt: -------------------------------------------------------------------------------- 1 | scene0404_00_frame_003262 2 | scene0404_00_frame_003249 3 | scene0404_00_frame_003246 4 | scene0404_00_frame_003259 5 | scene0404_00_frame_003255 6 | scene0404_00_frame_003265 7 | scene0404_00_frame_003260 8 | scene0360_00_frame_001563 9 | scene0404_00_frame_003263 10 | scene0404_00_frame_003250 11 | scene0404_00_frame_003257 12 | scene0360_00_frame_001562 13 | scene0404_00_frame_003252 14 | scene0360_00_frame_001564 15 | scene0404_00_frame_003261 16 | scene0404_00_frame_003254 17 | 
scene0404_00_frame_003269 18 | scene0404_00_frame_003264 19 | scene0360_00_frame_001561 20 | scene0404_00_frame_003267 21 | scene0404_00_frame_003253 22 | scene0404_00_frame_003251 23 | scene0404_00_frame_003245 24 | scene0404_00_frame_003247 25 | scene0404_00_frame_003258 26 | scene0404_00_frame_003266 27 | scene0404_00_frame_003268 28 | scene0404_00_frame_003248 29 | scene0404_00_frame_003256 30 | scene0360_00_frame_001546 31 | scene0360_00_frame_001557 32 | scene0404_00_frame_003240 33 | scene0404_00_frame_003272 34 | scene0404_00_frame_003232 35 | scene0360_00_frame_001543 36 | scene0360_00_frame_001559 37 | scene0360_00_frame_001582 38 | scene0360_00_frame_001548 39 | scene0360_00_frame_001555 40 | scene0404_00_frame_003271 41 | scene0404_00_frame_003242 42 | scene0404_00_frame_003270 43 | scene0360_00_frame_001554 44 | scene0404_00_frame_003244 45 | scene0360_00_frame_001551 46 | scene0360_00_frame_001552 47 | scene0404_00_frame_003274 48 | scene0360_00_frame_001573 49 | scene0360_00_frame_001578 50 | scene0404_00_frame_003239 51 | scene0360_00_frame_001574 52 | scene0360_00_frame_001577 53 | scene0360_00_frame_001583 54 | scene0360_00_frame_001581 55 | scene0360_00_frame_001569 56 | scene0360_00_frame_001556 57 | scene0360_00_frame_001579 58 | scene0360_00_frame_001572 59 | scene0360_00_frame_001585 60 | scene0360_00_frame_001566 61 | scene0404_00_frame_003273 62 | scene0360_00_frame_001580 63 | scene0360_00_frame_001575 64 | scene0404_00_frame_003241 65 | scene0360_00_frame_001584 66 | scene0360_00_frame_001571 67 | scene0360_00_frame_001553 68 | scene0404_00_frame_003275 69 | scene0360_00_frame_001567 70 | scene0360_00_frame_000479 71 | scene0360_00_frame_000464 72 | scene0360_00_frame_001592 73 | scene0360_00_frame_000474 74 | scene0404_00_frame_003217 75 | scene0360_00_frame_000517 76 | scene0404_00_frame_003276 77 | scene0360_00_frame_000506 78 | scene0404_00_frame_003277 79 | scene0360_00_frame_000509 80 | scene0360_00_frame_001587 81 | 
scene0404_00_frame_003225 82 | scene0404_00_frame_003218 83 | scene0360_00_frame_000465 84 | scene0404_00_frame_003280 85 | scene0360_00_frame_000482 86 | scene0360_00_frame_000511 87 | scene0404_00_frame_003230 88 | scene0360_00_frame_000481 89 | scene0360_00_frame_000503 90 | scene0360_00_frame_000505 91 | scene0360_00_frame_000508 92 | scene0404_00_frame_003226 93 | scene0360_00_frame_000469 94 | scene0360_00_frame_000521 95 | scene0360_00_frame_000523 96 | scene0404_00_frame_003220 97 | scene0360_00_frame_000518 98 | scene0360_00_frame_001590 99 | scene0360_00_frame_001540 100 | scene0360_00_frame_000504 101 | scene0404_00_frame_003278 102 | scene0404_00_frame_003222 103 | scene0360_00_frame_000514 104 | scene0404_00_frame_003279 105 | scene0360_00_frame_001589 106 | scene0360_00_frame_000500 107 | scene0360_00_frame_000476 108 | scene0360_00_frame_001586 109 | scene0360_00_frame_000478 110 | scene0404_00_frame_003176 111 | scene0404_00_frame_003194 112 | scene0404_00_frame_003175 113 | scene0331_01_frame_002051 114 | scene0404_00_frame_003200 115 | scene0404_00_frame_003180 116 | scene0404_00_frame_003199 117 | scene0331_01_frame_000779 118 | scene0331_01_frame_000778 119 | scene0360_00_frame_000531 120 | scene0404_00_frame_003285 121 | scene0404_00_frame_003210 122 | scene0360_00_frame_000529 123 | scene0404_00_frame_003282 124 | scene0404_00_frame_003198 125 | scene0360_00_frame_000484 126 | scene0404_00_frame_003207 127 | scene0360_00_frame_000462 128 | scene0404_00_frame_003178 129 | scene0404_00_frame_003205 130 | scene0360_00_frame_001535 131 | scene0404_00_frame_003288 132 | scene0404_00_frame_002043 133 | scene0360_00_frame_001537 134 | scene0404_00_frame_003188 135 | scene0360_00_frame_000457 136 | scene0360_00_frame_000490 137 | scene0360_00_frame_001596 138 | scene0360_00_frame_001593 139 | scene0360_00_frame_000498 140 | scene0360_00_frame_000456 141 | scene0331_01_frame_002047 142 | scene0404_00_frame_003209 143 | scene0360_00_frame_000489 144 | 
scene0404_00_frame_002039 145 | scene0404_00_frame_003190 146 | scene0404_00_frame_002047 147 | scene0360_00_frame_000493 148 | scene0360_00_frame_001534 149 | scene0331_01_frame_002045 150 | scene0404_00_frame_003195 151 | scene0465_00_frame_004685 152 | scene0331_01_frame_002072 153 | scene0111_01_frame_000050 154 | scene0446_01_frame_001444 155 | scene0360_00_frame_000557 156 | scene0360_00_frame_000543 157 | scene0360_00_frame_000548 158 | scene0360_00_frame_001604 159 | scene0404_00_frame_002054 160 | scene0360_00_frame_000445 161 | scene0404_00_frame_004363 162 | scene0360_00_frame_000556 163 | scene0331_01_frame_004095 164 | scene0360_00_frame_000552 165 | scene0465_00_frame_004683 166 | scene0331_01_frame_000793 167 | scene0404_00_frame_004376 168 | scene0331_01_frame_004079 169 | scene0331_01_frame_002080 170 | scene0360_00_frame_000401 171 | scene0331_01_frame_004098 172 | scene0360_00_frame_000446 173 | scene0404_00_frame_003165 174 | scene0404_00_frame_004367 175 | scene0360_00_frame_000551 176 | scene0331_01_frame_000795 177 | scene0404_00_frame_004369 178 | scene0404_00_frame_004374 179 | scene0331_01_frame_002057 180 | scene0404_00_frame_004372 181 | scene0331_01_frame_002078 182 | scene0465_00_frame_004682 183 | scene0331_01_frame_004078 184 | scene0404_00_frame_003162 185 | scene0404_00_frame_004371 186 | scene0331_01_frame_004096 187 | scene0404_00_frame_003290 188 | scene0465_00_frame_004684 189 | scene0331_01_frame_000745 190 | scene0360_00_frame_000452 191 | scene0331_01_frame_000975 192 | scene0360_00_frame_000118 193 | scene0404_00_frame_004356 194 | scene0446_01_frame_000578 195 | scene0111_01_frame_000032 196 | scene0331_01_frame_002106 197 | scene0331_01_frame_004123 198 | scene0331_01_frame_001991 199 | scene0446_01_frame_001455 200 | scene0360_00_frame_001526 201 | scene0360_00_frame_001612 202 | scene0360_00_frame_000420 203 | scene0446_01_frame_000216 204 | scene0331_01_frame_001962 205 | scene0404_00_frame_002023 206 | 
scene0331_01_frame_000978 207 | scene0410_01_frame_000523 208 | scene0404_00_frame_004352 209 | scene0360_00_frame_000422 210 | scene0331_01_frame_000999 211 | scene0465_00_frame_004661 212 | scene0404_00_frame_003294 213 | scene0446_01_frame_000220 214 | scene0446_01_frame_001456 215 | scene0446_01_frame_000213 216 | scene0446_01_frame_001386 217 | scene0446_01_frame_001452 218 | scene0360_00_frame_001606 219 | scene0111_01_frame_000555 220 | scene0111_01_frame_000027 221 | scene0465_00_frame_005995 222 | scene0465_00_frame_004692 223 | scene0404_00_frame_002024 224 | scene0111_01_frame_000019 225 | scene0360_00_frame_000405 226 | scene0111_01_frame_000557 227 | scene0465_00_frame_004674 228 | scene0360_00_frame_000404 229 | scene0410_01_frame_000519 230 | scene0360_00_frame_000427 231 | scene0331_01_frame_000740 232 | scene0446_01_frame_001990 233 | scene0465_00_frame_005144 234 | scene0465_00_frame_004655 235 | scene0404_00_frame_000867 236 | scene0404_00_frame_001670 237 | scene0446_01_frame_000407 238 | scene0465_00_frame_006007 239 | scene0465_00_frame_005969 240 | scene0331_01_frame_003054 241 | scene0446_01_frame_002027 242 | scene0446_01_frame_001580 243 | scene0465_00_frame_001013 244 | scene0410_01_frame_000527 245 | scene0360_00_frame_000350 246 | scene0446_01_frame_000433 247 | scene0360_00_frame_001621 248 | scene0331_01_frame_003337 249 | scene0446_01_frame_001966 250 | scene0331_01_frame_005670 251 | scene0446_01_frame_000193 252 | scene0331_01_frame_003043 253 | scene0331_01_frame_002220 254 | scene0404_00_frame_002103 255 | scene0331_01_frame_005679 256 | scene0446_01_frame_001568 257 | scene0446_01_frame_000651 258 | scene0410_01_frame_000200 259 | scene0360_00_frame_000336 260 | scene0280_02_frame_000605 261 | scene0331_01_frame_005672 262 | scene0238_00_frame_000458 263 | scene0404_00_frame_001665 264 | scene0360_00_frame_000080 265 | scene0360_00_frame_000299 266 | scene0404_00_frame_002092 267 | scene0238_00_frame_000465 268 | 
scene0404_00_frame_003668 269 | scene0446_01_frame_000416 270 | scene0404_00_frame_000000 271 | scene0404_00_frame_001261 272 | scene0421_00_frame_000395 273 | scene0421_00_frame_001524 274 | scene0446_01_frame_001551 275 | scene0360_00_frame_000000 276 | scene0331_01_frame_000959 277 | scene0465_00_frame_001143 278 | scene0465_00_frame_001052 279 | scene0421_00_frame_001510 280 | scene0421_00_frame_001507 281 | scene0111_01_frame_000493 282 | scene0272_00_frame_000893 283 | scene0280_02_frame_000554 284 | scene0446_01_frame_000845 285 | scene0331_01_frame_004134 286 | scene0280_02_frame_000688 287 | scene0331_01_frame_000720 288 | scene0446_01_frame_001957 289 | scene0446_01_frame_000821 290 | scene0404_00_frame_000032 291 | scene0029_00_frame_001141 292 | scene0280_02_frame_000676 293 | scene0404_00_frame_003632 294 | scene0360_00_frame_000341 295 | scene0446_01_frame_000456 296 | scene0465_00_frame_001091 297 | scene0029_00_frame_000008 298 | scene0331_01_frame_000002 299 | scene0143_01_frame_001678 300 | scene0410_01_frame_000439 301 | scene0331_01_frame_004135 302 | scene0152_01_frame_001071 303 | scene0446_01_frame_001363 304 | scene0421_00_frame_000384 305 | scene0446_01_frame_000568 306 | scene0446_01_frame_000463 307 | scene0029_00_frame_001039 308 | scene0395_00_frame_001488 309 | scene0360_00_frame_001172 310 | scene0143_01_frame_000947 311 | scene0331_01_frame_003710 312 | scene0111_01_frame_000948 313 | scene0465_00_frame_004385 314 | scene0272_00_frame_001606 315 | scene0446_01_frame_000152 316 | scene0465_00_frame_002084 317 | scene0446_01_frame_001414 318 | scene0134_02_frame_000090 319 | scene0446_01_frame_000331 320 | scene0395_00_frame_001531 321 | scene0446_01_frame_001605 322 | scene0404_00_frame_002125 323 | scene0394_01_frame_001379 324 | scene0272_00_frame_000645 325 | scene0331_01_frame_001943 326 | scene0421_02_frame_001356 327 | scene0408_00_frame_001732 328 | scene0360_00_frame_001640 329 | scene0203_01_frame_001221 330 | 
scene0465_00_frame_003645 331 | scene0051_01_frame_001674 332 | scene0134_00_frame_000601 333 | scene0272_00_frame_000956 334 | scene0404_00_frame_003615 335 | scene0404_00_frame_003961 336 | scene0099_00_frame_000326 337 | scene0143_01_frame_001655 338 | scene0446_01_frame_001327 339 | scene0272_00_frame_000959 340 | scene0446_01_frame_000260 341 | scene0099_00_frame_000407 342 | scene0260_00_frame_001100 343 | scene0265_00_frame_000873 344 | scene0331_01_frame_003936 345 | scene0404_00_frame_001721 346 | scene0465_00_frame_003680 347 | scene0446_01_frame_000333 348 | scene0116_02_frame_001406 349 | scene0293_00_frame_000400 350 | scene0198_00_frame_001069 351 | scene0331_01_frame_000040 352 | scene0306_01_frame_000715 353 | scene0004_00_frame_000592 354 | scene0099_01_frame_000003 355 | scene0382_00_frame_000335 356 | scene0465_00_frame_005951 357 | scene0317_00_frame_000852 358 | scene0146_00_frame_000942 359 | scene0146_00_frame_001210 360 | scene0112_01_frame_000005 361 | scene0112_00_frame_000510 362 | scene0419_01_frame_000850 363 | scene0331_01_frame_002653 364 | scene0331_01_frame_003747 365 | scene0272_00_frame_000750 366 | scene0029_00_frame_000292 367 | scene0029_00_frame_001480 368 | scene0404_00_frame_002134 369 | scene0404_00_frame_003316 370 | scene0395_00_frame_001453 371 | scene0395_00_frame_001532 372 | scene0378_00_frame_000874 373 | scene0092_03_frame_002580 374 | scene0394_01_frame_000711 375 | scene0191_00_frame_000931 376 | scene0143_01_frame_000773 377 | scene0143_01_frame_001850 378 | scene0143_01_frame_001853 379 | scene0265_00_frame_000630 380 | scene0051_01_frame_000977 381 | scene0134_02_frame_000662 382 | scene0134_02_frame_000664 383 | scene0131_00_frame_000864 384 | scene0131_00_frame_000865 385 | scene0131_00_frame_000871 386 | scene0131_00_frame_000898 387 | scene0131_00_frame_000902 388 | scene0131_00_frame_000903 389 | scene0112_02_frame_000868 390 | scene0073_03_frame_001259 391 | scene0048_00_frame_000607 392 | 
scene0446_01_frame_000040 393 | scene0410_01_frame_000544 394 | scene0195_00_frame_000608 395 | scene0306_01_frame_000829 396 | scene0004_00_frame_000577 397 | scene0041_00_frame_001025 398 | scene0041_00_frame_001030 399 | scene0465_00_frame_001602 400 | scene0465_00_frame_003579 401 | scene0465_00_frame_005578 402 | scene0022_00_frame_000892 403 | scene0354_00_frame_001042 404 | scene0354_00_frame_001063 405 | scene0419_01_frame_000955 406 | scene0419_01_frame_002744 407 | scene0419_01_frame_002749 408 | scene0419_01_frame_002857 409 | scene0170_02_frame_000700 410 | scene0170_02_frame_000706 411 | scene0170_02_frame_000723 412 | scene0170_02_frame_000784 413 | scene0016_01_frame_000614 414 | scene0280_02_frame_000713 415 | scene0280_02_frame_000715 416 | scene0191_00_frame_000714 417 | scene0143_01_frame_001849 418 | scene0265_00_frame_001236 419 | scene0051_01_frame_001144 420 | scene0152_01_frame_001366 421 | scene0152_01_frame_001367 422 | scene0152_01_frame_001369 423 | scene0152_01_frame_001377 424 | scene0152_01_frame_001383 425 | scene0152_01_frame_001404 426 | scene0131_00_frame_000862 427 | scene0122_01_frame_000219 428 | scene0122_01_frame_000220 429 | scene0122_01_frame_000224 430 | scene0112_02_frame_000838 431 | scene0421_00_frame_000496 432 | scene0004_00_frame_000117 433 | scene0004_00_frame_000122 434 | scene0004_00_frame_000184 435 | scene0004_00_frame_000187 436 | scene0004_00_frame_000216 437 | scene0004_00_frame_000252 438 | scene0004_00_frame_000564 439 | scene0004_00_frame_000567 440 | scene0004_00_frame_000568 441 | scene0004_00_frame_000703 442 | scene0041_00_frame_000987 443 | scene0041_00_frame_001037 444 | scene0312_01_frame_000892 445 | scene0022_00_frame_000919 446 | scene0342_00_frame_000294 447 | scene0342_00_frame_000313 448 | scene0170_02_frame_000656 449 | scene0404_00_frame_000313 450 | scene0404_00_frame_002868 451 | scene0404_00_frame_002874 452 | scene0404_00_frame_002879 453 | scene0404_00_frame_003328 454 | 
scene0404_00_frame_003332 455 | scene0340_00_frame_001466 456 | scene0191_00_frame_000885 457 | scene0191_00_frame_000890 458 | scene0143_01_frame_000906 459 | scene0051_01_frame_000964 460 | scene0152_01_frame_001256 461 | scene0152_01_frame_001282 462 | scene0152_01_frame_001351 463 | scene0152_01_frame_001358 464 | scene0122_01_frame_000236 465 | scene0122_01_frame_000855 466 | scene0092_00_frame_001855 467 | scene0092_00_frame_001865 468 | scene0113_01_frame_000676 469 | scene0112_02_frame_000512 470 | scene0112_02_frame_000513 471 | scene0329_00_frame_000974 472 | scene0446_01_frame_001656 473 | scene0004_00_frame_000163 474 | scene0004_00_frame_000209 475 | scene0004_00_frame_000225 476 | scene0004_00_frame_000242 477 | scene0004_00_frame_000514 478 | scene0004_00_frame_000520 479 | scene0004_00_frame_000553 480 | scene0004_00_frame_000562 481 | scene0004_00_frame_000563 482 | scene0004_00_frame_000736 483 | scene0303_02_frame_000101 484 | scene0465_00_frame_002023 485 | scene0312_01_frame_000973 486 | scene0312_01_frame_000975 487 | scene0022_00_frame_000865 488 | scene0022_00_frame_000872 489 | scene0342_00_frame_000099 490 | scene0342_00_frame_000119 491 | scene0342_00_frame_000125 492 | scene0342_00_frame_000288 493 | scene0342_00_frame_000335 494 | scene0331_01_frame_004347 495 | scene0170_02_frame_000630 496 | scene0170_02_frame_000631 497 | scene0170_02_frame_000632 498 | scene0170_02_frame_000634 499 | scene0404_00_frame_003333 500 | scene0404_00_frame_003337 501 | scene0191_00_frame_000692 502 | scene0191_00_frame_000874 503 | scene0221_01_frame_000510 504 | scene0221_01_frame_000517 505 | scene0221_01_frame_000536 506 | scene0051_01_frame_000936 507 | scene0051_01_frame_001015 508 | scene0152_01_frame_001285 509 | scene0209_00_frame_001733 510 | scene0092_00_frame_000523 511 | scene0329_00_frame_001006 512 | scene0004_00_frame_000279 513 | scene0004_00_frame_000289 514 | scene0004_00_frame_000304 515 | scene0004_00_frame_000486 516 | 
scene0004_00_frame_000495 517 | scene0004_00_frame_000498 518 | scene0004_00_frame_000500 519 | scene0004_00_frame_000511 520 | scene0004_00_frame_000748 521 | scene0004_00_frame_000752 522 | scene0004_00_frame_000756 523 | scene0004_00_frame_000918 524 | scene0004_00_frame_000922 525 | scene0004_00_frame_000926 526 | scene0312_01_frame_000907 527 | scene0312_01_frame_000908 528 | scene0312_01_frame_000911 529 | scene0312_01_frame_000912 530 | scene0317_00_frame_000425 531 | scene0317_00_frame_000427 532 | scene0317_00_frame_000433 533 | scene0317_00_frame_000440 534 | scene0317_00_frame_000448 535 | scene0317_00_frame_000449 536 | scene0317_00_frame_000471 537 | scene0022_00_frame_000858 538 | scene0022_00_frame_001202 539 | scene0112_00_frame_000131 540 | scene0342_00_frame_000081 541 | scene0342_00_frame_000090 542 | scene0342_00_frame_000357 543 | scene0342_00_frame_000374 544 | scene0342_00_frame_000384 545 | scene0331_01_frame_004351 546 | scene0404_00_frame_003342 547 | scene0378_00_frame_001546 548 | scene0191_00_frame_000477 549 | scene0051_01_frame_000929 550 | scene0051_01_frame_001023 551 | scene0051_01_frame_001025 552 | scene0259_00_frame_000381 553 | scene0446_01_frame_001666 554 | scene0306_01_frame_000735 555 | scene0004_00_frame_000479 556 | scene0004_00_frame_000772 557 | scene0004_00_frame_000777 558 | scene0004_00_frame_000778 559 | scene0004_00_frame_000784 560 | scene0004_00_frame_000787 561 | scene0004_00_frame_000788 562 | scene0004_00_frame_000880 563 | scene0004_00_frame_000886 564 | scene0004_00_frame_000890 565 | scene0004_00_frame_000891 566 | scene0312_01_frame_000914 567 | scene0312_01_frame_000915 568 | scene0312_01_frame_000925 569 | scene0312_01_frame_000934 570 | scene0312_01_frame_000935 571 | scene0312_01_frame_000963 572 | scene0203_01_frame_000965 573 | scene0022_00_frame_001214 574 | scene0022_00_frame_001215 575 | scene0342_00_frame_000034 576 | scene0342_00_frame_000060 577 | scene0342_00_frame_000065 578 | 
scene0342_00_frame_000073 579 | scene0331_01_frame_004355 580 | scene0331_01_frame_004356 581 | scene0331_01_frame_004360 582 | scene0404_00_frame_002765 583 | scene0404_00_frame_002768 584 | scene0404_00_frame_002770 585 | scene0001_01_frame_000342 586 | scene0001_01_frame_000345 587 | scene0051_01_frame_000926 588 | scene0290_00_frame_000418 589 | scene0290_00_frame_000422 590 | scene0290_00_frame_001050 591 | scene0290_00_frame_001059 592 | scene0290_00_frame_001062 593 | scene0446_01_frame_001680 594 | scene0446_01_frame_001684 595 | scene0446_01_frame_001685 596 | scene0306_01_frame_000387 597 | scene0306_01_frame_000401 598 | scene0004_00_frame_000325 599 | scene0004_00_frame_000326 600 | scene0004_00_frame_000329 601 | scene0004_00_frame_000336 602 | scene0004_00_frame_000339 603 | scene0004_00_frame_000340 604 | scene0004_00_frame_000342 605 | scene0004_00_frame_000442 606 | scene0004_00_frame_000443 607 | scene0004_00_frame_000460 608 | scene0004_00_frame_000469 609 | scene0004_00_frame_000794 610 | scene0004_00_frame_000795 611 | scene0004_00_frame_000797 612 | scene0004_00_frame_000798 613 | scene0004_00_frame_000801 614 | scene0004_00_frame_000803 615 | scene0004_00_frame_000805 616 | scene0203_01_frame_000970 617 | scene0203_01_frame_000978 618 | scene0203_01_frame_001374 619 | scene0022_00_frame_001224 620 | scene0022_00_frame_001225 621 | scene0022_00_frame_001226 622 | scene0022_00_frame_001227 623 | scene0022_00_frame_001231 624 | scene0342_00_frame_000012 625 | scene0342_00_frame_000018 626 | scene0342_00_frame_000020 627 | scene0342_00_frame_000038 628 | scene0331_01_frame_004361 629 | scene0404_00_frame_002776 630 | scene0290_00_frame_001043 631 | scene0290_00_frame_001044 632 | scene0290_00_frame_001049 633 | scene0446_01_frame_001686 634 | scene0446_01_frame_001689 635 | scene0446_01_frame_001690 636 | scene0446_01_frame_001692 637 | scene0446_01_frame_001693 638 | scene0446_01_frame_001696 639 | scene0446_01_frame_001697 640 | 
scene0446_01_frame_001699 641 | scene0306_01_frame_000365 642 | scene0306_01_frame_000369 643 | scene0306_01_frame_000373 644 | scene0306_01_frame_000795 645 | scene0004_00_frame_000343 646 | scene0004_00_frame_000344 647 | scene0004_00_frame_000345 648 | scene0004_00_frame_000346 649 | scene0004_00_frame_000347 650 | scene0004_00_frame_000351 651 | scene0004_00_frame_000353 652 | scene0004_00_frame_000354 653 | scene0004_00_frame_000355 654 | scene0004_00_frame_000356 655 | scene0004_00_frame_000358 656 | scene0004_00_frame_000359 657 | scene0331_01_frame_004366 658 | scene0331_01_frame_004367 659 | scene0404_00_frame_002780 660 | scene0404_00_frame_002781 661 | scene0404_00_frame_002782 662 | scene0404_00_frame_002783 663 | scene0404_00_frame_002784 664 | scene0143_01_frame_000396 665 | scene0143_01_frame_000398 666 | scene0143_01_frame_000415 667 | scene0143_01_frame_000419 668 | scene0051_01_frame_000119 669 | scene0051_01_frame_001035 670 | scene0051_01_frame_001037 671 | scene0134_02_frame_000317 672 | scene0290_00_frame_001041 673 | scene0446_01_frame_001700 674 | scene0446_01_frame_001701 675 | scene0446_01_frame_001702 676 | scene0446_01_frame_001703 677 | scene0446_01_frame_001704 678 | scene0446_01_frame_001705 679 | scene0446_01_frame_001706 680 | scene0446_01_frame_001707 681 | scene0446_01_frame_001708 682 | scene0446_01_frame_001709 683 | scene0446_01_frame_001710 684 | scene0004_00_frame_000361 685 | scene0203_01_frame_000381 686 | scene0203_01_frame_000386 687 | scene0331_01_frame_002294 688 | scene0029_00_frame_001767 689 | scene0280_02_frame_000111 690 | scene0280_02_frame_000433 691 | scene0280_02_frame_000437 692 | scene0280_02_frame_000438 693 | scene0280_02_frame_000441 694 | scene0280_02_frame_000442 695 | scene0404_00_frame_002785 696 | scene0404_00_frame_002786 697 | scene0404_00_frame_002787 698 | scene0404_00_frame_002788 699 | scene0404_00_frame_002789 700 | scene0404_00_frame_002790 701 | scene0404_00_frame_002791 702 | 
scene0143_01_frame_000406 703 | scene0143_01_frame_000407 704 | scene0143_01_frame_000699 705 | scene0143_01_frame_000701 706 | scene0143_01_frame_000704 707 | scene0051_01_frame_000087 708 | scene0051_01_frame_000094 709 | scene0051_01_frame_000100 710 | scene0051_01_frame_000105 711 | scene0051_01_frame_000108 712 | scene0051_01_frame_000836 713 | scene0134_02_frame_000315 714 | scene0446_01_frame_001711 715 | scene0446_01_frame_001712 716 | scene0446_01_frame_001713 717 | scene0446_01_frame_001714 718 | scene0306_01_frame_000785 719 | scene0306_01_frame_000786 720 | scene0306_01_frame_000787 721 | scene0306_01_frame_000788 722 | scene0072_02_frame_001105 723 | scene0465_00_frame_001928 724 | scene0465_00_frame_002002 725 | scene0408_01_frame_000848 726 | scene0408_01_frame_000849 727 | scene0331_01_frame_002300 728 | scene0331_01_frame_002303 729 | scene0331_01_frame_002305 730 | scene0280_02_frame_000084 731 | scene0280_02_frame_000087 732 | scene0280_02_frame_000088 733 | scene0280_02_frame_000099 734 | scene0280_02_frame_000102 735 | scene0280_02_frame_000103 736 | scene0280_02_frame_000105 737 | scene0280_02_frame_000106 738 | scene0280_02_frame_000107 739 | scene0404_00_frame_002792 740 | scene0395_00_frame_002051 741 | scene0051_01_frame_000843 742 | scene0051_01_frame_000844 743 | scene0051_01_frame_001616 744 | scene0051_01_frame_001617 745 | scene0051_01_frame_001618 746 | scene0051_01_frame_001620 747 | scene0134_02_frame_000310 748 | scene0134_02_frame_000312 749 | scene0134_02_frame_000313 750 | scene0138_00_frame_001990 751 | scene0421_02_frame_000139 752 | scene0421_02_frame_000153 753 | scene0421_02_frame_001096 754 | scene0306_01_frame_000540 755 | scene0306_01_frame_000541 756 | scene0306_01_frame_000773 757 | scene0306_01_frame_000775 758 | scene0306_01_frame_000778 759 | scene0306_01_frame_000783 760 | scene0072_02_frame_000739 761 | scene0465_00_frame_001929 762 | scene0465_00_frame_001997 763 | scene0465_00_frame_001998 764 | 
scene0465_00_frame_001999 765 | scene0465_00_frame_002000 766 | scene0465_00_frame_002001 767 | scene0408_01_frame_000845 768 | scene0203_01_frame_000615 769 | scene0331_01_frame_005347 770 | scene0331_01_frame_005348 771 | scene0331_01_frame_005349 772 | scene0331_01_frame_005351 773 | scene0331_01_frame_005352 774 | scene0331_01_frame_005353 775 | scene0331_01_frame_005354 776 | scene0331_01_frame_005355 777 | scene0331_01_frame_005356 778 | scene0280_02_frame_000095 779 | scene0280_02_frame_001482 780 | scene0280_02_frame_001483 781 | scene0404_00_frame_002953 782 | scene0116_02_frame_000798 783 | scene0051_01_frame_000846 784 | scene0051_01_frame_000851 785 | scene0051_01_frame_000852 786 | scene0134_02_frame_000211 787 | scene0134_02_frame_000226 788 | scene0134_02_frame_000229 789 | scene0134_02_frame_000280 790 | scene0134_02_frame_000300 791 | scene0134_02_frame_000305 792 | scene0134_02_frame_000307 793 | scene0474_02_frame_002266 794 | scene0112_02_frame_000324 795 | scene0404_00_frame_002976 796 | scene0404_00_frame_002988 797 | scene0051_01_frame_000856 798 | scene0474_02_frame_000869 799 | scene0421_00_frame_001380 800 | scene0421_00_frame_001386 801 | scene0465_00_frame_001496 802 | scene0099_01_frame_000200 803 | scene0421_02_frame_001135 804 | scene0290_00_frame_001519 805 | scene0421_02_frame_001495 806 | scene0408_00_frame_001649 807 | scene0390_00_frame_001916 808 | scene0404_00_frame_004180 809 | scene0072_02_frame_000749 810 | scene0421_00_frame_001679 811 | scene0029_00_frame_001742 812 | scene0421_00_frame_000225 813 | scene0303_00_frame_001164 814 | scene0395_00_frame_002221 815 | scene0408_00_frame_001818 816 | scene0465_00_frame_001485 817 | scene0280_02_frame_001448 818 | scene0116_02_frame_000805 819 | scene0421_00_frame_001663 820 | scene0404_00_frame_003855 821 | scene0421_00_frame_001674 822 | scene0404_00_frame_003865 823 | scene0029_00_frame_001738 824 | scene0206_02_frame_000011 825 | scene0474_02_frame_000528 826 | 
scene0421_00_frame_000201 827 | scene0303_00_frame_001192 828 | scene0303_00_frame_001174 829 | scene0474_02_frame_002332 830 | scene0421_02_frame_001140 831 | scene0395_00_frame_002231 832 | scene0390_00_frame_001280 833 | scene0404_00_frame_003073 834 | scene0421_02_frame_001583 835 | scene0421_00_frame_001683 836 | scene0421_02_frame_000400 837 | scene0360_00_frame_000697 838 | scene0421_00_frame_001687 839 | scene0421_02_frame_001196 840 | scene0395_00_frame_002189 841 | scene0421_02_frame_000391 842 | scene0390_00_frame_001864 843 | scene0280_02_frame_001414 844 | scene0474_02_frame_000895 845 | scene0465_00_frame_001939 846 | scene0474_02_frame_000519 847 | scene0360_00_frame_001906 848 | scene0404_00_frame_002535 849 | scene0474_02_frame_000485 850 | scene0390_00_frame_001906 851 | scene0408_00_frame_001179 852 | scene0421_00_frame_001691 853 | scene0474_02_frame_002270 854 | scene0421_02_frame_001539 855 | scene0404_00_frame_003451 856 | scene0360_00_frame_001902 857 | scene0395_00_frame_002216 858 | scene0072_02_frame_000754 859 | scene0404_00_frame_003065 860 | scene0360_00_frame_000699 861 | scene0099_01_frame_000377 862 | scene0474_02_frame_000472 863 | scene0421_02_frame_001200 864 | scene0465_00_frame_001989 865 | scene0421_00_frame_001402 866 | scene0134_02_frame_000265 867 | scene0474_02_frame_000476 868 | scene0408_01_frame_000007 869 | scene0072_02_frame_000761 870 | scene0421_00_frame_002142 871 | scene0421_02_frame_000385 872 | scene0474_02_frame_000483 873 | scene0474_02_frame_001274 874 | scene0404_00_frame_002526 875 | scene0404_00_frame_002387 876 | scene0404_00_frame_002520 877 | scene0404_00_frame_004212 878 | scene0099_00_frame_000478 879 | scene0280_02_frame_001460 880 | scene0421_02_frame_001162 881 | scene0474_02_frame_002110 882 | scene0099_00_frame_000600 883 | scene0116_02_frame_000857 884 | scene0360_00_frame_000761 885 | scene0474_02_frame_000492 886 | scene0421_00_frame_002170 887 | scene0474_02_frame_000510 888 | 
scene0474_02_frame_000512 889 | scene0404_00_frame_003003 890 | scene0421_00_frame_002150 891 | scene0421_00_frame_002187 892 | scene0280_02_frame_001464 893 | scene0099_00_frame_000477 894 | scene0421_00_frame_002178 895 | scene0421_02_frame_001528 896 | scene0404_00_frame_001497 897 | scene0474_02_frame_002067 898 | scene0404_00_frame_002513 899 | scene0280_02_frame_001462 900 | scene0421_02_frame_001163 901 | scene0116_02_frame_000826 902 | scene0421_00_frame_002163 903 | scene0099_00_frame_000493 904 | scene0404_00_frame_002501 905 | scene0421_00_frame_002328 906 | scene0421_00_frame_002184 907 | scene0404_00_frame_004211 908 | scene0099_00_frame_000492 909 | scene0474_02_frame_000508 910 | scene0404_00_frame_001499 911 | scene0404_00_frame_002511 912 | scene0474_02_frame_002051 913 | scene0421_00_frame_002194 914 | scene0360_00_frame_000720 915 | scene0421_00_frame_002202 916 | scene0474_02_frame_002103 917 | scene0404_00_frame_004221 918 | scene0465_00_frame_002335 919 | scene0421_00_frame_002204 920 | scene0474_02_frame_002104 921 | scene0116_02_frame_000845 922 | scene0116_02_frame_000840 923 | scene0099_00_frame_000489 924 | scene0474_02_frame_002089 925 | scene0474_02_frame_002077 926 | scene0360_00_frame_000740 927 | scene0421_00_frame_002206 928 | scene0360_00_frame_000784 929 | scene0474_02_frame_002082 930 | scene0404_00_frame_004215 931 | scene0404_00_frame_004243 932 | scene0474_02_frame_002079 933 | scene0404_00_frame_003055 934 | scene0465_00_frame_001976 935 | scene0474_02_frame_002290 936 | scene0404_00_frame_001487 937 | scene0360_00_frame_000739 938 | scene0404_00_frame_001483 939 | scene0465_00_frame_001975 940 | scene0360_00_frame_000732 941 | scene0099_00_frame_000603 942 | scene0404_00_frame_001475 943 | scene0474_02_frame_002101 944 | scene0474_02_frame_002303 945 | scene0360_00_frame_000760 946 | scene0474_02_frame_002309 947 | scene0474_02_frame_002105 948 | scene0099_00_frame_000480 949 | scene0360_00_frame_000719 950 | 
scene0404_00_frame_002427 951 | scene0465_00_frame_002330 952 | scene0404_00_frame_004245 953 | scene0404_00_frame_001470 954 | scene0404_00_frame_002432 955 | scene0404_00_frame_003014 956 | scene0404_00_frame_004229 957 | scene0465_00_frame_001964 958 | scene0465_00_frame_001971 959 | scene0465_00_frame_001970 960 | scene0465_00_frame_002343 961 | scene0404_00_frame_003032 962 | scene0360_00_frame_000794 963 | scene0404_00_frame_002439 964 | scene0465_00_frame_001972 965 | scene0404_00_frame_003042 966 | scene0404_00_frame_002444 967 | scene0465_00_frame_002345 968 | scene0465_00_frame_002344 969 | scene0474_02_frame_002095 970 | scene0404_00_frame_003016 971 | scene0099_00_frame_000650 972 | scene0099_00_frame_000610 973 | scene0099_00_frame_000609 974 | scene0421_00_frame_002320 975 | scene0404_00_frame_003018 976 | scene0465_00_frame_002369 977 | scene0404_00_frame_003047 978 | scene0421_00_frame_002226 979 | scene0421_00_frame_002212 980 | scene0465_00_frame_002349 981 | scene0404_00_frame_004227 982 | scene0404_00_frame_004235 983 | scene0404_00_frame_003013 984 | scene0360_00_frame_000800 985 | scene0404_00_frame_003034 986 | scene0404_00_frame_002487 987 | scene0404_00_frame_003029 988 | scene0099_00_frame_000775 989 | scene0404_00_frame_003011 990 | scene0465_00_frame_002365 991 | scene0360_00_frame_000795 992 | scene0404_00_frame_003028 993 | scene0404_00_frame_002434 994 | scene0421_00_frame_002270 995 | scene0404_00_frame_002450 996 | scene0099_00_frame_000648 997 | scene0421_00_frame_002228 998 | scene0404_00_frame_002470 999 | scene0404_00_frame_002451 1000 | scene0421_00_frame_002271 1001 | scene0465_00_frame_002358 1002 | scene0465_00_frame_002364 1003 | scene0360_00_frame_000812 1004 | scene0099_00_frame_000619 1005 | scene0404_00_frame_002457 1006 | scene0421_00_frame_002237 1007 | scene0404_00_frame_002455 1008 | scene0099_00_frame_000782 1009 | scene0421_00_frame_002242 1010 | scene0099_00_frame_000647 1011 | scene0099_00_frame_000621 1012 | 
scene0404_00_frame_002465 1013 | scene0404_00_frame_002477 1014 | scene0421_00_frame_002261 1015 | scene0099_00_frame_000620 1016 | scene0099_00_frame_000780 1017 | scene0421_00_frame_002265 1018 | scene0421_00_frame_002272 1019 | scene0421_00_frame_002232 1020 | scene0404_00_frame_002453 1021 | scene0421_00_frame_002250 1022 | scene0404_00_frame_002474 1023 | scene0421_00_frame_002269 1024 | scene0421_00_frame_002266 1025 | scene0421_00_frame_002229 1026 | scene0421_00_frame_002254 1027 | scene0360_00_frame_000830 1028 | scene0404_00_frame_002480 1029 | scene0421_00_frame_002263 1030 | scene0421_00_frame_002255 1031 | scene0404_00_frame_002473 1032 | scene0099_00_frame_000795 1033 | scene0360_00_frame_000831 1034 | scene0360_00_frame_000825 1035 | scene0099_00_frame_000626 1036 | scene0360_00_frame_000820 1037 | scene0099_00_frame_000783 1038 | scene0421_00_frame_002296 1039 | scene0099_00_frame_000787 1040 | scene0421_00_frame_002282 1041 | scene0421_00_frame_002307 1042 | scene0360_00_frame_000824 1043 | scene0421_00_frame_002276 1044 | scene0421_00_frame_002312 1045 | scene0421_00_frame_002308 1046 | scene0421_00_frame_002299 1047 | scene0421_00_frame_002292 1048 | scene0421_00_frame_002295 1049 | scene0421_00_frame_002297 1050 | scene0360_00_frame_000816 1051 | scene0421_00_frame_002290 1052 | scene0421_00_frame_002293 1053 | scene0421_00_frame_002285 1054 | scene0421_00_frame_002281 1055 | scene0360_00_frame_000815 1056 | scene0421_00_frame_002302 1057 | scene0099_00_frame_000784 1058 | scene0360_00_frame_000822 1059 | scene0421_00_frame_002283 1060 | scene0421_00_frame_002288 1061 | scene0421_00_frame_002274 1062 | scene0421_00_frame_002278 1063 | scene0421_00_frame_002301 1064 | scene0099_00_frame_000631 1065 | scene0099_00_frame_000788 1066 | scene0421_00_frame_002303 1067 | scene0360_00_frame_000818 1068 | scene0099_00_frame_000644 1069 | scene0099_00_frame_000627 1070 | scene0099_00_frame_000628 1071 | scene0099_00_frame_000792 1072 | 
scene0099_00_frame_000643 1073 | scene0099_00_frame_000645 1074 | scene0099_00_frame_000637 1075 | scene0099_00_frame_000638 1076 | scene0099_00_frame_000634 1077 | scene0099_00_frame_000635 1078 | scene0099_00_frame_000639 1079 | scene0099_00_frame_000636 1080 | scene0099_00_frame_000633 1081 | -------------------------------------------------------------------------------- /dataset/ScanNet/ScanNet_testing_restricted_1080.txt: -------------------------------------------------------------------------------- 1 | scene0206_02_frame_001952 2 | scene0390_00_frame_000359 3 | scene0109_00_frame_000715 4 | scene0390_00_frame_001298 5 | scene0421_00_frame_000156 6 | scene0390_00_frame_001571 7 | scene0274_01_frame_000280 8 | scene0331_01_frame_001595 9 | scene0042_01_frame_000469 10 | scene0084_01_frame_000783 11 | scene0421_02_frame_001946 12 | scene0241_02_frame_000942 13 | scene0331_01_frame_002774 14 | scene0111_01_frame_000226 15 | scene0166_02_frame_001121 16 | scene0006_00_frame_000019 17 | scene0274_01_frame_000286 18 | scene0111_01_frame_000269 19 | scene0409_01_frame_000410 20 | scene0272_00_frame_000047 21 | scene0071_00_frame_000565 22 | scene0101_01_frame_000221 23 | scene0042_01_frame_000493 24 | scene0101_01_frame_001405 25 | scene0104_00_frame_000486 26 | scene0331_01_frame_004220 27 | scene0421_02_frame_001455 28 | scene0331_01_frame_003239 29 | scene0042_01_frame_000483 30 | scene0435_00_frame_000433 31 | scene0421_02_frame_000111 32 | scene0297_00_frame_000644 33 | scene0111_01_frame_001162 34 | scene0465_00_frame_000543 35 | scene0390_00_frame_000280 36 | scene0421_00_frame_000865 37 | scene0274_01_frame_001313 38 | scene0272_00_frame_000828 39 | scene0166_02_frame_001993 40 | scene0474_02_frame_001128 41 | scene0274_01_frame_001171 42 | scene0435_00_frame_000429 43 | scene0331_01_frame_001598 44 | scene0104_00_frame_000490 45 | scene0260_00_frame_000244 46 | scene0294_00_frame_001741 47 | scene0331_01_frame_002769 48 | scene0166_02_frame_002229 49 | 
scene0465_00_frame_000553 50 | scene0331_01_frame_000607 51 | scene0006_00_frame_002016 52 | scene0350_00_frame_001288 53 | scene0206_02_frame_001898 54 | scene0006_00_frame_001958 55 | scene0104_00_frame_000259 56 | scene0331_01_frame_004226 57 | scene0390_00_frame_001722 58 | scene0265_00_frame_000204 59 | scene0331_01_frame_006003 60 | scene0331_01_frame_000649 61 | scene0474_02_frame_000844 62 | scene0411_02_frame_001200 63 | scene0101_01_frame_001410 64 | scene0331_01_frame_006008 65 | scene0272_00_frame_000100 66 | scene0272_00_frame_000044 67 | scene0101_01_frame_001054 68 | scene0409_01_frame_000181 69 | scene0166_02_frame_001987 70 | scene0166_02_frame_002589 71 | scene0435_00_frame_000424 72 | scene0111_01_frame_000819 73 | scene0331_01_frame_001792 74 | scene0111_01_frame_000409 75 | scene0166_02_frame_000798 76 | scene0104_00_frame_001038 77 | scene0006_00_frame_001685 78 | scene0454_00_frame_000094 79 | scene0111_01_frame_001646 80 | scene0111_01_frame_000221 81 | scene0166_02_frame_000403 82 | scene0042_01_frame_000499 83 | scene0101_01_frame_000218 84 | scene0274_01_frame_000490 85 | scene0421_02_frame_000118 86 | scene0272_00_frame_000835 87 | scene0465_00_frame_000219 88 | scene0104_00_frame_000483 89 | scene0166_02_frame_001394 90 | scene0241_02_frame_000924 91 | scene0390_00_frame_001570 92 | scene0111_01_frame_001484 93 | scene0092_03_frame_000680 94 | scene0260_00_frame_000214 95 | scene0016_01_frame_001451 96 | scene0446_01_frame_001772 97 | scene0206_02_frame_001881 98 | scene0331_01_frame_003485 99 | scene0265_00_frame_000161 100 | scene0265_00_frame_000171 101 | scene0166_02_frame_002718 102 | scene0265_00_frame_000165 103 | scene0421_02_frame_001623 104 | scene0109_00_frame_000727 105 | scene0274_01_frame_001162 106 | scene0116_02_frame_000913 107 | scene0331_01_frame_003228 108 | scene0101_01_frame_001215 109 | scene0006_00_frame_001986 110 | scene0166_02_frame_000446 111 | scene0104_00_frame_000244 112 | scene0421_02_frame_000699 113 | 
scene0294_00_frame_001738 114 | scene0331_01_frame_000593 115 | scene0331_01_frame_000633 116 | scene0421_00_frame_002118 117 | scene0435_00_frame_000434 118 | scene0272_00_frame_000816 119 | scene0331_01_frame_006009 120 | scene0421_02_frame_001447 121 | scene0166_02_frame_000828 122 | scene0331_01_frame_004268 123 | scene0166_02_frame_000454 124 | scene0071_00_frame_000553 125 | scene0272_00_frame_000142 126 | scene0084_01_frame_000784 127 | scene0104_00_frame_000482 128 | scene0390_00_frame_000365 129 | scene0272_00_frame_001257 130 | scene0006_00_frame_002033 131 | scene0331_01_frame_000604 132 | scene0101_01_frame_001047 133 | scene0006_00_frame_001684 134 | scene0331_01_frame_001789 135 | scene0116_02_frame_000792 136 | scene0331_01_frame_000627 137 | scene0099_00_frame_000502 138 | scene0446_01_frame_001762 139 | scene0166_02_frame_001437 140 | scene0421_02_frame_000908 141 | scene0421_02_frame_001949 142 | scene0166_02_frame_000496 143 | scene0111_01_frame_000304 144 | scene0394_01_frame_000144 145 | scene0274_01_frame_001316 146 | scene0272_00_frame_000137 147 | scene0331_01_frame_001592 148 | scene0070_00_frame_001012 149 | scene0104_00_frame_001019 150 | scene0084_01_frame_000806 151 | scene0390_00_frame_000896 152 | scene0241_02_frame_000870 153 | scene0116_02_frame_000910 154 | scene0421_00_frame_000862 155 | scene0206_02_frame_000071 156 | scene0390_00_frame_000870 157 | scene0111_01_frame_001816 158 | scene0166_02_frame_000814 159 | scene0166_02_frame_002503 160 | scene0206_02_frame_001944 161 | scene0109_00_frame_000710 162 | scene0435_00_frame_002432 163 | scene0331_01_frame_001593 164 | scene0092_03_frame_000634 165 | scene0166_02_frame_001110 166 | scene0109_00_frame_000726 167 | scene0274_01_frame_000482 168 | scene0409_01_frame_000353 169 | scene0264_00_frame_001518 170 | scene0111_01_frame_000245 171 | scene0390_00_frame_000316 172 | scene0166_02_frame_000822 173 | scene0116_02_frame_000917 174 | scene0474_02_frame_002369 175 | 
scene0104_00_frame_000273 176 | scene0272_00_frame_000823 177 | scene0303_00_frame_001233 178 | scene0272_00_frame_000810 179 | scene0116_02_frame_000902 180 | scene0070_00_frame_000997 181 | scene0111_01_frame_000220 182 | scene0166_02_frame_001143 183 | scene0421_00_frame_000146 184 | scene0042_01_frame_000501 185 | scene0421_02_frame_001448 186 | scene0166_02_frame_002283 187 | scene0421_00_frame_002113 188 | scene0101_01_frame_001406 189 | scene0454_00_frame_000390 190 | scene0144_00_frame_000258 191 | scene0409_01_frame_000619 192 | scene0241_02_frame_000915 193 | scene0111_01_frame_000801 194 | scene0166_02_frame_000393 195 | scene0260_00_frame_000220 196 | scene0241_02_frame_000526 197 | scene0331_01_frame_001583 198 | scene0092_03_frame_000629 199 | scene0116_02_frame_000976 200 | scene0042_01_frame_000465 201 | scene0006_00_frame_002001 202 | scene0206_02_frame_001490 203 | scene0101_01_frame_001059 204 | scene0166_02_frame_000487 205 | scene0421_02_frame_001454 206 | scene0411_02_frame_001204 207 | scene0390_00_frame_000281 208 | scene0390_00_frame_000388 209 | scene0408_00_frame_000098 210 | scene0195_00_frame_001246 211 | scene0166_02_frame_002501 212 | scene0390_00_frame_000384 213 | scene0347_00_frame_001063 214 | scene0390_00_frame_000319 215 | scene0331_01_frame_003488 216 | scene0042_01_frame_000473 217 | scene0111_01_frame_001830 218 | scene0116_02_frame_000968 219 | scene0331_01_frame_003195 220 | scene0029_00_frame_000126 221 | scene0465_00_frame_005024 222 | scene0166_02_frame_002468 223 | scene0390_00_frame_000338 224 | scene0421_00_frame_001422 225 | scene0166_02_frame_002476 226 | scene0101_01_frame_001056 227 | scene0195_00_frame_001234 228 | scene0421_02_frame_001449 229 | scene0265_00_frame_000207 230 | scene0421_02_frame_000475 231 | scene0206_02_frame_001946 232 | scene0166_02_frame_000493 233 | scene0101_01_frame_000215 234 | scene0408_00_frame_000086 235 | scene0394_01_frame_000147 236 | scene0454_00_frame_000425 237 | 
scene0004_00_frame_000380 238 | scene0390_00_frame_000391 239 | scene0166_02_frame_000801 240 | scene0421_02_frame_000471 241 | scene0265_00_frame_000191 242 | scene0206_02_frame_001897 243 | scene0084_01_frame_000779 244 | scene0390_00_frame_000865 245 | scene0331_01_frame_001585 246 | scene0029_00_frame_001768 247 | scene0421_02_frame_000903 248 | scene0409_01_frame_000357 249 | scene0465_00_frame_000530 250 | scene0166_02_frame_001123 251 | scene0331_01_frame_000588 252 | scene0006_00_frame_001987 253 | scene0474_02_frame_001599 254 | scene0274_01_frame_001200 255 | scene0421_00_frame_000893 256 | scene0390_00_frame_000304 257 | scene0206_02_frame_000065 258 | scene0421_00_frame_001426 259 | scene0116_02_frame_000789 260 | scene0331_01_frame_003484 261 | scene0274_01_frame_001360 262 | scene0408_00_frame_000089 263 | scene0274_01_frame_000304 264 | scene0394_01_frame_000140 265 | scene0166_02_frame_002223 266 | scene0070_00_frame_000995 267 | scene0104_00_frame_000251 268 | scene0029_00_frame_001703 269 | scene0272_00_frame_000105 270 | scene0166_02_frame_001132 271 | scene0116_02_frame_000701 272 | scene0331_01_frame_000624 273 | scene0071_00_frame_000587 274 | scene0421_00_frame_000864 275 | scene0195_00_frame_001228 276 | scene0206_02_frame_001909 277 | scene0394_01_frame_000142 278 | scene0101_01_frame_000211 279 | scene0071_00_frame_000563 280 | scene0111_01_frame_000276 281 | scene0421_00_frame_001999 282 | scene0421_02_frame_001463 283 | scene0111_01_frame_001476 284 | scene0241_02_frame_000949 285 | scene0409_01_frame_000037 286 | scene0111_01_frame_000252 287 | scene0272_00_frame_000121 288 | scene0116_02_frame_000790 289 | scene0409_01_frame_000358 290 | scene0071_00_frame_000551 291 | scene0474_02_frame_002372 292 | scene0297_00_frame_000638 293 | scene0297_00_frame_000648 294 | scene0016_01_frame_001447 295 | scene0390_00_frame_000345 296 | scene0435_00_frame_002431 297 | scene0070_00_frame_000001 298 | scene0111_01_frame_000403 299 | 
scene0474_02_frame_002375 300 | scene0331_01_frame_003462 301 | scene0290_00_frame_000333 302 | scene0331_01_frame_003190 303 | scene0474_02_frame_001594 304 | scene0274_01_frame_001194 305 | scene0265_00_frame_001383 306 | scene0331_01_frame_004266 307 | scene0272_00_frame_001264 308 | scene0104_00_frame_000274 309 | scene0111_01_frame_000285 310 | scene0029_00_frame_001771 311 | scene0166_02_frame_002721 312 | scene0092_03_frame_000843 313 | scene0166_02_frame_000805 314 | scene0272_00_frame_000124 315 | scene0303_00_frame_001226 316 | scene0421_02_frame_001950 317 | scene0084_01_frame_000789 318 | scene0104_00_frame_001031 319 | scene0294_00_frame_001743 320 | scene0421_00_frame_001424 321 | scene0274_01_frame_001163 322 | scene0101_01_frame_001235 323 | scene0421_02_frame_001634 324 | scene0111_01_frame_001494 325 | scene0331_01_frame_003472 326 | scene0421_02_frame_000116 327 | scene0104_00_frame_000229 328 | scene0166_02_frame_000802 329 | scene0265_00_frame_000194 330 | scene0408_00_frame_000071 331 | scene0390_00_frame_001568 332 | scene0104_00_frame_001026 333 | scene0274_01_frame_001324 334 | scene0084_01_frame_000809 335 | scene0101_01_frame_000207 336 | scene0166_02_frame_001160 337 | scene0265_00_frame_000193 338 | scene0265_00_frame_000163 339 | scene0274_01_frame_001175 340 | scene0347_00_frame_000847 341 | scene0116_02_frame_000907 342 | scene0390_00_frame_000366 343 | scene0331_01_frame_001599 344 | scene0084_01_frame_000790 345 | scene0331_01_frame_003476 346 | scene0435_03_frame_000233 347 | scene0421_00_frame_001737 348 | scene0144_00_frame_000291 349 | scene0465_00_frame_000549 350 | scene0092_03_frame_000688 351 | scene0390_00_frame_000349 352 | scene0331_01_frame_003216 353 | scene0104_00_frame_000258 354 | scene0409_01_frame_000416 355 | scene0111_01_frame_001143 356 | scene0274_01_frame_000288 357 | scene0297_00_frame_000646 358 | scene0166_02_frame_002691 359 | scene0435_00_frame_002376 360 | scene0166_02_frame_002706 361 | 
scene0435_00_frame_000431 362 | scene0260_00_frame_000236 363 | scene0421_00_frame_000603 364 | scene0408_00_frame_000093 365 | scene0421_00_frame_002020 366 | scene0117_00_frame_000290 367 | scene0206_02_frame_001488 368 | scene0006_00_frame_001951 369 | scene0084_01_frame_000775 370 | scene0241_02_frame_000955 371 | scene0331_01_frame_001581 372 | scene0206_02_frame_001880 373 | scene0331_01_frame_000619 374 | scene0092_03_frame_000623 375 | scene0274_01_frame_000287 376 | scene0104_00_frame_000253 377 | scene0166_02_frame_002221 378 | scene0166_02_frame_002636 379 | scene0331_01_frame_003218 380 | scene0092_03_frame_000840 381 | scene0104_00_frame_000504 382 | scene0331_01_frame_000632 383 | scene0166_02_frame_001120 384 | scene0331_01_frame_001577 385 | scene0390_00_frame_001577 386 | scene0241_02_frame_000950 387 | scene0390_00_frame_000309 388 | scene0111_01_frame_001474 389 | scene0241_02_frame_000945 390 | scene0101_01_frame_001076 391 | scene0454_00_frame_000098 392 | scene0331_01_frame_003199 393 | scene0394_01_frame_000148 394 | scene0195_00_frame_001244 395 | scene0465_00_frame_000217 396 | scene0347_00_frame_000849 397 | scene0474_02_frame_000921 398 | scene0070_00_frame_001004 399 | scene0166_02_frame_000841 400 | scene0272_00_frame_000034 401 | scene0390_00_frame_000395 402 | scene0016_01_frame_001443 403 | scene0111_01_frame_000170 404 | scene0101_01_frame_000212 405 | scene0111_01_frame_000822 406 | scene0109_00_frame_000756 407 | scene0117_00_frame_000292 408 | scene0347_00_frame_001050 409 | scene0101_01_frame_000217 410 | scene0260_00_frame_001000 411 | scene0109_00_frame_000728 412 | scene0166_02_frame_000810 413 | scene0092_03_frame_000839 414 | scene0274_01_frame_001295 415 | scene0331_01_frame_000591 416 | scene0465_00_frame_000919 417 | scene0474_02_frame_001601 418 | scene0101_01_frame_001075 419 | scene0109_00_frame_000740 420 | scene0421_00_frame_001427 421 | scene0272_00_frame_000122 422 | scene0331_01_frame_003223 423 | 
scene0029_00_frame_001772 424 | scene0331_01_frame_000578 425 | scene0101_01_frame_000824 426 | scene0274_01_frame_001317 427 | scene0104_00_frame_000252 428 | scene0101_01_frame_000821 429 | scene0421_00_frame_002010 430 | scene0421_02_frame_000477 431 | scene0272_00_frame_000118 432 | scene0071_00_frame_000607 433 | scene0421_02_frame_000862 434 | scene0111_01_frame_000271 435 | scene0454_00_frame_000095 436 | scene0331_01_frame_004221 437 | scene0274_01_frame_000468 438 | scene0101_01_frame_001210 439 | scene0111_01_frame_001167 440 | scene0166_02_frame_001399 441 | scene0116_02_frame_000915 442 | scene0101_01_frame_000116 443 | scene0166_02_frame_000441 444 | scene0274_01_frame_001304 445 | scene0421_02_frame_000165 446 | scene0166_02_frame_002226 447 | scene0331_01_frame_000636 448 | scene0331_01_frame_001787 449 | scene0409_01_frame_000372 450 | scene0070_00_frame_000989 451 | scene0042_01_frame_000487 452 | scene0446_01_frame_001766 453 | scene0421_02_frame_000472 454 | scene0241_02_frame_000897 455 | scene0116_02_frame_000914 456 | scene0421_00_frame_000871 457 | scene0260_00_frame_000249 458 | scene0409_01_frame_000224 459 | scene0421_00_frame_001419 460 | scene0331_01_frame_000637 461 | scene0042_01_frame_000457 462 | scene0274_01_frame_001191 463 | scene0104_00_frame_001020 464 | scene0116_02_frame_000980 465 | scene0116_02_frame_000979 466 | scene0144_00_frame_000268 467 | scene0071_00_frame_000588 468 | scene0272_00_frame_000126 469 | scene0016_01_frame_001438 470 | scene0272_00_frame_000817 471 | scene0272_00_frame_000131 472 | scene0409_01_frame_000359 473 | scene0274_01_frame_000485 474 | scene0111_01_frame_000834 475 | scene0390_00_frame_000341 476 | scene0166_02_frame_002464 477 | scene0070_00_frame_001000 478 | scene0390_00_frame_001563 479 | scene0111_01_frame_000268 480 | scene0166_02_frame_001084 481 | scene0071_00_frame_000586 482 | scene0144_00_frame_000269 483 | scene0260_00_frame_000222 484 | scene0294_00_frame_001765 485 | 
scene0435_00_frame_000436 486 | scene0166_02_frame_001990 487 | scene0421_02_frame_001456 488 | scene0166_02_frame_000806 489 | scene0104_00_frame_000220 490 | scene0006_00_frame_001995 491 | scene0274_01_frame_000454 492 | scene0144_00_frame_000283 493 | scene0421_00_frame_001288 494 | scene0331_01_frame_001596 495 | scene0241_02_frame_000529 496 | scene0421_00_frame_000863 497 | scene0350_01_frame_000273 498 | scene0465_00_frame_000547 499 | scene0274_01_frame_000489 500 | scene0421_02_frame_000915 501 | scene0104_00_frame_001035 502 | scene0331_01_frame_003470 503 | scene0421_00_frame_001289 504 | scene0390_00_frame_000284 505 | scene0274_01_frame_000283 506 | scene0274_01_frame_001190 507 | scene0265_00_frame_000184 508 | scene0350_00_frame_001296 509 | scene0166_02_frame_002641 510 | scene0390_00_frame_000895 511 | scene0331_01_frame_000581 512 | scene0070_00_frame_001261 513 | scene0331_01_frame_006012 514 | scene0070_00_frame_000985 515 | scene0006_00_frame_000028 516 | scene0474_02_frame_001985 517 | scene0111_01_frame_000257 518 | scene0048_00_frame_000051 519 | scene0073_03_frame_000663 520 | scene0408_00_frame_000070 521 | scene0421_00_frame_002121 522 | scene0421_00_frame_002005 523 | scene0421_00_frame_002120 524 | scene0474_02_frame_001597 525 | scene0016_01_frame_001446 526 | scene0071_00_frame_000589 527 | scene0303_00_frame_001231 528 | scene0206_02_frame_001489 529 | scene0166_02_frame_000836 530 | scene0073_03_frame_000660 531 | scene0111_01_frame_000239 532 | scene0260_00_frame_000210 533 | scene0111_01_frame_001648 534 | scene0350_01_frame_000279 535 | scene0241_02_frame_000890 536 | scene0166_02_frame_002702 537 | scene0390_00_frame_000891 538 | scene0166_02_frame_002594 539 | scene0042_01_frame_000461 540 | scene0260_00_frame_000238 541 | scene0465_00_frame_000927 542 | scene0241_02_frame_000935 543 | scene0421_00_frame_000563 544 | scene0411_02_frame_001205 545 | scene0101_01_frame_000205 546 | scene0166_02_frame_002584 547 | 
scene0136_01_frame_000295 548 | scene0421_02_frame_000691 549 | scene0390_00_frame_000878 550 | scene0111_01_frame_000817 551 | scene0241_02_frame_000911 552 | scene0071_00_frame_000556 553 | scene0029_00_frame_000679 554 | scene0421_00_frame_002002 555 | scene0350_01_frame_000274 556 | scene0111_01_frame_000809 557 | scene0421_02_frame_001630 558 | scene0166_02_frame_000439 559 | scene0303_02_frame_000774 560 | scene0104_00_frame_000234 561 | scene0474_02_frame_001316 562 | scene0004_00_frame_000386 563 | scene0144_00_frame_000284 564 | scene0006_00_frame_002004 565 | scene0454_00_frame_000055 566 | scene0265_00_frame_000196 567 | scene0409_01_frame_000626 568 | scene0409_01_frame_000202 569 | scene0465_00_frame_000541 570 | scene0280_02_frame_001080 571 | scene0390_00_frame_000893 572 | scene0260_00_frame_000215 573 | scene0070_00_frame_000994 574 | scene0101_01_frame_001073 575 | scene0241_02_frame_000913 576 | scene0048_00_frame_000048 577 | scene0331_01_frame_001565 578 | scene0331_01_frame_000562 579 | scene0474_02_frame_001987 580 | scene0390_00_frame_000351 581 | scene0394_01_frame_000146 582 | scene0029_00_frame_000606 583 | scene0144_00_frame_000287 584 | scene0265_00_frame_000162 585 | scene0303_02_frame_000775 586 | scene0099_00_frame_000581 587 | scene0166_02_frame_001140 588 | scene0006_00_frame_001998 589 | scene0260_00_frame_000245 590 | scene0166_02_frame_000447 591 | scene0101_01_frame_000271 592 | scene0144_00_frame_000288 593 | scene0104_00_frame_000260 594 | scene0274_01_frame_000292 595 | scene0241_02_frame_000896 596 | scene0166_02_frame_002689 597 | scene0421_02_frame_001457 598 | scene0303_00_frame_001228 599 | scene0111_01_frame_000244 600 | scene0421_02_frame_001953 601 | scene0144_00_frame_000266 602 | scene0421_02_frame_000694 603 | scene0070_00_frame_000992 604 | scene0274_01_frame_001294 605 | scene0331_01_frame_001768 606 | scene0101_01_frame_000220 607 | scene0111_01_frame_000397 608 | scene0421_00_frame_001420 609 | 
scene0390_00_frame_000392 610 | scene0111_01_frame_001835 611 | scene0260_00_frame_001296 612 | scene0331_01_frame_003461 613 | scene0474_02_frame_000279 614 | scene0350_01_frame_000280 615 | scene0421_02_frame_000696 616 | scene0206_02_frame_001873 617 | scene0421_00_frame_001741 618 | scene0116_02_frame_000970 619 | scene0331_01_frame_004218 620 | scene0390_00_frame_000337 621 | scene0421_00_frame_002024 622 | scene0101_01_frame_000732 623 | scene0071_00_frame_000609 624 | scene0272_00_frame_001261 625 | scene0104_00_frame_001030 626 | scene0101_01_frame_000208 627 | scene0465_00_frame_005021 628 | scene0116_02_frame_000903 629 | scene0331_01_frame_004271 630 | scene0195_00_frame_001239 631 | scene0071_00_frame_000561 632 | scene0241_02_frame_001270 633 | scene0111_01_frame_000813 634 | scene0390_00_frame_000279 635 | scene0092_03_frame_000637 636 | scene0421_02_frame_001460 637 | scene0454_00_frame_000059 638 | scene0446_01_frame_001770 639 | scene0274_01_frame_000464 640 | scene0206_02_frame_001904 641 | scene0166_02_frame_000839 642 | scene0111_01_frame_001170 643 | scene0111_01_frame_000812 644 | scene0084_01_frame_000793 645 | scene0111_01_frame_000818 646 | scene0409_01_frame_000415 647 | scene0260_00_frame_000259 648 | scene0101_01_frame_001057 649 | scene0101_01_frame_001071 650 | scene0390_00_frame_000401 651 | scene0390_00_frame_001567 652 | scene0421_02_frame_000114 653 | scene0421_00_frame_002003 654 | scene0350_00_frame_001290 655 | scene0166_02_frame_000467 656 | scene0006_00_frame_001688 657 | scene0331_01_frame_000576 658 | scene0421_02_frame_001624 659 | scene0166_02_frame_000824 660 | scene0070_00_frame_000981 661 | scene0265_00_frame_000177 662 | scene0421_00_frame_000158 663 | scene0029_00_frame_000128 664 | scene0421_00_frame_001730 665 | scene0331_01_frame_000590 666 | scene0116_02_frame_000922 667 | scene0071_00_frame_000606 668 | scene0421_00_frame_002028 669 | scene0350_01_frame_000281 670 | scene0111_01_frame_001653 671 | 
scene0454_00_frame_000385 672 | scene0166_02_frame_001117 673 | scene0104_00_frame_001015 674 | scene0390_00_frame_000320 675 | scene0274_01_frame_001185 676 | scene0409_01_frame_000618 677 | scene0408_00_frame_000087 678 | scene0331_01_frame_000567 679 | scene0409_01_frame_000032 680 | scene0136_01_frame_000240 681 | scene0421_02_frame_000864 682 | scene0166_02_frame_002585 683 | scene0101_01_frame_001233 684 | scene0331_01_frame_000645 685 | scene0446_01_frame_001761 686 | scene0166_02_frame_001973 687 | scene0294_00_frame_001737 688 | scene0331_01_frame_000557 689 | scene0006_00_frame_001695 690 | scene0101_01_frame_000118 691 | scene0435_00_frame_000426 692 | scene0104_00_frame_000491 693 | scene0070_00_frame_001264 694 | scene0421_02_frame_000867 695 | scene0274_01_frame_001325 696 | scene0166_02_frame_000448 697 | scene0104_00_frame_000497 698 | scene0272_00_frame_000827 699 | scene0206_02_frame_000064 700 | scene0092_03_frame_000846 701 | scene0331_01_frame_001587 702 | scene0048_00_frame_000042 703 | scene0421_00_frame_001418 704 | scene0390_00_frame_000361 705 | scene0166_02_frame_001148 706 | scene0042_01_frame_000489 707 | scene0084_01_frame_000410 708 | scene0274_01_frame_000465 709 | scene0042_01_frame_000463 710 | scene0042_01_frame_000491 711 | scene0071_00_frame_000562 712 | scene0274_01_frame_000456 713 | scene0435_00_frame_000420 714 | scene0016_01_frame_001450 715 | scene0421_02_frame_000906 716 | scene0294_00_frame_001783 717 | scene0206_02_frame_001496 718 | scene0101_01_frame_000265 719 | scene0111_01_frame_000816 720 | scene0241_02_frame_000953 721 | scene0101_01_frame_000373 722 | scene0206_02_frame_001876 723 | scene0286_03_frame_000642 724 | scene0195_00_frame_001231 725 | scene0350_01_frame_000275 726 | scene0029_00_frame_000607 727 | scene0101_01_frame_000282 728 | scene0241_02_frame_000921 729 | scene0101_01_frame_001238 730 | scene0274_01_frame_001296 731 | scene0016_01_frame_001432 732 | scene0454_00_frame_000092 733 | 
scene0260_00_frame_000243 734 | scene0409_01_frame_000193 735 | scene0421_00_frame_002012 736 | scene0101_01_frame_000277 737 | scene0421_00_frame_000153 738 | scene0101_01_frame_000275 739 | scene0006_00_frame_001963 740 | scene0042_01_frame_000472 741 | scene0241_02_frame_000540 742 | scene0390_00_frame_000362 743 | scene0104_00_frame_000250 744 | scene0206_02_frame_001494 745 | scene0421_02_frame_000859 746 | scene0331_01_frame_002761 747 | scene0331_01_frame_004272 748 | scene0101_01_frame_000344 749 | scene0409_01_frame_000197 750 | scene0166_02_frame_001125 751 | scene0166_02_frame_002701 752 | scene0454_00_frame_000056 753 | scene0111_01_frame_000229 754 | scene0166_02_frame_002469 755 | scene0350_01_frame_000271 756 | scene0465_00_frame_000540 757 | scene0274_01_frame_001318 758 | scene0206_02_frame_000522 759 | scene0274_01_frame_000481 760 | scene0331_01_frame_000560 761 | scene0004_00_frame_000368 762 | scene0111_01_frame_001823 763 | scene0042_01_frame_000462 764 | scene0111_01_frame_001814 765 | scene0421_00_frame_000564 766 | scene0390_00_frame_000275 767 | scene0241_02_frame_000926 768 | scene0331_01_frame_004206 769 | scene0274_01_frame_000307 770 | scene0166_02_frame_000390 771 | scene0274_01_frame_001297 772 | scene0104_00_frame_000257 773 | scene0166_02_frame_000469 774 | scene0048_00_frame_000057 775 | scene0408_00_frame_000068 776 | scene0280_02_frame_000244 777 | scene0409_01_frame_000412 778 | scene0421_00_frame_002016 779 | scene0116_02_frame_000912 780 | scene0390_00_frame_001562 781 | scene0166_02_frame_001088 782 | scene0274_01_frame_000486 783 | scene0101_01_frame_001407 784 | scene0241_02_frame_000922 785 | scene0421_02_frame_000876 786 | scene0070_00_frame_000000 787 | scene0331_01_frame_004269 788 | scene0104_00_frame_000505 789 | scene0101_01_frame_001409 790 | scene0350_01_frame_000272 791 | scene0071_00_frame_000595 792 | scene0104_00_frame_001027 793 | scene0109_00_frame_000735 794 | scene0092_03_frame_000687 795 | 
scene0331_01_frame_001766 796 | scene0104_00_frame_000256 797 | scene0166_02_frame_000796 798 | scene0101_01_frame_000185 799 | scene0109_00_frame_000753 800 | scene0421_00_frame_002004 801 | scene0331_01_frame_000558 802 | scene0241_02_frame_000873 803 | scene0421_02_frame_001626 804 | scene0073_03_frame_000666 805 | scene0409_01_frame_000182 806 | scene0465_00_frame_000536 807 | scene0421_00_frame_000891 808 | scene0166_02_frame_001405 809 | scene0071_00_frame_000559 810 | scene0166_02_frame_002710 811 | scene0331_01_frame_003238 812 | scene0104_00_frame_000261 813 | scene0421_00_frame_000152 814 | scene0111_01_frame_000235 815 | scene0111_01_frame_001821 816 | scene0474_02_frame_000291 817 | scene0166_02_frame_000817 818 | scene0331_01_frame_000577 819 | scene0421_00_frame_000892 820 | scene0331_01_frame_001582 821 | scene0016_01_frame_001445 822 | scene0421_02_frame_000916 823 | scene0409_01_frame_000206 824 | scene0474_02_frame_001986 825 | scene0331_01_frame_000611 826 | scene0265_00_frame_000206 827 | scene0390_00_frame_000389 828 | scene0454_00_frame_000387 829 | scene0116_02_frame_000967 830 | scene0206_02_frame_001884 831 | scene0390_00_frame_000334 832 | scene0421_00_frame_000602 833 | scene0166_02_frame_002600 834 | scene0421_02_frame_001627 835 | scene0166_02_frame_001162 836 | scene0101_01_frame_001411 837 | scene0331_01_frame_003474 838 | scene0104_00_frame_000507 839 | scene0166_02_frame_002624 840 | scene0104_00_frame_000320 841 | scene0331_01_frame_001765 842 | scene0166_02_frame_002705 843 | scene0092_03_frame_000684 844 | scene0274_01_frame_000289 845 | scene0111_01_frame_000805 846 | scene0111_01_frame_001145 847 | scene0350_01_frame_000254 848 | scene0421_02_frame_000880 849 | scene0111_01_frame_000405 850 | scene0408_00_frame_000072 851 | scene0465_00_frame_000554 852 | scene0272_00_frame_000115 853 | scene0347_00_frame_001062 854 | scene0390_00_frame_000271 855 | scene0101_01_frame_000214 856 | scene0111_01_frame_000799 857 | 
scene0390_00_frame_000367 858 | scene0166_02_frame_001116 859 | scene0206_02_frame_001872 860 | scene0331_01_frame_004390 861 | scene0331_01_frame_003473 862 | scene0111_01_frame_000832 863 | scene0166_02_frame_000838 864 | scene0274_01_frame_000267 865 | scene0166_02_frame_000470 866 | scene0101_01_frame_000206 867 | scene0073_03_frame_000659 868 | scene0084_01_frame_000792 869 | scene0109_00_frame_000716 870 | scene0241_02_frame_000934 871 | scene0111_01_frame_000172 872 | scene0111_01_frame_000811 873 | scene0474_02_frame_000550 874 | scene0101_01_frame_000109 875 | scene0272_00_frame_001265 876 | scene0166_02_frame_001392 877 | scene0029_00_frame_001710 878 | scene0206_02_frame_001895 879 | scene0331_01_frame_003227 880 | scene0272_00_frame_000841 881 | scene0265_00_frame_000195 882 | scene0195_00_frame_001236 883 | scene0474_02_frame_000290 884 | scene0111_01_frame_000831 885 | scene0006_00_frame_000017 886 | scene0166_02_frame_001114 887 | scene0166_02_frame_002281 888 | scene0331_01_frame_004234 889 | scene0006_00_frame_001949 890 | scene0070_00_frame_001006 891 | scene0421_02_frame_000692 892 | scene0421_00_frame_002090 893 | scene0101_01_frame_001048 894 | scene0435_00_frame_002375 895 | scene0331_01_frame_000552 896 | scene0166_02_frame_001409 897 | scene0274_01_frame_000455 898 | scene0048_00_frame_000053 899 | scene0111_01_frame_001493 900 | scene0390_00_frame_000394 901 | scene0166_02_frame_002621 902 | scene0274_01_frame_000473 903 | scene0294_00_frame_001786 904 | scene0166_02_frame_002224 905 | scene0084_01_frame_000795 906 | scene0004_00_frame_000384 907 | scene0111_01_frame_001144 908 | scene0350_01_frame_000276 909 | scene0206_02_frame_001950 910 | scene0070_00_frame_001002 911 | scene0101_01_frame_000377 912 | scene0071_00_frame_000564 913 | scene0272_00_frame_000136 914 | scene0331_01_frame_005999 915 | scene0454_00_frame_000389 916 | scene0206_02_frame_001943 917 | scene0116_02_frame_000906 918 | scene0350_00_frame_001294 919 | 
scene0409_01_frame_000209 920 | scene0006_00_frame_000032 921 | scene0092_03_frame_000849 922 | scene0166_02_frame_001980 923 | scene0111_01_frame_001480 924 | scene0116_02_frame_000702 925 | scene0104_00_frame_000515 926 | scene0390_00_frame_000256 927 | scene0111_01_frame_001819 928 | scene0331_01_frame_002770 929 | scene0166_02_frame_000485 930 | scene0474_02_frame_001318 931 | scene0421_02_frame_000866 932 | scene0421_00_frame_000142 933 | scene0029_00_frame_001766 934 | scene0166_02_frame_000450 935 | scene0241_02_frame_000530 936 | scene0166_02_frame_001135 937 | scene0265_00_frame_000179 938 | scene0274_01_frame_000298 939 | scene0092_03_frame_000631 940 | scene0144_00_frame_000280 941 | scene0104_00_frame_000271 942 | scene0104_00_frame_000241 943 | scene0104_00_frame_000223 944 | scene0117_00_frame_000294 945 | scene0166_02_frame_002632 946 | scene0260_00_frame_000211 947 | scene0394_01_frame_000141 948 | scene0006_00_frame_002019 949 | scene0331_01_frame_006018 950 | scene0104_00_frame_000492 951 | scene0111_01_frame_000411 952 | scene0116_02_frame_000916 953 | scene0029_00_frame_000605 954 | scene0465_00_frame_000921 955 | scene0390_00_frame_000321 956 | scene0006_00_frame_002025 957 | scene0109_00_frame_000747 958 | scene0465_00_frame_005026 959 | scene0265_00_frame_000200 960 | scene0070_00_frame_001262 961 | scene0111_01_frame_001650 962 | scene0116_02_frame_000973 963 | scene0421_00_frame_000873 964 | scene0421_00_frame_000565 965 | scene0421_00_frame_000263 966 | scene0409_01_frame_000349 967 | scene0421_02_frame_001461 968 | scene0331_01_frame_002772 969 | scene0241_02_frame_000541 970 | scene0241_02_frame_000901 971 | scene0042_01_frame_000495 972 | scene0331_01_frame_003483 973 | scene0206_02_frame_001905 974 | scene0111_01_frame_000233 975 | scene0260_00_frame_000221 976 | scene0166_02_frame_000835 977 | scene0070_00_frame_001271 978 | scene0016_01_frame_001429 979 | scene0116_02_frame_000975 980 | scene0109_00_frame_000724 981 | 
scene0465_00_frame_000548 982 | scene0111_01_frame_000408 983 | scene0006_00_frame_001696 984 | scene0109_00_frame_000730 985 | scene0331_01_frame_001762 986 | scene0016_01_frame_001444 987 | scene0274_01_frame_001306 988 | scene0029_00_frame_001705 989 | scene0265_00_frame_000169 990 | scene0070_00_frame_000002 991 | scene0421_00_frame_000147 992 | scene0331_01_frame_000565 993 | scene0006_00_frame_000033 994 | scene0421_00_frame_002025 995 | scene0109_00_frame_000725 996 | scene0465_00_frame_000926 997 | scene0109_00_frame_000718 998 | scene0446_01_frame_001767 999 | scene0101_01_frame_000269 1000 | scene0272_00_frame_000117 1001 | scene0070_00_frame_001011 1002 | scene0421_02_frame_001464 1003 | scene0166_02_frame_002638 1004 | scene0092_03_frame_000681 1005 | scene0166_02_frame_000807 1006 | scene0048_00_frame_000049 1007 | scene0421_00_frame_001627 1008 | scene0331_01_frame_001775 1009 | scene0331_01_frame_004205 1010 | scene0286_03_frame_000645 1011 | scene0465_00_frame_000534 1012 | scene0274_01_frame_001327 1013 | scene0166_02_frame_001984 1014 | scene0286_02_frame_001201 1015 | scene0144_00_frame_000257 1016 | scene0241_02_frame_001272 1017 | scene0465_00_frame_000918 1018 | scene0272_00_frame_000839 1019 | scene0331_01_frame_002766 1020 | scene0092_03_frame_000636 1021 | scene0390_00_frame_000404 1022 | scene0421_02_frame_000907 1023 | scene0111_01_frame_000228 1024 | scene0331_01_frame_001564 1025 | scene0331_01_frame_000628 1026 | scene0029_00_frame_000680 1027 | scene0109_00_frame_000721 1028 | scene0166_02_frame_000483 1029 | scene0274_01_frame_001193 1030 | scene0116_02_frame_000918 1031 | scene0421_00_frame_000255 1032 | scene0435_03_frame_000237 1033 | scene0111_01_frame_000263 1034 | scene0166_02_frame_000466 1035 | scene0084_01_frame_000780 1036 | scene0347_00_frame_000850 1037 | scene0297_00_frame_000643 1038 | scene0206_02_frame_000072 1039 | scene0084_01_frame_000774 1040 | scene0006_00_frame_002027 1041 | scene0274_01_frame_000297 1042 | 
scene0350_00_frame_001292 1043 | scene0006_00_frame_000030 1044 | scene0104_00_frame_001022 1045 | scene0166_02_frame_001131 1046 | scene0111_01_frame_000246 1047 | scene0280_02_frame_000852 1048 | scene0006_00_frame_001994 1049 | scene0195_00_frame_001245 1050 | scene0042_01_frame_000458 1051 | scene0331_01_frame_003187 1052 | scene0331_01_frame_004267 1053 | scene0166_02_frame_002472 1054 | scene0331_01_frame_004415 1055 | scene0006_00_frame_001950 1056 | scene0421_02_frame_000874 1057 | scene0101_01_frame_000210 1058 | scene0331_01_frame_003197 1059 | scene0272_00_frame_001260 1060 | scene0390_00_frame_000283 1061 | scene0166_02_frame_002695 1062 | scene0111_01_frame_001812 1063 | scene0004_00_frame_000369 1064 | scene0166_02_frame_000799 1065 | scene0073_03_frame_000664 1066 | scene0331_01_frame_006016 1067 | scene0166_02_frame_001144 1068 | scene0390_00_frame_000403 1069 | scene0272_00_frame_000112 1070 | scene0241_02_frame_000952 1071 | scene0166_02_frame_002588 1072 | scene0111_01_frame_001173 1073 | scene0144_00_frame_000272 1074 | scene0390_00_frame_000266 1075 | scene0390_00_frame_000876 1076 | scene0421_02_frame_000164 1077 | scene0390_00_frame_000373 1078 | scene0265_00_frame_000203 1079 | scene0104_00_frame_001034 1080 | scene0241_02_frame_000912 1081 | --------------------------------------------------------------------------------