├── .DS_Store ├── README.md ├── __pycache__ ├── hr_layers.cpython-37.pyc ├── hr_layers.cpython-38.pyc ├── kitti_utils.cpython-37.pyc ├── kitti_utils.cpython-38.pyc ├── kitti_utils.cpython-39.pyc ├── layers.cpython-37.pyc ├── layers.cpython-38.pyc ├── layers.cpython-39.pyc ├── normal_ranking_loss.cpython-38.pyc ├── options.cpython-37.pyc ├── options.cpython-38.pyc ├── test.cpython-38.pyc ├── test_dev.cpython-38.pyc ├── test_dev2.cpython-38.pyc ├── train.cpython-38.pyc ├── train.cpython-39.pyc ├── trainer.cpython-37.pyc ├── trainer.cpython-38.pyc ├── trainer_dev.cpython-38.pyc ├── trainer_dev.cpython-39.pyc ├── trainer_dev2.cpython-38.pyc ├── trainer_dev3.cpython-38.pyc ├── trainer_dev4.cpython-38.pyc ├── trainer_dev5.cpython-38.pyc ├── trainer_dev5.cpython-39.pyc ├── utils.cpython-37.pyc ├── utils.cpython-38.pyc └── utils.cpython-39.pyc ├── datasets ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── __init__.cpython-38.pyc │ ├── __init__.cpython-39.pyc │ ├── cityscapes_evaldataset.cpython-38.pyc │ ├── cityscapes_preprocessed_dataset.cpython-38.pyc │ ├── kitti_dataset.cpython-37.pyc │ ├── kitti_dataset.cpython-38.pyc │ ├── kitti_dataset.cpython-39.pyc │ ├── mono_dataset.cpython-37.pyc │ ├── mono_dataset.cpython-38.pyc │ ├── mono_dataset.cpython-39.pyc │ └── vk2_dataset.cpython-37.pyc ├── cityscapes_evaldataset.py ├── cityscapes_preprocessed_dataset.py ├── kitti_dataset.py └── mono_dataset.py ├── export_gt_depth.py ├── kitti_utils.py ├── layers.py ├── logs └── .DS_Store ├── networks_dev ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-38.pyc │ ├── depth_decoder.cpython-36.pyc │ ├── depth_decoder.cpython-38.pyc │ ├── depth_decoder2.cpython-38.pyc │ ├── depth_decoder3.cpython-36.pyc │ ├── depth_decoder6.cpython-36.pyc │ ├── depth_decoder6.cpython-38.pyc │ ├── depth_decoder_stu.cpython-38.pyc │ ├── layers.cpython-36.pyc │ ├── layers.cpython-38.pyc │ ├── net.cpython-36.pyc │ ├── net.cpython-38.pyc │ ├── pose_decoder.cpython-36.pyc │ ├── pose_decoder.cpython-38.pyc │ ├── pose_encoder.cpython-36.pyc │ ├── pose_encoder.cpython-38.pyc │ ├── render_utils.cpython-36.pyc │ ├── render_utils.cpython-38.pyc │ ├── resnet.cpython-36.pyc │ ├── resnet.cpython-38.pyc │ ├── resnet_encoder.cpython-36.pyc │ ├── resnet_encoder.cpython-38.pyc │ ├── utils.cpython-36.pyc │ └── utils.cpython-38.pyc ├── depth_decoder.py ├── layers.py ├── net.py ├── pose_decoder.py ├── render_utils.py ├── resnet.py ├── resnet_encoder.py └── utils.py ├── options.py ├── splits ├── .DS_Store ├── benchmark │ ├── eigen_to_benchmark_ids.npy │ ├── test_files.txt │ ├── train_files.txt │ └── val_files.txt ├── cityscapes_preprocessed │ ├── train_files.txt │ └── val_files.txt ├── eigen │ ├── .DS_Store │ ├── gt_depths.npz │ └── test_files.txt ├── eigen_benchmark │ └── test_files.txt ├── eigen_full │ ├── train_files.txt │ └── val_files.txt ├── eigen_zhou │ ├── train_files.txt │ ├── train_total_files.txt │ └── val_files.txt └── odom │ ├── test_files_09.txt │ ├── test_files_10.txt │ ├── train_files.txt │ └── val_files.txt ├── start2test.sh ├── test.py ├── test_dev.py └── utils.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaichen-z/DevNet/f71499dc078fec5fa65bd69a757f58df341aae93/.DS_Store -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [DevNet: Self-supervised Monocular Depth Learning via 
Density Volume Construction](https://arxiv.org/abs/2209.06351) - ECCV 2022.
2 | 
3 | ## Introduction
4 | 
5 | This is the PyTorch implementation of **DevNet: Self-supervised Monocular Depth Learning via Density Volume Construction**, a simple and efficient neural architecture for self-supervised monocular depth estimation.
6 | 
7 | ## Setup
8 | DevNet supports multiple Python and PyTorch versions, for example (a minimal environment sketch is given at the end of this README):
9 | ```
10 | python==3.8
11 | pytorch==1.12.0
12 | torchvision==0.13.0
13 | ```
14 | 
15 | ## Data
16 | [KITTI]: To download this dataset, follow the instructions in [MonoDepth2](https://github.com/nianticlabs/monodepth2).
17 | 
18 | [KITTI Odometry]: To download this dataset, follow the instructions in [MonoDepth2](https://github.com/nianticlabs/monodepth2).
19 | 
20 | [NYU-V2]: To download this dataset, follow the instructions in [MonoDepth2](https://github.com/nianticlabs/monodepth2).
21 | 
22 | ## Running the code
23 | ### Inference
24 | Trained models for some of the experiments described in the paper are available at [this link](https://drive.google.com/drive/folders/1oyQnXlQ7WqfgzfG1ApF5tAJFfRb26QXZ?usp=sharing). Download them and place them in the `logs` directory. The inference script is `test_dev.py`.
25 | - DevNet on `kitti` with `resnet18` backbone and `192 x 640` resolution:
26 | ```
27 | sh start2test.sh
28 | ```
29 | ### Training
30 | The necessary training details can also be found in `test_dev.py`.
31 | 
32 | 
33 | 
34 | ## Citation
35 | If you find this code useful for your research, please cite our paper:
36 | 
37 | ```
38 | @inproceedings{zhou2022devnet,
39 |   title={Devnet: Self-supervised monocular depth learning via density volume construction},
40 |   author={Zhou, Kaichen and Hong, Lanqing and Chen, Changhao and Xu, Hang and Ye, Chaoqiang and Hu, Qingyong and Li, Zhenguo},
41 |   booktitle={European Conference on Computer Vision},
42 |   pages={125--142},
43 |   year={2022},
44 |   organization={Springer}
45 | }
46 | ```
47 | ## Acknowledgements
48 | Our code partially builds on [Monodepth2](https://github.com/nianticlabs/monodepth2).
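A minimal environment sketch for the Setup section above. It assumes a conda-based workflow; the environment name `devnet` is only an example, the extra packages are inferred from the imports in this repository (NumPy, scikit-image, OpenCV, Pillow), and the exact `torch` wheel may depend on your CUDA version.
```
conda create -n devnet python=3.8
conda activate devnet
pip install torch==1.12.0 torchvision==0.13.0
pip install numpy scikit-image opencv-python Pillow
```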
49 | 
50 | 
--------------------------------------------------------------------------------
/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .kitti_dataset import KITTIRAWDataset, KITTIOdomDataset, KITTIDepthDataset
2 | from .cityscapes_preprocessed_dataset import CityscapesPreprocessedDataset
3 | from .cityscapes_evaldataset import CityscapesEvalDataset
4 | 
--------------------------------------------------------------------------------
/datasets/cityscapes_evaldataset.py:
-------------------------------------------------------------------------------- 1 | # Copyright Niantic 2019. Patent Pending. All rights reserved. 2 | # 3 | # This software is licensed under the terms of the Monodepth2 licence 4 | # which allows for non-commercial use only, the full terms of which are made 5 | # available in the LICENSE file. 6 | 7 | import os 8 | import json 9 | import numpy as np 10 | import PIL.Image as pil 11 | 12 | from .mono_dataset import MonoDataset 13 | 14 | 15 | class CityscapesEvalDataset(MonoDataset): 16 | """Cityscapes evaluation dataset - here we are loading the raw, original images rather than 17 | preprocessed triplets, and so cropping needs to be done inside get_color. 18 | """ 19 | RAW_HEIGHT = 1024 20 | RAW_WIDTH = 2048 21 | 22 | def __init__(self, *args, **kwargs): 23 | super(CityscapesEvalDataset, self).__init__(*args, **kwargs) 24 | 25 | def index_to_folder_and_frame_idx(self, index): 26 | """Convert index in the dataset to a folder name, frame_idx and any other bits 27 | 28 | txt file is of format: 29 | aachen aachen_000000 4 30 | """ 31 | city, frame_name = self.filenames[index].split() 32 | side = None 33 | 34 | return city, frame_name, side 35 | 36 | def check_depth(self): 37 | return False 38 | 39 | def load_intrinsics(self, city, frame_name): 40 | # adapted from sfmlearner 41 | split = "test" # if self.is_train else "val" 42 | 43 | camera_file = os.path.join(self.data_path, 'camera', 44 | split, city, frame_name + '_camera.json') 45 | with open(camera_file, 'r') as f: 46 | camera = json.load(f) 47 | fx = camera['intrinsic']['fx'] 48 | fy = camera['intrinsic']['fy'] 49 | u0 = camera['intrinsic']['u0'] 50 | v0 = camera['intrinsic']['v0'] 51 | intrinsics = np.array([[fx, 0, u0, 0], 52 | [0, fy, v0, 0], 53 | [0, 0, 1, 0], 54 | [0, 0, 0, 1]]).astype(np.float32) 55 | intrinsics[0, :] /= self.RAW_WIDTH 56 | intrinsics[1, :] /= self.RAW_HEIGHT * 0.75 57 | return intrinsics 58 | 59 | def get_color(self, city, frame_name, side, do_flip, is_sequence=False): 60 | if side is not None: 61 | raise ValueError("Cityscapes dataset doesn't know how to deal with sides yet") 62 | 63 | color = self.loader(self.get_image_path(city, frame_name, side, is_sequence)) 64 | 65 | # crop down to cityscapes size 66 | w, h = color.size 67 | crop_h = h * 3 // 4 68 | color = color.crop((0, 0, w, crop_h)) 69 | 70 | if do_flip: 71 | color = color.transpose(pil.FLIP_LEFT_RIGHT) 72 | 73 | return color 74 | 75 | def get_offset_framename(self, frame_name, offset=-2): 76 | city, seq, frame_num = frame_name.split('_') 77 | 78 | frame_num = int(frame_num) + offset 79 | frame_num = str(frame_num).zfill(6) 80 | return '{}_{}_{}'.format(city, seq, frame_num) 81 | 82 | def get_colors(self, city, frame_name, side, do_flip): 83 | if side is not None: 84 | raise ValueError("Cityscapes dataset doesn't know how to deal with sides") 85 | 86 | color = self.get_color(city, frame_name, side, do_flip) 87 | 88 | prev_name = self.get_offset_framename(frame_name, offset=-2) 89 | prev_color = self.get_color(city, prev_name, side, do_flip, is_sequence=True) 90 | 91 | inputs = {} 92 | inputs[("color", 0, -1)] = color 93 | inputs[("color", -1, -1)] = prev_color 94 | 95 | return inputs 96 | 97 | def get_image_path(self, city, frame_name, side, is_sequence=False): 98 | image_path = os.path.join( 99 | self.data_path, city, frame_name + '_leftImg8bit.png') 100 | return image_path 101 | -------------------------------------------------------------------------------- /datasets/cityscapes_preprocessed_dataset.py: 
-------------------------------------------------------------------------------- 1 | # Copyright Niantic 2019. Patent Pending. All rights reserved. 2 | # 3 | # This software is licensed under the terms of the Monodepth2 licence 4 | # which allows for non-commercial use only, the full terms of which are made 5 | # available in the LICENSE file. 6 | import os 7 | import numpy as np 8 | import PIL.Image as pil 9 | 10 | from .mono_dataset import MonoDataset 11 | 12 | 13 | class CityscapesPreprocessedDataset(MonoDataset): 14 | """Cityscapes dataset - this expects triplets of images concatenated into a single wide image, 15 | which have had the ego car removed (bottom 25% of the image cropped) 16 | """ 17 | 18 | RAW_WIDTH = 1024 19 | RAW_HEIGHT = 384 20 | 21 | def __init__(self, *args, **kwargs): 22 | super(CityscapesPreprocessedDataset, self).__init__(*args, **kwargs) 23 | 24 | def index_to_folder_and_frame_idx(self, index): 25 | """Convert index in the dataset to a folder name, frame_idx and any other bits 26 | 27 | txt file is of format: 28 | ulm ulm_000064_000012 29 | """ 30 | city, frame_name = self.filenames[index].split() 31 | side = None 32 | return city, frame_name, side 33 | 34 | def check_depth(self): 35 | return False 36 | 37 | def load_intrinsics(self, city, frame_name): 38 | # adapted from sfmlearner 39 | 40 | camera_file = os.path.join(self.data_path, city, "{}_cam.txt".format(frame_name)) 41 | camera = np.loadtxt(camera_file, delimiter=",") 42 | fx = camera[0] 43 | fy = camera[4] 44 | u0 = camera[2] 45 | v0 = camera[5] 46 | intrinsics = np.array([[fx, 0, u0, 0], 47 | [0, fy, v0, 0], 48 | [0, 0, 1, 0], 49 | [0, 0, 0, 1]]).astype(np.float32) 50 | 51 | intrinsics[0, :] /= self.RAW_WIDTH 52 | intrinsics[1, :] /= self.RAW_HEIGHT 53 | return intrinsics 54 | 55 | def get_colors(self, city, frame_name, side, do_flip): 56 | if side is not None: 57 | raise ValueError("Cityscapes dataset doesn't know how to deal with sides") 58 | 59 | color = self.loader(self.get_image_path(city, frame_name)) 60 | color = np.array(color) 61 | 62 | w = color.shape[1] // 3 63 | inputs = {} 64 | inputs[("color", -1, -1)] = pil.fromarray(color[:, :w]) 65 | inputs[("color", 0, -1)] = pil.fromarray(color[:, w:2*w]) 66 | inputs[("color", 1, -1)] = pil.fromarray(color[:, 2*w:]) 67 | 68 | if do_flip: 69 | for key in inputs: 70 | inputs[key] = inputs[key].transpose(pil.FLIP_LEFT_RIGHT) 71 | 72 | return inputs 73 | 74 | def get_image_path(self, city, frame_name): 75 | return os.path.join(self.data_path, city, "{}.jpg".format(frame_name)) 76 | -------------------------------------------------------------------------------- /datasets/kitti_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright Niantic 2021. Patent Pending. All rights reserved. 2 | # 3 | # This software is licensed under the terms of the ManyDepth licence 4 | # which allows for non-commercial use only, the full terms of which are made 5 | # available in the LICENSE file. 
6 | 
7 | import os
8 | os.environ["MKL_NUM_THREADS"] = "1" # noqa F402
9 | os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402
10 | os.environ["OMP_NUM_THREADS"] = "1" # noqa F402
11 | import skimage.transform
12 | import numpy as np
13 | import PIL.Image as pil
14 | from kitti_utils import generate_depth_map  # needed by KITTIRAWDataset.get_depth below
15 | from .mono_dataset import MonoDataset
16 | 
17 | 
18 | class KITTIDataset(MonoDataset):
19 |     """Superclass for different types of KITTI dataset loaders
20 |     """
21 |     def __init__(self, *args, **kwargs):
22 |         super(KITTIDataset, self).__init__(*args, **kwargs)
23 | 
24 |         # NOTE: Make sure your intrinsics matrix is *normalized* by the original image size
25 |         self.K = np.array([[0.58, 0, 0.5, 0],
26 |                            [0, 1.92, 0.5, 0],
27 |                            [0, 0, 1, 0],
28 |                            [0, 0, 0, 1]], dtype=np.float32)
29 |         self.full_res_shape = (1242, 375)
30 |         self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3}
31 | 
32 |     def check_depth(self):
33 |         line = self.filenames[0].split()
34 |         scene_name = line[0]
35 |         frame_index = int(line[1])
36 |         velo_filename = os.path.join(
37 |             self.data_path,
38 |             scene_name,
39 |             "velodyne_points/data/{:010d}.bin".format(int(frame_index)))
40 |         return os.path.isfile(velo_filename)
41 | 
42 |     def index_to_folder_and_frame_idx(self, index):
43 |         """Convert index in the dataset to a folder name, frame_idx and any other bits
44 |         """
45 |         line = self.filenames[index].split()
46 |         folder = line[0]
47 |         if len(line) == 3:
48 |             frame_index = int(line[1])
49 |         else:
50 |             frame_index = 0
51 |         if len(line) == 3:
52 |             side = line[2]
53 |         else:
54 |             side = None
55 |         return folder, frame_index, side
56 | 
57 |     def get_color(self, folder, frame_index, side, do_flip):
58 |         color = self.loader(self.get_image_path(folder, frame_index, side))
59 |         if do_flip:
60 |             color = color.transpose(pil.FLIP_LEFT_RIGHT)
61 |         return color
62 | 
63 | 
64 | class KITTIRAWDataset(KITTIDataset):
65 |     """KITTI dataset which loads the original velodyne depth maps for ground truth
66 |     """
67 |     def __init__(self, *args, **kwargs):
68 |         super(KITTIRAWDataset, self).__init__(*args, **kwargs)
69 | 
70 |     def get_image_path(self, folder, frame_index, side):
71 |         f_str = "{:010d}{}".format(frame_index, self.img_ext)
72 |         image_path = os.path.join(
73 |             self.data_path, folder, "image_0{}/data".format(self.side_map[side]), f_str)
74 |         return image_path
75 | 
76 |     def get_depth(self, folder, frame_index, side, do_flip):
77 |         calib_path = os.path.join(self.data_path, folder.split("/")[0])
78 |         velo_filename = os.path.join(
79 |             self.data_path,
80 |             folder,
81 |             "velodyne_points/data/{:010d}.bin".format(int(frame_index)))
82 |         depth_gt = generate_depth_map(calib_path, velo_filename, self.side_map[side])  # project the raw velodyne points to a depth map
83 |         depth_gt = skimage.transform.resize(depth_gt, self.full_res_shape[::-1], order=0, preserve_range=True, mode='constant')
84 |         if do_flip:
85 |             depth_gt = np.fliplr(depth_gt)
86 |         return depth_gt
87 | 
88 | class KITTIOdomDataset(KITTIDataset):
89 |     """KITTI dataset for odometry training and testing
90 |     """
91 |     def __init__(self, *args, **kwargs):
92 |         super(KITTIOdomDataset, self).__init__(*args, **kwargs)
93 | 
94 |     def get_image_path(self, folder, frame_index, side):
95 |         f_str = "{:06d}{}".format(frame_index, self.img_ext)
96 |         image_path = os.path.join(
97 |             self.data_path,
98 |             "sequences/{:02d}".format(int(folder)),
99 |             "image_{}".format(self.side_map[side]),
100 |             f_str)
101 |         return image_path
102 | 
103 | 
104 | class KITTIDepthDataset(KITTIDataset):
105 |     """KITTI dataset which uses the updated ground truth depth maps
106 |     """
107 |     def __init__(self, *args, **kwargs):
108 |         super(KITTIDepthDataset, self).__init__(*args, **kwargs)
109 | 
110 |     def 
get_image_path(self, folder, frame_index, side): 111 | f_str = "{:010d}{}".format(frame_index, self.img_ext) 112 | image_path = os.path.join( 113 | self.data_path, 114 | folder, 115 | "image_0{}/data".format(self.side_map[side]), 116 | f_str) 117 | return image_path 118 | 119 | def get_depth(self, folder, frame_index, side, do_flip): 120 | f_str = "{:010d}.png".format(frame_index) 121 | depth_path = os.path.join( 122 | self.data_path, 123 | folder, 124 | "proj_depth/groundtruth/image_0{}".format(self.side_map[side]), 125 | f_str) 126 | depth_gt = pil.open(depth_path) 127 | depth_gt = depth_gt.resize(self.full_res_shape, pil.NEAREST) 128 | depth_gt = np.array(depth_gt).astype(np.float32) / 256 129 | if do_flip: 130 | depth_gt = np.fliplr(depth_gt) 131 | return depth_gt 132 | 133 | 134 | -------------------------------------------------------------------------------- /datasets/mono_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright Niantic 2021. Patent Pending. All rights reserved. 2 | # 3 | # This software is licensed under the terms of the ManyDepth licence 4 | # which allows for non-commercial use only, the full terms of which are made 5 | # available in the LICENSE file. 6 | 7 | import os 8 | import random 9 | os.environ["MKL_NUM_THREADS"] = "1" # noqa F402 10 | os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402 11 | os.environ["OMP_NUM_THREADS"] = "1" # noqa F402 12 | 13 | import numpy as np 14 | from PIL import Image # using pillow-simd for increased speed 15 | import cv2 16 | 17 | import torch 18 | import torch.utils.data as data 19 | from torchvision import transforms 20 | 21 | cv2.setNumThreads(0) 22 | 23 | 24 | def pil_loader(path): 25 | # open path as file to avoid ResourceWarning 26 | # (https://github.com/python-pillow/Pillow/issues/835) 27 | with open(path, 'rb') as f: 28 | with Image.open(f) as img: 29 | return img.convert('RGB') 30 | 31 | 32 | class MonoDataset(data.Dataset): 33 | """Superclass for monocular dataloaders 34 | """ 35 | def __init__(self, 36 | data_path, 37 | filenames, 38 | height, 39 | width, 40 | frame_idxs, 41 | num_scales, 42 | is_train=False, 43 | img_ext='.png', 44 | ): 45 | super(MonoDataset, self).__init__() 46 | 47 | self.data_path = data_path 48 | self.filenames = filenames 49 | self.height = height 50 | self.width = width 51 | self.num_scales = num_scales 52 | 53 | self.interp = Image.ANTIALIAS 54 | 55 | self.frame_idxs = frame_idxs 56 | 57 | self.is_train = is_train 58 | self.img_ext = '.png' 59 | 60 | self.loader = pil_loader 61 | self.to_tensor = transforms.ToTensor() 62 | 63 | # We need to specify augmentations differently in newer versions of torchvision. 
64 | # We first try the newer tuple version; if this fails we fall back to scalars 65 | try: 66 | self.brightness = (0.8, 1.2) 67 | self.contrast = (0.8, 1.2) 68 | self.saturation = (0.8, 1.2) 69 | self.hue = (-0.1, 0.1) 70 | transforms.ColorJitter.get_params( 71 | self.brightness, self.contrast, self.saturation, self.hue) 72 | except TypeError: 73 | self.brightness = 0.2 74 | self.contrast = 0.2 75 | self.saturation = 0.2 76 | self.hue = 0.1 77 | 78 | self.resize = {} 79 | for i in range(self.num_scales): 80 | s = 2 ** i 81 | self.resize[i] = transforms.Resize((self.height // s, self.width // s), 82 | interpolation=self.interp) 83 | 84 | self.load_depth = self.check_depth() 85 | 86 | midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") 87 | self.midas_transform = midas_transforms.small_transform 88 | 89 | def preprocess(self, inputs, color_aug, cutout_aug): 90 | """Resize colour images to the required scales and augment if required 91 | We create the color_aug object in advance and apply the same augmentation to all 92 | images in this item. This ensures that all images input to the pose network receive the 93 | same augmentation. 94 | """ 95 | for k in list(inputs): 96 | if "color" in k: 97 | n, im, i = k 98 | for i in range(self.num_scales): 99 | inputs[(n, im, i)] = self.resize[i](inputs[(n, im, i - 1)]) 100 | 101 | for k in list(inputs): 102 | f = inputs[k] 103 | if "color" in k: 104 | n, im, i = k 105 | inputs[(n, im, i)] = self.to_tensor(f) 106 | if i == 0: 107 | inputs[(n+ "_ori", im, i)] = self.midas_transform(np.array(f)) 108 | # check it isn't a blank frame - keep _aug as zeros so we can check for it 109 | if inputs[(n, im, i)].sum() == 0: 110 | inputs[(n + "_aug", im, i)] = inputs[(n, im, i)] 111 | else: 112 | inputs[(n + "_aug", im, i)] = self.to_tensor(color_aug(f)) 113 | #inputs[(n + "_aug", im, i)] = self.to_tensor(color_aug(f)) 114 | 115 | def __len__(self): 116 | return len(self.filenames) 117 | 118 | def load_intrinsics(self, folder, frame_index): 119 | return self.K.copy() 120 | 121 | def __getitem__(self, index): 122 | """Returns a single training item from the dataset as a dictionary. 123 | 124 | Values correspond to torch tensors. 125 | Keys in the dictionary are either strings or tuples: 126 | 127 | ("color", , ) for raw colour images, 128 | ("color_aug", , ) for augmented colour images, 129 | ("K", scale) or ("inv_K", scale) for camera intrinsics, 130 | "depth_gt" for ground truth depth maps 131 | 132 | is: 133 | an integer (e.g. 
0, -1, or 1) representing the temporal step relative to 'index', 134 | 135 | is an integer representing the scale of the image relative to the fullsize image: 136 | -1 images at native resolution as loaded from disk 137 | 0 images resized to (self.width, self.height ) 138 | 1 images resized to (self.width // 2, self.height // 2) 139 | 2 images resized to (self.width // 4, self.height // 4) 140 | 3 images resized to (self.width // 8, self.height // 8) 141 | """ 142 | inputs = {} 143 | 144 | do_color_aug = self.is_train and random.random() > 0.5 145 | do_flip = self.is_train and random.random() > 0.5 146 | #do_cutout = self.is_train and random.random() > 0.5 147 | do_cutout = False 148 | 149 | folder, frame_index, side = self.index_to_folder_and_frame_idx(index) 150 | 151 | poses = {} 152 | if type(self).__name__ in ["CityscapesPreprocessedDataset", "CityscapesEvalDataset"]: 153 | inputs.update(self.get_colors(folder, frame_index, side, do_flip)) 154 | else: 155 | for i in self.frame_idxs: 156 | if i == "s": 157 | other_side = {"r": "l", "l": "r"}[side] 158 | inputs[("color", i, -1)] = self.get_color( 159 | folder, frame_index, other_side, do_flip) 160 | else: 161 | try: 162 | inputs[("color", i, -1)] = self.get_color( 163 | folder, frame_index + i, side, do_flip) 164 | except FileNotFoundError as e: 165 | if i != 0: 166 | # fill with dummy values 167 | inputs[("color", i, -1)] = \ 168 | Image.fromarray(np.zeros((100, 100, 3)).astype(np.uint8)) 169 | poses[i] = None 170 | else: 171 | raise FileNotFoundError(f'Cannot find frame - make sure your ' 172 | f'--data_path is set correctly, or try adding' 173 | f' the --png flag. {e}') 174 | 175 | # adjusting intrinsics to match each scale in the pyramid 176 | for scale in range(self.num_scales): 177 | K = self.load_intrinsics(folder, frame_index) 178 | 179 | K[0, :] *= self.width // (2 ** scale) 180 | K[1, :] *= self.height // (2 ** scale) 181 | 182 | inv_K = np.linalg.pinv(K) 183 | 184 | inputs[("K", scale)] = torch.from_numpy(K) 185 | inputs[("inv_K", scale)] = torch.from_numpy(inv_K) 186 | 187 | if do_color_aug: 188 | color_aug = transforms.ColorJitter( 189 | self.brightness, self.contrast, self.saturation, self.hue) 190 | else: 191 | color_aug = (lambda x: x) 192 | 193 | if do_cutout: 194 | cutout_aug = Cutout(mask_size=64) 195 | else: 196 | cutout_aug = (lambda x: x) 197 | 198 | self.preprocess(inputs, color_aug, cutout_aug) 199 | 200 | for i in self.frame_idxs: 201 | del inputs[("color", i, -1)] 202 | del inputs[("color_aug", i, -1)] 203 | 204 | if self.load_depth and False: 205 | depth_gt = self.get_depth(folder, frame_index, side, do_flip) 206 | inputs["depth_gt"] = np.expand_dims(depth_gt, 0) 207 | inputs["depth_gt"] = torch.from_numpy(inputs["depth_gt"].astype(np.float32)) 208 | 209 | return inputs 210 | 211 | def get_color(self, folder, frame_index, side, do_flip): 212 | raise NotImplementedError 213 | 214 | def check_depth(self): 215 | raise NotImplementedError 216 | 217 | def get_depth(self, folder, frame_index, side, do_flip): 218 | raise NotImplementedError 219 | 220 | class Cutout: 221 | def __init__(self, mask_size): 222 | self.mask_size = mask_size 223 | def __call__(self, image): 224 | height, width = image.size(1), image.size(2) 225 | mask = np.ones((height, width), np.float32) 226 | y = np.random.randint(height) 227 | x = np.random.randint(width) 228 | y1 = np.clip(y - self.mask_size // 2, 0, height) 229 | y2 = np.clip(y + self.mask_size // 2, 0, height) 230 | x1 = np.clip(x - self.mask_size // 2, 0, width) 231 | x2 = np.clip(x 
+ self.mask_size // 2, 0, width) 232 | mask[y1:y2, x1:x2] = 0 233 | mask = torch.from_numpy(mask) 234 | mask = mask.expand_as(image) 235 | image = image * mask 236 | return image -------------------------------------------------------------------------------- /export_gt_depth.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import os 4 | 5 | import argparse 6 | import numpy as np 7 | import PIL.Image as pil 8 | 9 | from utils import readlines 10 | from kitti_utils import generate_depth_map 11 | 12 | 13 | def export_gt_depths_kitti(): 14 | 15 | parser = argparse.ArgumentParser(description='export_gt_depth') 16 | 17 | parser.add_argument('--data_path', 18 | type=str, 19 | help='path to the root of the KITTI data', 20 | required=True) 21 | parser.add_argument('--split', 22 | type=str, 23 | help='which split to export gt from', 24 | required=True, 25 | choices=["eigen", "eigen_benchmark"]) 26 | opt = parser.parse_args() 27 | 28 | split_folder = os.path.join(os.path.dirname(__file__), "splits", opt.split) 29 | lines = readlines(os.path.join(split_folder, "test_files.txt")) 30 | print(len(lines)) 31 | print("Exporting ground truth depths for {}".format(opt.split)) 32 | 33 | gt_depths = [] 34 | for line in lines: 35 | 36 | folder, frame_id, _ = line.split() 37 | frame_id = int(frame_id) 38 | 39 | if opt.split == "eigen": 40 | calib_dir = os.path.join(opt.data_path, folder.split("/")[0]) 41 | velo_filename = os.path.join(opt.data_path, folder, 42 | "velodyne_points/data", "{:010d}.bin".format(frame_id)) 43 | gt_depth = generate_depth_map(calib_dir, velo_filename, 2, True) 44 | elif opt.split == "eigen_benchmark": 45 | gt_depth_path = os.path.join(opt.data_path, folder[11:], "proj_depth", 46 | "groundtruth", "image_02", "{:010d}.png".format(frame_id)) 47 | gt_depth = np.array(pil.open(gt_depth_path)).astype(np.float32) / 256 48 | 49 | gt_depths.append(gt_depth.astype(np.float32)) 50 | 51 | output_path = os.path.join(split_folder, "gt_depths.npz") 52 | 53 | print("Saving to {}".format(opt.split)) 54 | 55 | np.savez_compressed(output_path, data=np.array(gt_depths)) 56 | 57 | 58 | if __name__ == "__main__": 59 | export_gt_depths_kitti() 60 | -------------------------------------------------------------------------------- /kitti_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import os 4 | import numpy as np 5 | from collections import Counter 6 | 7 | 8 | def load_velodyne_points(filename): 9 | """Load 3D point cloud from KITTI file format 10 | (adapted from https://github.com/hunse/kitti) 11 | """ 12 | points = np.fromfile(filename, dtype=np.float32).reshape(-1, 4) 13 | points[:, 3] = 1.0 # homogeneous 14 | return points 15 | 16 | 17 | def read_calib_file(path): 18 | """Read KITTI calibration file 19 | (from https://github.com/hunse/kitti) 20 | """ 21 | float_chars = set("0123456789.e+- ") 22 | data = {} 23 | with open(path, 'r') as f: 24 | for line in f.readlines(): 25 | key, value = line.split(':', 1) 26 | value = value.strip() 27 | data[key] = value 28 | if float_chars.issuperset(value): 29 | # try to cast to float array 30 | try: 31 | data[key] = np.array(list(map(float, value.split(' ')))) 32 | except ValueError: 33 | # casting error: data[key] already eq. 
value, so pass 34 | pass 35 | 36 | return data 37 | 38 | 39 | def sub2ind(matrixSize, rowSub, colSub): 40 | """Convert row, col matrix subscripts to linear indices 41 | """ 42 | m, n = matrixSize 43 | return rowSub * (n-1) + colSub - 1 44 | 45 | 46 | def generate_depth_map(calib_dir, velo_filename, cam=2, vel_depth=False): 47 | """Generate a depth map from velodyne data 48 | """ 49 | # load calibration files 50 | cam2cam = read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt')) 51 | velo2cam = read_calib_file(os.path.join(calib_dir, 'calib_velo_to_cam.txt')) 52 | velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis])) 53 | velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0]))) 54 | 55 | # get image shape 56 | im_shape = cam2cam["S_rect_02"][::-1].astype(np.int32) 57 | 58 | # compute projection matrix velodyne->image plane 59 | R_cam2rect = np.eye(4) 60 | R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3) 61 | P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3, 4) 62 | P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam) 63 | 64 | # load velodyne points and remove all behind image plane (approximation) 65 | # each row of the velodyne data is forward, left, up, reflectance 66 | velo = load_velodyne_points(velo_filename) 67 | velo = velo[velo[:, 0] >= 0, :] 68 | 69 | # project the points to the camera 70 | velo_pts_im = np.dot(P_velo2im, velo.T).T 71 | velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis] 72 | 73 | if vel_depth: 74 | velo_pts_im[:, 2] = velo[:, 0] 75 | 76 | # check if in bounds 77 | # use minus 1 to get the exact same value as KITTI matlab code 78 | velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1 79 | velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1 80 | val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0) 81 | val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0]) 82 | velo_pts_im = velo_pts_im[val_inds, :] 83 | 84 | # project to image 85 | depth = np.zeros((im_shape[:2])) 86 | depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2] 87 | 88 | # find the duplicate points and choose the closest depth 89 | inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0]) 90 | dupe_inds = [item for item, count in Counter(inds).items() if count > 1] 91 | for dd in dupe_inds: 92 | pts = np.where(inds == dd)[0] 93 | x_loc = int(velo_pts_im[pts[0], 0]) 94 | y_loc = int(velo_pts_im[pts[0], 1]) 95 | depth[y_loc, x_loc] = velo_pts_im[pts, 2].min() 96 | depth[depth < 0] = 0 97 | 98 | return depth 99 | -------------------------------------------------------------------------------- /layers.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import numpy as np 4 | 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | 9 | 10 | def disp_to_depth(disp, min_depth, max_depth): 11 | """Convert network's sigmoid output into depth prediction 12 | The formula for this conversion is given in the 'additional considerations' 13 | section of the paper. 
14 | """ 15 | min_disp = 1 / max_depth 16 | max_disp = 1 / min_depth 17 | scaled_disp = min_disp + (max_disp - min_disp) * disp 18 | depth = 1 / scaled_disp 19 | return scaled_disp, depth 20 | 21 | 22 | def transformation_from_parameters(axisangle, translation, invert=False): 23 | """Convert the pose_decoder network's (axisangle, translation) output into a 4x4 matrix 24 | """ 25 | 26 | R = rot_from_axisangle(axisangle) 27 | t = translation.clone() 28 | # translation[12 x 1 x 3] 29 | # axisnagle[12 x 1 x 3] 30 | # R [12 X 4 x 4] 31 | # t [12 x 1 x3] 32 | 33 | if invert: 34 | R = R.transpose(1, 2) 35 | t *= -1 36 | 37 | T = get_translation_matrix(t) 38 | #T [12 X 4 X 4] 39 | if invert: 40 | M = torch.matmul(R, T) 41 | else: 42 | M = torch.matmul(T, R) 43 | #M [12 X 4 X 4] 44 | return M 45 | 46 | 47 | def get_translation_matrix(translation_vector): 48 | """Convert a translation vector into a 4x4 transformation matrix 49 | """ 50 | T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device) 51 | 52 | t = translation_vector.contiguous().view(-1, 3, 1) 53 | 54 | T[:, 0, 0] = 1 55 | T[:, 1, 1] = 1 56 | T[:, 2, 2] = 1 57 | T[:, 3, 3] = 1 58 | T[:, :3, 3, None] = t 59 | return T 60 | 61 | 62 | def rot_from_axisangle(vec): 63 | """Convert an axisangle rotation into a 4x4 transformation matrix 64 | (adapted from https://github.com/Wallacoloo/printipi) 65 | Input 'vec' has to be Bx1x3 66 | """ 67 | 68 | angle = torch.norm(vec, 2, 2, True) 69 | axis = vec / (angle + 1e-7) 70 | # angle [12 x 1 x 1] 71 | # axis [12 x 1 x 3] 72 | ca = torch.cos(angle) 73 | sa = torch.sin(angle) 74 | C = 1 - ca 75 | x = axis[..., 0].unsqueeze(1) 76 | y = axis[..., 1].unsqueeze(1) 77 | z = axis[..., 2].unsqueeze(1) 78 | #x,y,z [12 x 1 x 1] 79 | 80 | xs = x * sa 81 | ys = y * sa 82 | zs = z * sa 83 | xC = x * C 84 | yC = y * C 85 | zC = z * C 86 | xyC = x * yC 87 | yzC = y * zC 88 | zxC = z * xC 89 | 90 | rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device) 91 | 92 | rot[:, 0, 0] = torch.squeeze(x * xC + ca) 93 | rot[:, 0, 1] = torch.squeeze(xyC - zs) 94 | rot[:, 0, 2] = torch.squeeze(zxC + ys) 95 | rot[:, 1, 0] = torch.squeeze(xyC + zs) 96 | rot[:, 1, 1] = torch.squeeze(y * yC + ca) 97 | rot[:, 1, 2] = torch.squeeze(yzC - xs) 98 | rot[:, 2, 0] = torch.squeeze(zxC - ys) 99 | rot[:, 2, 1] = torch.squeeze(yzC + xs) 100 | rot[:, 2, 2] = torch.squeeze(z * zC + ca) 101 | rot[:, 3, 3] = 1 102 | 103 | return rot 104 | 105 | 106 | class ConvBlock(nn.Module): 107 | """Layer to perform a convolution followed by ELU 108 | """ 109 | def __init__(self, in_channels, out_channels): 110 | super(ConvBlock, self).__init__() 111 | 112 | self.conv = Conv3x3(in_channels, out_channels) 113 | self.nonlin = nn.ELU(inplace=True)#why use ELU? 
114 | 115 | def forward(self, x): 116 | out = self.conv(x) 117 | out = self.nonlin(out) 118 | return out 119 | 120 | 121 | class Conv3x3(nn.Module): 122 | """Layer to pad and convolve input 123 | """ 124 | def __init__(self, in_channels, out_channels, use_refl=True): 125 | super(Conv3x3, self).__init__() 126 | 127 | if use_refl: 128 | self.pad = nn.ReflectionPad2d(1) 129 | else: 130 | self.pad = nn.ZeroPad2d(1) 131 | self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) 132 | 133 | def forward(self, x): 134 | out = self.pad(x) 135 | out = self.conv(out) 136 | return out 137 | 138 | 139 | class BackprojectDepth(nn.Module): 140 | """Layer to transform a depth image into a point cloud 141 | """ 142 | def __init__(self, batch_size, height, width): 143 | super(BackprojectDepth, self).__init__() 144 | 145 | self.batch_size = batch_size 146 | self.height = height 147 | self.width = width 148 | 149 | meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy') 150 | self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32) 151 | self.id_coords = nn.Parameter(torch.from_numpy(self.id_coords), 152 | requires_grad=False) 153 | # self.id_coords has two elements, 154 | self.ones = nn.Parameter(torch.ones(self.batch_size, 1, self.height * self.width), 155 | requires_grad=False) 156 | 157 | self.pix_coords = torch.unsqueeze(torch.stack( 158 | [self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0) 159 | self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1) 160 | self.pix_coords = nn.Parameter(torch.cat([self.pix_coords, self.ones], 1), 161 | requires_grad=False) 162 | def forward(self, depth, inv_K): 163 | cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords) 164 | cam_points = depth.view(self.batch_size, 1, -1) * cam_points 165 | cam_points = torch.cat([cam_points, self.ones], 1) 166 | return cam_points 167 | 168 | class Project3D(nn.Module): 169 | """Layer which projects 3D points into a camera with intrinsics K and at position T 170 | """ 171 | def __init__(self, batch_size, height, width, eps=1e-7): 172 | super(Project3D, self).__init__() 173 | self.batch_size = batch_size 174 | self.height = height 175 | self.width = width 176 | self.eps = eps 177 | def forward(self, points, K, T): 178 | P = torch.matmul(K, T)[:, :3, :] 179 | cam_points = torch.matmul(P, points) 180 | pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps) 181 | pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.width) 182 | pix_coords = pix_coords.permute(0, 2, 3, 1) 183 | pix_coords[..., 0] /= self.width - 1 184 | pix_coords[..., 1] /= self.height - 1 185 | pix_coords = (pix_coords - 0.5) * 2 186 | return pix_coords 187 | 188 | def upsample(x): 189 | """Upsample input tensor by a factor of 2 190 | """ 191 | return F.interpolate(x, scale_factor=2, mode="nearest") 192 | 193 | def get_smooth_loss(disp, img): 194 | """Computes the smoothness loss for a disparity image 195 | The color image is used for edge-aware smoothness 196 | """ 197 | grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:]) 198 | grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :]) 199 | grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True) 200 | grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True) 201 | grad_disp_x *= torch.exp(-grad_img_x) 202 | grad_disp_y *= torch.exp(-grad_img_y) 203 | return grad_disp_x.mean() + grad_disp_y.mean() 204 | 205 | class SSIM(nn.Module): 206 | 
"""Layer to compute the SSIM loss between a pair of images 207 | """ 208 | def __init__(self): 209 | super(SSIM, self).__init__() 210 | self.mu_x_pool = nn.AvgPool2d(3, 1) 211 | self.mu_y_pool = nn.AvgPool2d(3, 1) 212 | self.sig_x_pool = nn.AvgPool2d(3, 1) 213 | self.sig_y_pool = nn.AvgPool2d(3, 1) 214 | self.sig_xy_pool = nn.AvgPool2d(3, 1) 215 | 216 | self.refl = nn.ReflectionPad2d(1) 217 | 218 | self.C1 = 0.01 ** 2#??why 0.01 219 | self.C2 = 0.03 ** 2 220 | 221 | def forward(self, x, y): 222 | x = self.refl(x) 223 | y = self.refl(y) 224 | 225 | mu_x = self.mu_x_pool(x) 226 | mu_y = self.mu_y_pool(y) 227 | sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2 228 | sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2 229 | sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y 230 | 231 | SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2) 232 | SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2) 233 | 234 | return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) 235 | 236 | 237 | def compute_depth_errors(gt, pred): 238 | """Computation of error metrics between predicted and ground truth depths 239 | """ 240 | thresh = torch.max((gt / pred), (pred / gt)) 241 | a1 = (thresh < 1.25 ).float().mean() 242 | a2 = (thresh < 1.25 ** 2).float().mean() 243 | a3 = (thresh < 1.25 ** 3).float().mean() 244 | 245 | rmse = (gt - pred) ** 2 246 | rmse = torch.sqrt(rmse.mean()) 247 | 248 | rmse_log = (torch.log(gt) - torch.log(pred)) ** 2 249 | rmse_log = torch.sqrt(rmse_log.mean()) 250 | 251 | abs_rel = torch.mean(torch.abs(gt - pred) / gt) 252 | 253 | sq_rel = torch.mean((gt - pred) ** 2 / gt) 254 | 255 | return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 256 | 257 | class Backproject(nn.Module): 258 | def __init__(self, batch_size, height, width): 259 | super(Backproject, self).__init__() 260 | self.batch_size = batch_size 261 | self.height = height 262 | self.width = width 263 | meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy') 264 | self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32) 265 | self.id_coords = torch.from_numpy(self.id_coords) 266 | self.ones = torch.ones(self.batch_size, 1, self.height * self.width) 267 | self.pix_coords = torch.unsqueeze(torch.stack([self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0) 268 | self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1) 269 | self.pix_coords = torch.cat([self.pix_coords, self.ones], 1) 270 | def forward(self, depth, inv_K): 271 | cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords.cuda()) 272 | cam_points = depth.view(self.batch_size, 1, -1) * cam_points 273 | cam_points = torch.cat([cam_points, self.ones.cuda()], 1) 274 | return cam_points 275 | 276 | 277 | class Project(nn.Module): 278 | def __init__(self, batch_size, height, width, eps=1e-7): 279 | super(Project, self).__init__() 280 | self.batch_size = batch_size 281 | self.height = height 282 | self.width = width 283 | self.eps = eps 284 | def forward(self, points, K, T): 285 | P = torch.matmul(K, T)[:, :3, :] 286 | cam_points = torch.matmul(P, points) 287 | pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps) 288 | pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.width) 289 | point_depth = cam_points[:, 2, :].view(self.batch_size, 1, self.height, self.width) 290 | pix_coords = pix_coords.permute(0, 2, 3, 1) 291 | pix_coords[..., 0] /= self.width - 1 292 | pix_coords[..., 1] /= self.height - 1 293 | pix_coords = (pix_coords - 0.5) * 2 294 | return pix_coords, point_depth 
--------------------------------------------------------------------------------
/networks_dev/__init__.py:
--------------------------------------------------------------------------------
1 | from .resnet_encoder import ResnetEncoder
2 | from .depth_decoder import DepthDecoder
3 | from .pose_decoder import PoseDecoder
--------------------------------------------------------------------------------
/networks_dev/depth_decoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.autograd import Variable
5 | 
6 | class Conv1x1(nn.Module):
7 |     def __init__(self, in_channels, out_channels, bias=False):
8 |         super(Conv1x1, self).__init__()
9 |         self.conv = nn.Conv2d(int(in_channels), int(out_channels), kernel_size=1, stride=1, bias=bias)
10 |     def forward(self, x):
11 |         out = self.conv(x)
12 |         return out
13 | 
14 | class Conv3x3(nn.Module):
15 |     def __init__(self, in_channels, out_channels, use_refl=True):
16 |         super(Conv3x3, self).__init__()
17 |         if use_refl:
18 |             self.pad = nn.ReflectionPad2d(1)
19 |         else:
20 |             self.pad = nn.ZeroPad2d(1)
21 |         self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
22 | def forward(self, x): 23 | out = self.pad(x) 24 | out = self.conv(out) 25 | return out 26 | 27 | class CRPBlock(nn.Module): 28 | def __init__(self, in_planes, out_planes, n_stages): 29 | super(CRPBlock, self).__init__() 30 | for i in range(n_stages): 31 | setattr(self, '{}_{}'.format(i + 1, 'pointwise'), Conv1x1(in_planes if (i == 0) else out_planes, out_planes, False)) 32 | self.stride = 1 33 | self.n_stages = n_stages 34 | self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2) 35 | def forward(self, x): 36 | top = x 37 | for i in range(self.n_stages): 38 | top = self.maxpool(top) 39 | top = getattr(self, '{}_{}'.format(i + 1, 'pointwise'))(top) 40 | x = top + x 41 | return x 42 | 43 | class ChannelAttention(nn.Module): 44 | def __init__(self, in_planes, ratio=16): 45 | super(ChannelAttention, self).__init__() 46 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 47 | self.fc = nn.Sequential( 48 | nn.Linear(in_planes,in_planes // ratio, bias = False), 49 | nn.ReLU(inplace = True), 50 | nn.Linear(in_planes // ratio, in_planes, bias = False)) 51 | self.sigmoid = nn.Sigmoid() 52 | for m in self.modules(): 53 | if isinstance(m, nn.Conv2d): 54 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 55 | def forward(self, in_feature): 56 | x = in_feature 57 | b, c, _, _ = in_feature.size() 58 | avg_out = self.fc(self.avg_pool(x).view(b,c)).view(b, c, 1, 1) 59 | out = avg_out 60 | return self.sigmoid(out).expand_as(in_feature) * in_feature 61 | 62 | class Attention_Module1(nn.Module): 63 | def __init__(self, high_feature_channel, output_channel = None): 64 | super(Attention_Module1, self).__init__() 65 | in_channel = high_feature_channel 66 | out_channel = high_feature_channel 67 | if output_channel is not None: 68 | out_channel = output_channel 69 | channel = in_channel 70 | self.ca = ChannelAttention(channel) 71 | self.conv_se = nn.Conv2d(in_channels = in_channel, out_channels = out_channel, kernel_size = 1, stride = 1, padding = 0 ) 72 | self.relu = nn.ReLU(inplace = True) 73 | def forward(self, high_features): 74 | features = high_features 75 | features = self.ca(features) 76 | return self.relu(self.conv_se(features)) 77 | 78 | class Attention_Module3(nn.Module): 79 | def __init__(self, high_feature_channel, output_channel = None): 80 | super(Attention_Module3, self).__init__() 81 | in_channel = high_feature_channel 82 | out_channel = high_feature_channel 83 | if output_channel is not None: 84 | out_channel = output_channel 85 | channel = in_channel 86 | self.ca = ChannelAttention(channel) 87 | self.conv_se = nn.Conv2d(in_channels = in_channel, out_channels = out_channel, kernel_size = 3, stride = 1, padding = 1 ) 88 | self.relu = nn.ReLU(inplace = True) 89 | def forward(self, high_features): 90 | features = high_features 91 | features = self.ca(features) 92 | return self.relu(self.conv_se(features)) 93 | 94 | def upsample(x): 95 | return F.interpolate(x, scale_factor=2, mode="nearest") 96 | 97 | class DepthDecoder(nn.Module): 98 | def __init__(self, num_ch_enc, embedder, embedder_out_dim, output_channels, use_alpha): 99 | super(DepthDecoder, self).__init__() 100 | bottleneck = [512, 256, 256, 256, 256, output_channels] 101 | stage = 4 102 | self.do = nn.Dropout(p=0.5) 103 | self.embedder = embedder 104 | self.E = embedder_out_dim 105 | self.use_alpha = use_alpha 106 | self.sigma_dropout_rate = 0.0 107 | 108 | self.attention1 = False 109 | self.attention2 = False 110 | self.attention3 = True 111 | """ 112 | Structure: 113 | x4 = self.reduce4(l4) # 1 114 | # Merge 115 | 
x4 = self.iconv4(x4) # 2 116 | x4 = self.crp4(x4) 117 | x4 = self.merge4(x4) # 3 118 | """ 119 | 120 | self.skip_link = True 121 | self.p1, self.p2, self.p3 = [1.], [0.5], [0.1] 122 | #self.p1, self.p2, self.p3 = [1.], [1.], [1.] 123 | 124 | self.elu = 1 125 | self.soft_plus = False 126 | if self.elu == 0: 127 | self.activation_func = nn.ELU() 128 | elif self.elu == 1: 129 | self.activation_func = nn.LeakyReLU() 130 | elif self.elu == 2: 131 | self.activation_func = nn.ReLU() 132 | 133 | if not self.attention1: 134 | # For new feature 135 | self.reduce4 = Conv1x1(num_ch_enc[4], bottleneck[0], bias=False) 136 | self.reduce3 = Conv1x1(num_ch_enc[3], bottleneck[1], bias=False) 137 | self.reduce2 = Conv1x1(num_ch_enc[2], bottleneck[2], bias=False) 138 | self.reduce1 = Conv1x1(num_ch_enc[1], bottleneck[3], bias=False) 139 | self.reduce0 = Conv1x1(num_ch_enc[0], bottleneck[4], bias=False) 140 | else: 141 | self.reduce4 = Attention_Module1(num_ch_enc[4], bottleneck[0]) 142 | self.reduce3 = Attention_Module1(num_ch_enc[3], bottleneck[1]) 143 | self.reduce2 = Attention_Module1(num_ch_enc[2], bottleneck[2]) 144 | self.reduce1 = Attention_Module1(num_ch_enc[1], bottleneck[3]) 145 | self.reduce0 = Attention_Module1(num_ch_enc[0], bottleneck[4]) 146 | 147 | if not self.skip_link: 148 | if not self.attention2: 149 | self.iconv4 = Conv3x3(bottleneck[0], bottleneck[1]) 150 | self.iconv3 = Conv3x3(bottleneck[1]*2, bottleneck[2]) 151 | self.iconv2 = Conv3x3(bottleneck[2]*2, bottleneck[3]) 152 | self.iconv1 = Conv3x3(bottleneck[3]*2, bottleneck[4]) 153 | self.iconv0 = Conv3x3(bottleneck[4]*2, bottleneck[5]) 154 | else: 155 | self.iconv4 = Attention_Module3(bottleneck[0], bottleneck[1]) 156 | self.iconv3 = Attention_Module3(bottleneck[1]*2, bottleneck[2]) 157 | self.iconv2 = Attention_Module3(bottleneck[2]*2, bottleneck[3]) 158 | self.iconv1 = Attention_Module3(bottleneck[3]*2, bottleneck[4]) 159 | self.iconv0 = Attention_Module3(bottleneck[4]*2, bottleneck[5]) 160 | else: 161 | if not self.attention2: 162 | self.iconv4 = Conv3x3(bottleneck[0], bottleneck[1]) 163 | self.iconv3 = Conv3x3(bottleneck[1]*2+output_channels, bottleneck[2]) 164 | self.iconv2 = Conv3x3(bottleneck[2]*2+output_channels, bottleneck[3]) 165 | self.iconv1 = Conv3x3(bottleneck[3]*2+output_channels, bottleneck[4]) 166 | self.iconv0 = Conv3x3(bottleneck[4]*2+output_channels, bottleneck[5]) 167 | else: 168 | self.iconv4 = Attention_Module3(bottleneck[0], bottleneck[1]) 169 | self.iconv3 = Attention_Module3(bottleneck[1]*2+output_channels, bottleneck[2]) 170 | self.iconv2 = Attention_Module3(bottleneck[2]*2+output_channels, bottleneck[3]) 171 | self.iconv1 = Attention_Module3(bottleneck[3]*2+output_channels, bottleneck[4]) 172 | self.iconv0 = Conv3x3(bottleneck[4]*2+output_channels, bottleneck[5]) 173 | """ 174 | self.crp4 = self._make_crp(bottleneck[1], bottleneck[1], stage) 175 | self.crp3 = self._make_crp(bottleneck[2], bottleneck[2], stage) 176 | self.crp2 = self._make_crp(bottleneck[3], bottleneck[3], stage) 177 | self.crp1 = self._make_crp(bottleneck[4], bottleneck[4], stage) 178 | self.crp0 = self._make_crp(bottleneck[5], bottleneck[5], stage) 179 | """ 180 | if not self.attention3: 181 | self.merge4 = Conv3x3(bottleneck[1], bottleneck[1]) 182 | self.merge3 = Conv3x3(bottleneck[2], bottleneck[2]) 183 | self.merge2 = Conv3x3(bottleneck[3], bottleneck[3]) 184 | self.merge1 = Conv3x3(bottleneck[4], bottleneck[4]) 185 | self.merge0 = Conv3x3(bottleneck[5], bottleneck[5]) 186 | else: 187 | # feature fusion 188 | self.merge4 = 
Attention_Module3(bottleneck[1], bottleneck[1]) 189 | self.merge3 = Attention_Module3(bottleneck[2], bottleneck[2]) 190 | self.merge2 = Attention_Module3(bottleneck[3], bottleneck[3]) 191 | self.merge1 = Attention_Module3(bottleneck[4], bottleneck[4]) 192 | self.merge0 = Attention_Module3(bottleneck[5], bottleneck[5]) 193 | 194 | # disp 195 | if not self.use_alpha: 196 | self.disp4 = nn.Sequential(Conv3x3(bottleneck[1], output_channels), nn.ReLU()) 197 | self.disp3 = nn.Sequential(Conv3x3(bottleneck[2], output_channels), nn.ReLU()) 198 | self.disp2 = nn.Sequential(Conv3x3(bottleneck[3], output_channels), nn.ReLU()) 199 | self.disp1 = nn.Sequential(Conv3x3(bottleneck[4], output_channels), nn.ReLU()) 200 | self.disp0 = nn.Sequential(Conv3x3(bottleneck[5], output_channels), nn.ReLU()) 201 | else: 202 | self.disp4 = nn.Sequential(Conv3x3(bottleneck[1], output_channels), nn.Sigmoid()) 203 | self.disp3 = nn.Sequential(Conv3x3(bottleneck[2], output_channels), nn.Sigmoid()) 204 | self.disp2 = nn.Sequential(Conv3x3(bottleneck[3], output_channels), nn.Sigmoid()) 205 | self.disp1 = nn.Sequential(Conv3x3(bottleneck[4], output_channels), nn.Sigmoid()) 206 | self.disp0 = nn.Sequential(Conv3x3(bottleneck[5], output_channels), nn.Sigmoid()) 207 | 208 | if self.soft_plus: 209 | self.disp4 = nn.Sequential(Conv3x3(bottleneck[1], output_channels), nn.Softplus()) 210 | self.disp3 = nn.Sequential(Conv3x3(bottleneck[2], output_channels), nn.Softplus()) 211 | self.disp2 = nn.Sequential(Conv3x3(bottleneck[3], output_channels), nn.Softplus()) 212 | self.disp1 = nn.Sequential(Conv3x3(bottleneck[4], output_channels), nn.Softplus()) 213 | self.disp0 = nn.Sequential(Conv3x3(bottleneck[5], output_channels), nn.Softplus()) 214 | 215 | def _make_crp(self, in_planes, out_planes, stages): 216 | layers = [CRPBlock(in_planes, out_planes, stages)] 217 | return nn.Sequential(*layers) 218 | 219 | def forward(self, input_features, disparity): 220 | B, S = disparity.size() 221 | disp_list = self.embedder(disparity.reshape(B * S, 1)).unsqueeze(2).unsqueeze(3) 222 | 223 | self.outputs = {} 224 | l0, l1, l2, l3, l4 = input_features 225 | if self.training: 226 | l4, l3 = self.do(l4), self.do(l3) 227 | 228 | x4 = self.reduce4(l4) 229 | x4 = self.iconv4(x4) 230 | x4 = self.activation_func(x4) 231 | #x4 = self.crp4(x4) 232 | x4 = self.merge4(x4) 233 | x4 = self.activation_func(x4) 234 | x4 = upsample(x4) 235 | disp4 = self.disp4(x4) 236 | 237 | x3 = self.reduce3(l3) 238 | _, _, H_feat, W_feat = x3.size() 239 | x3 = torch.cat((self.p1[0]*x3, self.p2[0]*x4, self.p3[0]*disp4), 1) 240 | x3 = self.iconv3(x3) 241 | x3 = self.activation_func(x3) 242 | #x3 = self.crp3(x3) 243 | x3 = self.merge3(x3) 244 | x3 = self.activation_func(x3) 245 | x3 = upsample(x3) 246 | disp3 = self.disp3(x3) 247 | if self.sigma_dropout_rate > 0.0 and self.training: 248 | disp3 = F.dropout2d(disp3, p=self.sigma_dropout_rate) 249 | 250 | x2 = self.reduce2(l2) 251 | _, _, H_feat, W_feat = x2.size() 252 | if not self.skip_link: 253 | x2 = torch.cat((self.p1[0]*x2, self.p2[0]*x3), 1) 254 | else: 255 | x2 = torch.cat((self.p1[0]*x2, self.p2[0]*x3, self.p3[0]*disp3), 1) 256 | x2 = self.iconv2(x2) 257 | x2 = self.activation_func(x2) 258 | #x2 = self.crp2(x2) 259 | x2 = self.merge2(x2) 260 | x2 = self.activation_func(x2) 261 | x2 = upsample(x2) 262 | disp2 = self.disp2(x2) 263 | if self.sigma_dropout_rate > 0.0 and self.training: 264 | disp2 = F.dropout2d(disp2, p=self.sigma_dropout_rate) 265 | 266 | x1 = self.reduce1(l1) 267 | _, _, H_feat, W_feat = x1.size() 268 | if not 
self.skip_link: 269 | x1 = torch.cat((self.p1[0]*x1, self.p2[0]*x2), 1) 270 | else: 271 | x1 = torch.cat((self.p1[0]*x1, self.p2[0]*x2, self.p3[0]*disp2), 1) 272 | x1 = self.iconv1(x1) 273 | x1 = self.activation_func(x1) 274 | #x1 = self.crp1(x1) 275 | x1 = self.merge1(x1) 276 | x1 = self.activation_func(x1) 277 | x1 = upsample(x1) 278 | disp1 = self.disp1(x1) 279 | if self.sigma_dropout_rate > 0.0 and self.training: 280 | disp1 = F.dropout2d(disp1, p=self.sigma_dropout_rate) 281 | 282 | x0 = self.reduce0(l0) 283 | _, _, H_feat, W_feat = x0.size() 284 | if not self.skip_link: 285 | x0 = torch.cat((self.p1[0]*x0, self.p2[0]*x1), 1) 286 | else: 287 | x0 = torch.cat((self.p1[0]*x0, self.p2[0]*x1, self.p3[0]*disp1), 1) 288 | x0 = self.iconv0(x0) 289 | x0 = self.activation_func(x0) 290 | #x0 = self.crp0(x0) 291 | x0 = self.merge0(x0) 292 | x0 = self.activation_func(x0) 293 | x0 = upsample(x0) 294 | disp0 = self.disp0(x0) 295 | if self.sigma_dropout_rate > 0.0 and self.training: 296 | disp0 = F.dropout2d(disp0, p=self.sigma_dropout_rate) 297 | 298 | H_mpi, W_mpi = disp3.size(2), disp3.size(3) 299 | if not self.use_alpha: 300 | self.outputs[("disp", 3)] = disp3.view(B, S, H_mpi, W_mpi).unsqueeze(2) + 1e-8 301 | elif self.use_alpha: 302 | self.outputs[("disp", 3)] = disp3.view(B, S, H_mpi, W_mpi).unsqueeze(2) 303 | 304 | H_mpi, W_mpi = disp2.size(2), disp2.size(3) 305 | if not self.use_alpha: 306 | self.outputs[("disp", 2)] = disp2.view(B, S, H_mpi, W_mpi).unsqueeze(2) + 1e-8 307 | elif self.use_alpha: 308 | self.outputs[("disp", 2)] = disp2.view(B, S, H_mpi, W_mpi).unsqueeze(2) 309 | 310 | H_mpi, W_mpi = disp1.size(2), disp1.size(3) 311 | if not self.use_alpha: 312 | self.outputs[("disp", 1)] = disp1.view(B, S, H_mpi, W_mpi).unsqueeze(2) + 1e-8 313 | elif self.use_alpha: 314 | self.outputs[("disp", 1)] = disp1.view(B, S, H_mpi, W_mpi).unsqueeze(2) 315 | 316 | H_mpi, W_mpi = disp0.size(2), disp0.size(3) 317 | if not self.use_alpha: 318 | self.outputs[("disp", 0)] = disp0.view(B, S, H_mpi, W_mpi).unsqueeze(2) + 1e-8 319 | elif self.use_alpha: 320 | self.outputs[("disp", 0)] = disp0.view(B, S, H_mpi, W_mpi).unsqueeze(2) 321 | return self.outputs -------------------------------------------------------------------------------- /networks_dev/layers.py: -------------------------------------------------------------------------------- 1 | # Copyright Niantic 2019. Patent Pending. All rights reserved. 2 | # 3 | # This software is licensed under the terms of the Monodepth2 licence 4 | # which allows for non-commercial use only, the full terms of which are made 5 | # available in the LICENSE file. 
6 | 7 | from __future__ import absolute_import, division, print_function 8 | import numpy as np 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | 13 | def transformation_from_parameters(axisangle, translation, invert=False): 14 | """Convert the network's (axisangle, translation) output into a 4x4 matrix 15 | """ 16 | R = rot_from_axisangle(axisangle) 17 | t = translation.clone() 18 | if invert: 19 | R = R.transpose(1, 2) 20 | t *= -1 21 | T = get_translation_matrix(t) 22 | if invert: 23 | M = torch.matmul(R, T) 24 | else: 25 | M = torch.matmul(T, R) 26 | return M 27 | 28 | def get_translation_matrix(translation_vector): 29 | """Convert a translation vector into a 4x4 transformation matrix 30 | """ 31 | T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device) 32 | t = translation_vector.contiguous().view(-1, 3, 1) 33 | T[:, 0, 0] = 1 34 | T[:, 1, 1] = 1 35 | T[:, 2, 2] = 1 36 | T[:, 3, 3] = 1 37 | T[:, :3, 3, None] = t 38 | return T 39 | 40 | def rot_from_axisangle(vec): 41 | angle = torch.norm(vec, 2, 2, True) 42 | axis = vec / (angle + 1e-7) 43 | ca = torch.cos(angle) 44 | sa = torch.sin(angle) 45 | C = 1 - ca 46 | x = axis[..., 0].unsqueeze(1) 47 | y = axis[..., 1].unsqueeze(1) 48 | z = axis[..., 2].unsqueeze(1) 49 | xs = x * sa 50 | ys = y * sa 51 | zs = z * sa 52 | xC = x * C 53 | yC = y * C 54 | zC = z * C 55 | xyC = x * yC 56 | yzC = y * zC 57 | zxC = z * xC 58 | rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device) 59 | rot[:, 0, 0] = torch.squeeze(x * xC + ca) 60 | rot[:, 0, 1] = torch.squeeze(xyC - zs) 61 | rot[:, 0, 2] = torch.squeeze(zxC + ys) 62 | rot[:, 1, 0] = torch.squeeze(xyC + zs) 63 | rot[:, 1, 1] = torch.squeeze(y * yC + ca) 64 | rot[:, 1, 2] = torch.squeeze(yzC - xs) 65 | rot[:, 2, 0] = torch.squeeze(zxC - ys) 66 | rot[:, 2, 1] = torch.squeeze(yzC + xs) 67 | rot[:, 2, 2] = torch.squeeze(z * zC + ca) 68 | rot[:, 3, 3] = 1 69 | return rot 70 | 71 | class ConvBlock(nn.Module): 72 | """Layer to perform a convolution followed by ELU 73 | """ 74 | def __init__(self, in_channels, out_channels): 75 | super(ConvBlock, self).__init__() 76 | self.conv = Conv3x3(in_channels, out_channels) 77 | self.nonlin = nn.ELU(inplace=True) 78 | self.bn = nn.BatchNorm2d(out_channels) 79 | def forward(self, x): 80 | out = self.conv(x) 81 | out = self.bn(out) 82 | out = self.nonlin(out) 83 | return out 84 | 85 | class Conv3x3(nn.Module): 86 | """Layer to pad and convolve input.""" 87 | def __init__(self, in_channels, out_channels, use_refl=True): 88 | super(Conv3x3, self).__init__() 89 | if use_refl: 90 | self.pad = nn.ReflectionPad2d(1) 91 | else: 92 | self.pad = nn.ZeroPad2d(1) 93 | self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) 94 | def forward(self, x): 95 | out = self.pad(x) 96 | out = self.conv(out) 97 | return out 98 | 99 | class Backproject(nn.Module): 100 | def __init__(self, batch_size, height, width): 101 | super(Backproject, self).__init__() 102 | self.batch_size = batch_size 103 | self.height = height 104 | self.width = width 105 | meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy') 106 | self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32) 107 | self.id_coords = torch.from_numpy(self.id_coords) 108 | self.ones = torch.ones(self.batch_size, 1, self.height * self.width) 109 | self.pix_coords = torch.unsqueeze(torch.stack([self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0) 110 | self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1) 111 | 
self.pix_coords = torch.cat([self.pix_coords, self.ones], 1) 112 | def forward(self, depth, inv_K): 113 | cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords.cuda()) 114 | cam_points = depth.view(self.batch_size, 1, -1) * cam_points 115 | cam_points = torch.cat([cam_points, self.ones.cuda()], 1) 116 | return cam_points 117 | 118 | class Project(nn.Module): 119 | def __init__(self, batch_size, height, width, eps=1e-7): 120 | super(Project, self).__init__() 121 | self.batch_size = batch_size 122 | self.height = height 123 | self.width = width 124 | self.eps = eps 125 | def forward(self, points, K, T): 126 | P = torch.matmul(K, T)[:, :3, :] 127 | cam_points = torch.matmul(P, points) 128 | pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps) 129 | pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.width) 130 | point_depth = cam_points[:, 2, :].view(self.batch_size, 1, self.height, self.width) 131 | pix_coords = pix_coords.permute(0, 2, 3, 1) 132 | pix_coords[..., 0] /= self.width - 1 133 | pix_coords[..., 1] /= self.height - 1 134 | pix_coords = (pix_coords - 0.5) * 2 135 | return pix_coords, point_depth 136 | 137 | def upsample(x): 138 | return F.interpolate(x, scale_factor=2, mode="nearest") 139 | 140 | def get_smooth_loss(disp, img): 141 | """Computes the smoothness loss for a disparity image 142 | The color image is used for edge-aware smoothness 143 | """ 144 | grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:]) 145 | grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :]) 146 | grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True) 147 | grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True) 148 | grad_disp_x *= torch.exp(-grad_img_x) 149 | grad_disp_y *= torch.exp(-grad_img_y) 150 | return grad_disp_x.mean() + grad_disp_y.mean() 151 | 152 | class SSIM(nn.Module): 153 | """Layer to compute the SSIM loss between a pair of images 154 | """ 155 | def __init__(self): 156 | super(SSIM, self).__init__() 157 | self.mu_x_pool = nn.AvgPool2d(3, 1) 158 | self.mu_y_pool = nn.AvgPool2d(3, 1) 159 | self.sig_x_pool = nn.AvgPool2d(3, 1) 160 | self.sig_y_pool = nn.AvgPool2d(3, 1) 161 | self.sig_xy_pool = nn.AvgPool2d(3, 1) 162 | self.refl = nn.ReflectionPad2d(1) 163 | self.C1 = 0.01 ** 2 164 | self.C2 = 0.03 ** 2 165 | def forward(self, x, y): 166 | x = self.refl(x) 167 | y = self.refl(y) 168 | mu_x = self.mu_x_pool(x) 169 | mu_y = self.mu_y_pool(y) 170 | sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2 171 | sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2 172 | sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y 173 | SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2) 174 | SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2) 175 | return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) 176 | 177 | def compute_depth_errors(gt, pred): 178 | """Computation of error metrics between predicted and ground truth depths 179 | """ 180 | thresh = torch.max((gt / pred), (pred / gt)) 181 | a1 = (thresh < 1.25 ).float().mean() 182 | a2 = (thresh < 1.25 ** 2).float().mean() 183 | a3 = (thresh < 1.25 ** 3).float().mean() 184 | rmse = (gt - pred) ** 2 185 | rmse = torch.sqrt(rmse.mean()) 186 | rmse_log = (torch.log(gt) - torch.log(pred)) ** 2 187 | rmse_log = torch.sqrt(rmse_log.mean()) 188 | abs_rel = torch.mean(torch.abs(gt - pred) / gt) 189 | sq_rel = torch.mean((gt - pred) ** 2 / gt) 190 | return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 
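 
A small illustrative check of transformation_from_parameters defined above: with invert=False it composes T(t) @ R from the predicted (axisangle, translation), and with invert=True it builds the corresponding inverse R^T @ T(-t), which mirrors how predict_poses in net.py passes invert=(f_i < 0) for backward frames. The batch size and the random small-motion pose below are assumptions for demonstration only.

import torch
from networks_dev.layers import transformation_from_parameters  # assumes the repository root is on PYTHONPATH

B = 4
axisangle = 0.01 * torch.randn(B, 2, 1, 3)      # network-style output: B x frames x 1 x 3
translation = 0.01 * torch.randn(B, 2, 1, 3)

T_fwd = transformation_from_parameters(axisangle[:, 0], translation[:, 0], invert=False)
T_inv = transformation_from_parameters(axisangle[:, 0], translation[:, 0], invert=True)

# The forward and inverted transforms should compose to (approximately) the 4x4 identity.
assert torch.allclose(T_fwd @ T_inv, torch.eye(4).expand(B, 4, 4), atol=1e-5)
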
-------------------------------------------------------------------------------- /networks_dev/net.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | import skimage 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | # ---------- Structure Modules in DevNet ---------- 8 | from .pose_encoder import PoseEncoder 9 | from .pose_decoder import PoseDecoder 10 | from .resnet_encoder import ResnetEncoder # Feature Extractor In FeatDepth 11 | from .depth_decoder import DepthDecoder 12 | #from .depth_decoder2 import DepthDecoder 13 | 14 | # ---------- Manager System For DevNet ---------- 15 | from ..registry import MONO 16 | # ---------- Supporting Modules For DevNet ---------- 17 | from .utils import get_embedder 18 | from .utils import HomographySample 19 | from .utils import get_disparity_list 20 | from .utils import transformation_from_parameters # From prediction Result to Matrix. 21 | from .render_utils import get_xyz_from_plane_disparity # Get 3D Points from Disparity 22 | from .render_utils import get_tgt_xyz_from_plane_disparity 23 | from .render_utils import predict_density_from_disparity 24 | from .render_utils import render 25 | from .render_utils import inverse_matrix 26 | from .render_utils import render_tgt_depth 27 | from mono.datasets.utils import compute_errors 28 | from .layers import SSIM 29 | from .layers import Backproject 30 | from .layers import Project 31 | 32 | @MONO.register_module 33 | class mono_dev7(nn.Module): 34 | def __init__(self, options): 35 | super(mono_dev7, self).__init__() 36 | self.opt = options 37 | print(self.opt) 38 | # ----- Pose Module ----- 39 | self.use_alpha = False 40 | self.PoseEncoder = PoseEncoder(self.opt.pose_num_layers, self.opt.pose_pretrained_path) 41 | self.PoseDecoder = PoseDecoder(self.PoseEncoder.num_ch_enc, color = self.opt.use_color_loss) 42 | # ----- Depth Module ----- 43 | self.embedder, out_dim = get_embedder(self.opt.pos_encoding_multires) 44 | self.backbone = ResnetEncoder(num_layers = self.opt.resnet_num_layers, 45 | pretrained = self.opt.imagenet_pretrained, 46 | pretrained_path = self.opt.depth_pretrained_path) 47 | self.decoder = DepthDecoder(num_ch_enc = self.backbone.num_ch_enc, 48 | embedder = self.embedder, embedder_out_dim = out_dim, 49 | output_channels = self.opt.num_bins, use_alpha=self.use_alpha) 50 | 51 | #self.decoder = DepthDecoder(ch_enc = self.backbone.num_ch_enc, 52 | # num_output_channels = self.opt.num_bins) 53 | # ----- Resume ----- 54 | if self.opt.pretrained_depth is not None: 55 | self.resume(self.opt.pretrained_depth) 56 | # ----- Support Module ----- 57 | self.ssim = SSIM() 58 | self.backproject = Backproject(self.opt.imgs_per_gpu, self.opt.height, self.opt.width) 59 | self.project= Project(self.opt.imgs_per_gpu, self.opt.height, self.opt.width) 60 | self.feat_backproject = Backproject(self.opt.imgs_per_gpu, int(self.opt.height/2), int(self.opt.width/2)) 61 | self.feat_project = Project(self.opt.imgs_per_gpu, int(self.opt.height/2), int(self.opt.width/2)) 62 | self.init_data() 63 | 64 | # ---------- Support Module ---------- 65 | def init_data(self,): 66 | H, W = self.opt.height, self.opt.width 67 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 68 | self.homography_sampler_list = \ 69 | [HomographySample(int(H / 2), int(W / 2), device=device), 70 | HomographySample(int(H / 4), int(W / 4), device=device), 71 | 
HomographySample(int(H / 8), int(W / 8), device=device), 72 | HomographySample(int(H / 16), int(W / 16), device=device)] 73 | self.upsample_list = \ 74 | [nn.Upsample(size=(int(H / 2), int(W / 2))), 75 | nn.Upsample(size=(int(H / 4), int(W / 4))), 76 | nn.Upsample(size=(int(H / 8), int(W / 8))), 77 | nn.Upsample(size=(int(H / 16), int(W / 16)))] 78 | 79 | def resume(self, weights): 80 | self.load_state_dict(torch.load(weights)['state_dict']) 81 | 82 | # ---------- Modification ---------- 83 | def forward(self, inputs, iteration = 0, epoch = 0): 84 | outputs = {} 85 | if self.training: 86 | if self.opt.use_depth_loss: 87 | outputs.update(self.predict_poses(inputs)) 88 | density_list, disparity_list = self.network_process(inputs, 0) # (List of Featmaps), (B, S) 89 | for scale in self.opt.scales: 90 | outputs[("density", 0, scale)] = density_list[scale] 91 | outputs[("depth", 0, scale)] = self.calculate_depth(inputs, scale, density_list[scale], disparity_list) 92 | with torch.no_grad(): # To reduce memory request. 93 | density_list, disparity_list = self.network_process(inputs, -1) # (List of Featmaps), (B, S) 94 | for scale in self.opt.scales: 95 | outputs[("depth", -1, scale)] = self.calculate_depth(inputs, scale, density_list[scale], disparity_list) 96 | if self.opt.use_depth_loss_ts: 97 | G_src_tgt = outputs[("cam_T_cam", 0, -1)] 98 | G_tgt_src = inverse_matrix(G_src_tgt) 99 | outputs[("tgt_depth", -1, scale)], outputs[("tgt_depth_mask", -1, scale)] = self.calculate_tgt_depth(inputs, scale, density_list[scale], disparity_list, G_tgt_src) 100 | 101 | if self.opt.aug_consistency: 102 | with torch.no_grad(): 103 | density_list, disparity_list = self.network_process_nor(inputs, 0) # (List of Featmaps), (B, S) 104 | for scale in self.opt.scales: 105 | outputs[("depth_consistency", 0, scale)] = self.calculate_depth(inputs, scale, density_list[scale], disparity_list) 106 | 107 | else: 108 | frame = 0 109 | outputs.update(self.predict_poses(inputs)) 110 | density_list, disparity_list = self.network_process(inputs, frame) 111 | for scale in self.opt.scales: 112 | outputs[("density", 0, scale)] = density_list[scale] 113 | outputs[("depth", frame, scale)] = self.calculate_depth(inputs, scale, density_list[scale], disparity_list) 114 | '''Changing from the First Stage to Second One.''' 115 | loss_dict = self.compute_losses(inputs, outputs) 116 | return outputs, loss_dict 117 | density_src_list, disparity_src = self.network_process(inputs, frame = 0) 118 | outputs[("depth", 0, 0)] = self.calculate_depth(inputs, 0, density_src_list[0], disparity_src) 119 | return outputs 120 | 121 | # ---------- Modification ---------- 122 | def network_process(self, inputs, frame): 123 | img = inputs[('color_aug', frame, 0)] 124 | B = img.size(0) 125 | disparity_list = get_disparity_list(self.opt, B, device=img.device) # B, S 126 | density_list = predict_density_from_disparity(self.disp_predictor, img, disparity_list) 127 | return density_list, disparity_list 128 | 129 | def network_process_nor(self, inputs, frame): 130 | img = inputs[('color', frame, 0)] 131 | B = img.size(0) 132 | disparity_list = get_disparity_list(self.opt, B, device=img.device) # B, S 133 | density_list = predict_density_from_disparity(self.disp_predictor, img, disparity_list) 134 | return density_list, disparity_list 135 | 136 | # ---------- Modification ---------- 137 | def disp_predictor(self, src_imgs_BCHW, disparity_BS): 138 | conv1_out, block1_out, block2_out, block3_out, block4_out = self.backbone(src_imgs_BCHW) 139 | outputs = 
self.decoder([conv1_out, block1_out, block2_out, block3_out, block4_out], disparity_BS) 140 | output_list = [outputs[("disp", 0)], outputs[("disp", 1)], outputs[("disp", 2)], outputs[("disp", 3)]] 141 | return output_list 142 | 143 | # ---------- Modification ---------- 144 | def predict_poses(self, inputs): 145 | outputs = {} 146 | pose_feats = {f_i: F.interpolate(inputs["color_aug", f_i, 0], [192, 640], mode="bilinear", align_corners=False) for f_i in self.opt.frame_ids} 147 | for f_i in self.opt.frame_ids[1:]: 148 | if not f_i == "s": 149 | if f_i < 0: 150 | pose_inputs = [pose_feats[f_i], pose_feats[0]] 151 | else: 152 | pose_inputs = [pose_feats[0], pose_feats[f_i]] 153 | # from [0] -> [1] 154 | pose_inputs = self.PoseEncoder(torch.cat(pose_inputs, 1)) 155 | if not self.opt.use_color_loss: 156 | axisangle, translation = self.PoseDecoder(pose_inputs) 157 | elif self.opt.use_color_loss: 158 | axisangle, translation, color_a, color_b = self.PoseDecoder(pose_inputs) 159 | if f_i < 0: 160 | outputs[("color_a", f_i)] = 1/color_a 161 | outputs[("color_b", f_i)] = - color_b/color_a 162 | elif f_i > 0: 163 | outputs[("color_a", f_i)] = color_a 164 | outputs[("color_b", f_i)] = color_b 165 | outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters(axisangle[:, 0], \ 166 | translation[:, 0], invert=(f_i < 0)) 167 | return outputs 168 | 169 | # ---------- Modification ---------- 170 | def calculate_depth(self, inputs, scale, density_list, disparity): 171 | img = inputs[('color_aug', 0, 0)] 172 | K_scaled = inputs[('K')][:, :3, :3]/ (2 ** (scale)) 173 | K_scaled[:, 2, 2] = 1 174 | torch.cuda.synchronize() 175 | K_scaled_inv = torch.inverse(K_scaled) 176 | xyz_BS3HW = get_xyz_from_plane_disparity( 177 | self.homography_sampler_list[scale].meshgrid.to(img.device), \ 178 | disparity.to(img.device), K_scaled_inv.to(img.device)) 179 | depth_syn = render(density_list, xyz_BS3HW, self.use_alpha) 180 | return depth_syn 181 | 182 | def calculate_tgt_depth(self, inputs, scale, density_src_list, disparity_src, G_tgt_src): 183 | K_scaled = inputs[('K')][:, :3, :3]/ (2 ** (scale)) 184 | K_scaled[:, 2, 2] = 1 185 | K_scaled_inv = torch.inverse(K_scaled) 186 | torch.cuda.synchronize() 187 | # Apply scale factor 188 | if self.opt.stereo_scale: 189 | with torch.no_grad(): 190 | G_tgt_src = torch.clone(G_tgt_src) 191 | G_tgt_src[:, 0:3, 3] = G_tgt_src[:, 0:3, 3] / self.opt.STEREO_SCALE_FACTOR 192 | tgt_depth_syn, tgt_mask_syn = self.render_novel_view(density_src_list, 193 | disparity_src, G_tgt_src, 194 | K_scaled_inv, K_scaled, scale=scale) 195 | threshold = 8 196 | tgt_mask = torch.ge(tgt_mask_syn, threshold).to(torch.float32) 197 | return tgt_depth_syn, tgt_mask 198 | 199 | '''--------- LOSS FUNCTION ---------''' 200 | def compute_losses(self, inputs, outputs): 201 | loss_add = {} 202 | loss_dict = {} 203 | ''' ---------- scale involved in the COLOR_RECONSTRUCTION and SMOOTH LOSS ---------''' 204 | for scale in self.opt.scales: 205 | for frame_id in self.opt.frame_ids[1:]: 206 | """ initialization """ 207 | target = inputs[("color", 0, 0)] 208 | """ image_reconstruction_loss """ 209 | outputs = self.generate_images_pred_tgt_src(inputs, outputs, scale) 210 | if not self.opt.use_color_loss: 211 | pred = outputs[("color", frame_id, scale)] 212 | elif self.opt.use_color_loss: 213 | _, C, H, W = outputs[("color", frame_id, scale)].size() 214 | pred = outputs[("color", frame_id, scale)] * outputs[("color_a", frame_id)].unsqueeze(-1).repeat(1, C, H, W) \ 215 | + outputs[("color_b", 
frame_id)].unsqueeze(-1).repeat(1, C, H, W) 216 | loss_add[('repro_loss', frame_id, scale)] = self.compute_reprojection_loss(pred, target) 217 | 218 | """ automask """ 219 | if scale == 0: 220 | pred = inputs[("color", frame_id, 0)] 221 | identity_reprojection_loss = self.compute_reprojection_loss(pred, target) 222 | identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cuda() * 1e-5 223 | loss_add[('iden_loss', frame_id)] = identity_reprojection_loss 224 | _, iden_mask = torch.min(torch.cat([loss_add[('iden_loss', frame_id)], \ 225 | loss_add[('repro_loss', frame_id, scale)]], dim = 1), dim=1) 226 | loss_add[('iden_mask', frame_id, scale)] = iden_mask.detach().float().unsqueeze(1) 227 | 228 | """ Color_Reconstruction_Loss. """ 229 | reprojection_losses = [] 230 | for frame_id in self.opt.frame_ids[1:]: 231 | loss_add[('repro_loss', frame_id, scale)] = loss_add[('repro_loss', frame_id, scale)]*loss_add[('iden_mask', frame_id, scale)] + loss_add[('iden_loss', frame_id)]*(1 - loss_add[('iden_mask', frame_id, scale)]) 232 | reprojection_losses.append(loss_add[('repro_loss', frame_id, scale)]) 233 | loss_dict[('repro_loss', scale)], mask = torch.min(torch.cat(reprojection_losses, dim=1), dim=1) 234 | loss_dict[('repro_loss', scale)] = loss_dict[('repro_loss', scale)].mean()/len(self.opt.scales) 235 | 236 | if self.opt.use_depth_loss: 237 | loss_add.update(self.generate_occlusion(inputs, outputs, loss_add, scale)) 238 | mask_depth = (1 - mask) * loss_add[('iden_mask', -1, scale)] 239 | loss_dict[('depth_loss', scale)] = loss_add[('depth_loss', scale)] 240 | loss_dict[('depth_loss', scale)] = loss_dict[('depth_loss', scale)] * mask_depth 241 | loss_dict[('depth_loss', scale)] = self.opt.depth_weight * loss_dict[('depth_loss', scale)].mean() 242 | 243 | if self.opt.use_var_loss: 244 | var_depth = self.opt.var_weight * outputs[("density", 0, scale)].var(1).mean() 245 | 246 | if self.opt.use_depth_loss_ts: 247 | loss_add.update(self.generate_depth_loss(outputs, loss_add, scale)) 248 | loss_dict[('depth_loss_ts', scale)] = loss_add[("depth_loss_ts", scale)]*mask_depth 249 | loss_dict[('depth_loss_ts', scale)] = self.opt.depth_weight_ts * loss_dict[('depth_loss_ts', scale)].mean() 250 | 251 | """ smooth loss (Color Space)""" 252 | if self.opt.use_smooth_loss: 253 | if self.opt.disp_norm: 254 | disp = torch.reciprocal(outputs[("depth", 0, scale)]) 255 | mean_disp = disp.mean(2, True).mean(3, True) 256 | disp = disp / (mean_disp + 1e-7) 257 | else: 258 | disp = torch.reciprocal(outputs[("depth", 0, scale)]) 259 | target = inputs[("color", 0, 0)] 260 | smooth_loss = self.get_smooth_loss(disp, target) 261 | loss_dict[('smooth_loss', scale)] = self.opt.smoothness_weight * smooth_loss /\ 262 | (2 ** scale)/len(self.opt.scales) 263 | 264 | """ smooth loss (Color Space)""" 265 | if self.opt.aug_consistency: 266 | loss_dict[('aug_consist_loss', scale)] = self.opt.consist_weight * self.robust_l1(outputs[("depth", 0, scale)], outputs[("depth_consistency", 0, scale)]).mean(1, True) 267 | 268 | """ Color_Parameters_Loss. 
""" 269 | if self.opt.use_color_loss: 270 | for frame_id in self.opt.frame_ids[1:]: 271 | loss_dict[('color_loss', frame_id, 0)] = self.opt.color_weight * ((outputs[('color_a', frame_id)]-1)**2 + outputs[('color_b', frame_id)]**2) 272 | return loss_dict 273 | 274 | def generate_images_pred_tgt_src(self, inputs, outputs, scale): 275 | # Bi-Direction 276 | depth = outputs[("depth", 0, scale)] 277 | depth = F.interpolate(depth, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 278 | for _, frame_id in enumerate(self.opt.frame_ids[1:]): 279 | if frame_id == "s": 280 | T = inputs["stereo_T"] 281 | else: 282 | T = outputs[("cam_T_cam", 0, frame_id)] 283 | cam_points = self.backproject(depth, torch.inverse(inputs[("K")])) 284 | pix_coords, _ = self.project(cam_points, inputs[("K")], T) #[b,h,w,2] 285 | img = inputs[("color", frame_id, 0)] 286 | outputs[("color", frame_id, scale)] = F.grid_sample(img, pix_coords, padding_mode="border") 287 | return outputs 288 | 289 | def generate_occlusion(self, inputs, outputs, loss_add, scale): 290 | depth = outputs[("depth", 0, scale)] 291 | tgt_depth = F.interpolate(depth, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 292 | 293 | T = outputs[("cam_T_cam", 0, -1)] 294 | depth = outputs[("depth", -1, scale)] 295 | src_depth = F.interpolate(depth, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 296 | cam_points = self.backproject(tgt_depth, torch.inverse(inputs[("K")])) 297 | pix_src_tgt, tgt_src_depth1 = self.project(cam_points, inputs[("K")], T) 298 | 299 | tgt_src_depth2 = F.grid_sample(src_depth, pix_src_tgt, mode="nearest", padding_mode="border") 300 | tgt_src_transform = (tgt_src_depth1 - tgt_src_depth2).abs() 301 | variable_bar = (tgt_depth - src_depth).abs() 302 | 303 | loss_add[("depth_loss", scale)] = (tgt_src_transform/(tgt_src_depth1 + tgt_src_depth2)) 304 | loss_add[("depth_loss_iden", scale)] = (variable_bar/(tgt_depth + src_depth)) 305 | return loss_add 306 | 307 | def generate_depth_loss(self, outputs, loss_add, scale): 308 | tgt_depth = F.interpolate(outputs[("depth", 0, scale)], [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 309 | tgt_src_depth = F.interpolate(outputs[("tgt_depth", -1, scale)], [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 310 | tgt_src_mask = F.interpolate(outputs[("tgt_depth_mask", -1, scale)], [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) 311 | tgt_src_transform = (tgt_depth - tgt_src_depth).abs() 312 | loss_add[("depth_loss_ts", scale)] = (tgt_src_transform/(tgt_depth + tgt_src_depth))*tgt_src_mask 313 | return loss_add 314 | 315 | def compute_reprojection_loss(self, pred, target): 316 | photometric_loss = self.robust_l1(pred, target).mean(1, True) 317 | ssim_loss = self.ssim(pred, target).mean(1, True) 318 | reprojection_loss = (0.85 * ssim_loss + 0.15 * photometric_loss) 319 | return reprojection_loss 320 | 321 | def robust_l1(self, pred, target): 322 | eps = 1e-3 323 | return torch.sqrt(torch.pow(target - pred, 2) + eps ** 2) 324 | 325 | def get_smooth_loss(self, disp, img): 326 | _, _, h, w = disp.size() 327 | a1 = 0.5 328 | a2 = 0.5 329 | img = F.interpolate(img, (h, w), mode='area') 330 | disp_dx, disp_dy = self.gradient(disp) 331 | img_dx, img_dy = self.gradient(img) 332 | disp_dxx, disp_dxy = self.gradient(disp_dx) 333 | disp_dyx, disp_dyy = self.gradient(disp_dy) 334 | img_dxx, img_dxy = self.gradient(img_dx) 335 | img_dyx, img_dyy = self.gradient(img_dy) 336 | 
smooth1 = torch.mean(disp_dx.abs() * torch.exp(-a1 * img_dx.abs().mean(1, True))) + \ 337 | torch.mean(disp_dy.abs() * torch.exp(-a1 * img_dy.abs().mean(1, True))) 338 | smooth2 = torch.mean(disp_dxx.abs() * torch.exp(-a2 * img_dxx.abs().mean(1, True))) + \ 339 | torch.mean(disp_dxy.abs() * torch.exp(-a2 * img_dxy.abs().mean(1, True))) + \ 340 | torch.mean(disp_dyx.abs() * torch.exp(-a2 * img_dyx.abs().mean(1, True))) + \ 341 | torch.mean(disp_dyy.abs() * torch.exp(-a2 * img_dyy.abs().mean(1, True))) 342 | return smooth1+smooth2 343 | 344 | def gradient(self, D): 345 | D_dy = D[:, :, 1:] - D[:, :, :-1] 346 | D_dx = D[:, :, :, 1:] - D[:, :, :, :-1] 347 | return D_dx, D_dy 348 | 349 | def render_novel_view(self, mpi_all_sigma_src, 350 | disparity_all_src, G_tgt_src, 351 | K_src_inv, K_tgt, scale=0): 352 | xyz_src_BS3HW = get_xyz_from_plane_disparity( 353 | self.homography_sampler_list[scale].meshgrid, disparity_all_src, K_src_inv) 354 | xyz_tgt_BS3HW = get_tgt_xyz_from_plane_disparity( 355 | xyz_src_BS3HW.to(mpi_all_sigma_src.device), G_tgt_src.to(mpi_all_sigma_src.device)) 356 | # Bx1xHxW, Bx3xHxW, Bx1xHxW 357 | tgt_depth_syn, tgt_mask_syn = render_tgt_depth( 358 | self.homography_sampler_list[scale], 359 | mpi_all_sigma_src, disparity_all_src, xyz_tgt_BS3HW, 360 | G_tgt_src, K_src_inv, K_tgt, use_alpha=self.use_alpha) 361 | return tgt_depth_syn, tgt_mask_syn -------------------------------------------------------------------------------- /networks_dev/pose_decoder.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import torch 4 | import torch.nn as nn 5 | from collections import OrderedDict 6 | 7 | 8 | class PoseDecoder(nn.Module): 9 | def __init__(self, num_ch_enc, num_input_features, num_frames_to_predict_for=None, stride=1): 10 | super(PoseDecoder, self).__init__() 11 | #num_ch_enc = [64,64,128,256,512] 12 | #num_input_features = 1 13 | #num_frames_to_predict_for = 2 14 | self.num_ch_enc = num_ch_enc 15 | self.num_input_features = num_input_features 16 | 17 | if num_frames_to_predict_for is None: 18 | num_frames_to_predict_for = num_input_features - 1 19 | self.num_frames_to_predict_for = num_frames_to_predict_for 20 | 21 | self.convs = OrderedDict() 22 | self.convs[("squeeze")] = nn.Conv2d(self.num_ch_enc[-1], 256, 1) 23 | self.convs[("pose", 0)] = nn.Conv2d(num_input_features * 256, 256, 3, stride, 1) 24 | self.convs[("pose", 1)] = nn.Conv2d(256, 256, 3, stride, 1) 25 | self.convs[("pose", 2)] = nn.Conv2d(256, 6 * num_frames_to_predict_for, 1) 26 | 27 | self.relu = nn.ReLU()#in depthdecoder activation function is sigmoid() 28 | 29 | self.net = nn.ModuleList(list(self.convs.values())) 30 | 31 | def forward(self, input_features): 32 | #input_features is a list which just has a element but the element has 5 scales feature maps. 33 | last_features = [f[-1] for f in input_features]#only collect last_feature? 
34 | #so last_features only has a 512*6*20 feature map 35 | #print(last_features[0].size()) 36 | cat_features = [self.relu(self.convs["squeeze"](f)) for f in last_features] 37 | cat_features = torch.cat(cat_features,1) 38 | out = cat_features 39 | for i in range(3): 40 | out = self.convs[("pose", i)](out) 41 | if i != 2: 42 | out = self.relu(out) 43 | out = out.mean(3).mean(2) 44 | #out.size = 12*12 45 | out = 0.01 * out.view(-1, self.num_frames_to_predict_for, 1, 6) 46 | #out.size = 12 * 2 * 1 * 6 47 | axisangle = out[..., :3] 48 | translation = out[..., 3:] 49 | #print(axisangle.size()) 50 | #print(translation.size()) 51 | #input() 52 | return axisangle, translation 53 | #return 2 tensors which size is 12 * 2 * 1 * 3 -------------------------------------------------------------------------------- /networks_dev/render_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .utils import HomographySample 3 | 4 | # ---------- Modification ---------- 5 | def get_xyz_from_plane_disparity(meshgrid_homo, mpi_disparity, K_inv): 6 | """:param meshgrid_homo: 3xHxW :param mpi_disparity: BxS :param K_inv: Bx3x3 :return: """ 7 | B, S = mpi_disparity.size() 8 | H, W = meshgrid_homo.size(1), meshgrid_homo.size(2) 9 | mpi_depth = torch.reciprocal(mpi_disparity) # BxS from the disparity to depth 10 | K_inv_Bs33 = K_inv.unsqueeze(1).repeat(1, S, 1, 1).reshape(B * S, 3, 3) 11 | meshgrid_homo = meshgrid_homo.unsqueeze(0).unsqueeze(1).repeat(B, S, 1, 1, 1) # 3xHxW -> BxSx3xHxW 12 | meshgrid_homo_Bs3N = meshgrid_homo.reshape(B * S, 3, -1) 13 | xyz = torch.matmul(K_inv_Bs33, meshgrid_homo_Bs3N) # BSx3xHW 14 | xyz = xyz.reshape(B, S, 3, H * W) * mpi_depth.unsqueeze(2).unsqueeze(3) # BxSx3xHW 15 | xyz_BS3HW = xyz.reshape(B, S, 3, H, W) 16 | return xyz_BS3HW 17 | 18 | def predict_density_from_disparity(disp_predictor, imgs, disparity_coarse): 19 | density_list = disp_predictor(imgs, disparity_coarse) # BxS_coarsex1xHxW 20 | return density_list 21 | 22 | def render(sigma_BS1HW, xyz_BS3HW, use_alpha): 23 | if not use_alpha: 24 | depth_syn = plane_volume_rendering(sigma_BS1HW, xyz_BS3HW) 25 | else: 26 | depth_syn = alpha_composition(sigma_BS1HW, xyz_BS3HW[:, :, 2:]) 27 | return depth_syn 28 | 29 | def alpha_composition(alpha_BK1HW, value_BKCHW): 30 | B, K, _, H, W = alpha_BK1HW.size() 31 | alpha_comp_cumprod = torch.cumprod(1 - alpha_BK1HW, dim=1) # BxKx1xHxW 32 | preserve_ratio = torch.cat((torch.ones((B, 1, 1, H, W), dtype=alpha_BK1HW.dtype, device=alpha_BK1HW.device), 33 | alpha_comp_cumprod[:, 0:K-1, :, :, :]), dim=1) # BxKx1xHxW 34 | weights = alpha_BK1HW * preserve_ratio # BxKx1xHxW 35 | value_composed = torch.sum(value_BKCHW * weights, dim=1, keepdim=False) # Bx3xHxW 36 | return value_composed 37 | 38 | def plane_volume_rendering(sigma_BS1HW, xyz_BS3HW): 39 | B, _, _, H, W = sigma_BS1HW.size() 40 | xyz_diff_BS3HW = xyz_BS3HW[:, 1:, :, :, :] - xyz_BS3HW[:, 0:-1, :, :, :] # Bx(S-1)x3xHxW 41 | xyz_dist_BS1HW = torch.norm(xyz_diff_BS3HW, dim=2, keepdim=True) # Bx(S-1)x1xHxW 42 | xyz_dist_BS1HW = torch.cat((xyz_dist_BS1HW, torch.full((B, 1, 1, H, W), fill_value=1e3, 43 | dtype=xyz_BS3HW.dtype, device=xyz_BS3HW.device)), dim=1) # BxSx3xHxW 44 | transparency = torch.exp(-sigma_BS1HW * xyz_dist_BS1HW) # BxSx1xHxW 45 | alpha = 1 - transparency # BxSx1xHxW 46 | # pytorch.cumprod is like: [a, b, c] -> [a, a*b, a*b*c], we need to modify it to [1, a, a*b] 47 | transparency_acc = torch.cumprod(transparency + 1e-6, dim=1) # Bx(S-1)x1xHxW 48 | transparency_acc = 
torch.cat((torch.ones((B, 1, 1, H, W), dtype=transparency.dtype, device=transparency.device), 49 | transparency_acc[:, 0:-1, :, :, :]), dim=1) # BxSx1xHxW 50 | weights = transparency_acc * alpha # BxSx1xHxW 51 | depth_out = weighted_sum_disp(xyz_BS3HW, weights) # Bxs 52 | return depth_out 53 | 54 | def weighted_sum_disp(xyz_BS3HW, weights): 55 | # Weights BxSx1xHxW 56 | weights_sum = torch.sum(weights, dim=1, keepdim=False) # Bx1xHxW 57 | depth_out = torch.sum(weights * xyz_BS3HW[:, :, 2:, :, :], dim=1, keepdim=False)/(weights_sum + 1e-5) # Bx1xHxW 58 | return depth_out 59 | 60 | def inverse_matrix(RT): 61 | # RT Bx4x4 62 | RT_inv = torch.eye(4,4)[None].repeat(RT.size(0),1,1) 63 | R = RT[:, :3, :3] # B, 3, 3 64 | T = RT[:, :3, 3:] # B, 3, 1 65 | R_inv = torch.linalg.inv(R) # B, 3, 3 66 | T_inv = - torch.matmul(R_inv, T) # B, 3, 1 67 | RT_inv[:, :3, :3] = R_inv 68 | RT_inv[:, :3, 3:] = T_inv 69 | return RT_inv # B, 3, 3 70 | 71 | def get_tgt_xyz_from_plane_disparity(xyz_src_BS3HW, G_tgt_src): 72 | """:param xyz_src_BS3HW: BxSx3xHxW 73 | :param G_tgt_src: Bx4x4 """ 74 | B, S, _, H, W = xyz_src_BS3HW.size() 75 | G_tgt_src_Bs33 = G_tgt_src.unsqueeze(1).repeat(1, S, 1, 1).reshape(B*S, 4, 4) 76 | xyz_tgt = transform_G_xyz(G_tgt_src_Bs33, xyz_src_BS3HW.reshape(B*S, 3, H*W)) # Bsx3xHW 77 | xyz_tgt_BS3HW = xyz_tgt.reshape(B, S, 3, H, W) # BxSx3xHxW 78 | return xyz_tgt_BS3HW 79 | 80 | def transform_G_xyz(G, xyz, is_return_homo=False): 81 | """:param G: Bx4x4 82 | :param xyz: Bx3xN""" 83 | assert len(G.size()) == len(xyz.size()) 84 | if len(G.size()) == 2: 85 | G_B44 = G.unsqueeze(0) 86 | xyz_B3N = xyz.unsqueeze(0) 87 | else: 88 | G_B44 = G 89 | xyz_B3N = xyz 90 | xyz_B4N = torch.cat((xyz_B3N, torch.ones_like(xyz_B3N[:, 0:1, :])), dim=1) 91 | G_xyz_B4N = torch.matmul(G_B44, xyz_B4N) 92 | if is_return_homo: 93 | return G_xyz_B4N 94 | else: 95 | return G_xyz_B4N[:, 0:3, :] 96 | 97 | def render_tgt_depth(H_sampler: HomographySample, 98 | mpi_sigma_src, mpi_disparity_src, 99 | xyz_tgt_BS3HW, G_tgt_src, 100 | K_src_inv, K_tgt, use_alpha=False, 101 | is_bg_depth_inf=False): 102 | """:param H_sampler: 103 | :param mpi_sigma_src: BxSx1xHxW 104 | :param mpi_disparity_src: BxS 105 | :param xyz_tgt_BS3HW: BxSx3xHxW 106 | :param G_tgt_src: Bx4x4 107 | :param K_src_inv: Bx3x3 108 | :param K_tgt: Bx3x3 """ 109 | B, S, _, H, W = mpi_sigma_src.size() 110 | mpi_depth_src = torch.reciprocal(mpi_disparity_src) # BxS 111 | # note that here we concat the mpi_src with xyz_tgt, because H_sampler will sample them for tgt frame 112 | # mpi_src is the same in whatever frame, but xyz has to be in tgt frame 113 | mpi_xyz_src = torch.cat((mpi_sigma_src, xyz_tgt_BS3HW), dim=2) # BxSx(3+1+3)xHxW 114 | # homography warping of mpi_src into tgt frame 115 | G_tgt_src_Bs44 = G_tgt_src.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 4, 4) # Bsx4x4 116 | K_src_inv_Bs33 = K_src_inv.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 3, 3) # Bsx3x3 117 | K_tgt_Bs33 = K_tgt.unsqueeze(1).repeat(1, S, 1, 1).contiguous().reshape(B*S, 3, 3) # Bsx3x3 118 | # BsxCxHxW, BsxHxW 119 | tgt_mpi_xyz_BsCHW, tgt_mask_BsHW = H_sampler.sample(mpi_xyz_src.view(B*S, 4, H, W), 120 | mpi_depth_src.view(B*S), 121 | G_tgt_src_Bs44, 122 | K_src_inv_Bs33, 123 | K_tgt_Bs33) 124 | # mpi composition 125 | tgt_mpi_xyz = tgt_mpi_xyz_BsCHW.view(B, S, 4, H, W) 126 | tgt_sigma_BS1HW = tgt_mpi_xyz[:, :, 0:1, :, :] 127 | tgt_xyz_BS3HW = tgt_mpi_xyz[:, :, 1:, :, :] 128 | tgt_mask_BSHW = tgt_mask_BsHW.view(B, S, H, W) 129 | tgt_mask_BSHW = 
torch.where(tgt_mask_BSHW, 130 | torch.ones((B, S, H, W), dtype=torch.float32, device=mpi_sigma_src.device), 131 | torch.zeros((B, S, H, W), dtype=torch.float32, device=mpi_sigma_src.device)) 132 | # Bx3xHxW, Bx1xHxW, Bx1xHxW 133 | tgt_z_BS1HW = tgt_xyz_BS3HW[:, :, -1:] 134 | tgt_sigma_BS1HW = torch.where(tgt_z_BS1HW >= 0, 135 | tgt_sigma_BS1HW, 136 | torch.zeros_like(tgt_sigma_BS1HW, device=tgt_sigma_BS1HW.device)) 137 | tgt_depth_syn = render(tgt_sigma_BS1HW, tgt_xyz_BS3HW, 138 | use_alpha=use_alpha) 139 | tgt_mask = torch.sum(tgt_mask_BSHW, dim=1, keepdim=True) # Bx1xHxW 140 | return tgt_depth_syn, tgt_mask -------------------------------------------------------------------------------- /networks_dev/resnet.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import torch 3 | import torch.nn as nn 4 | from torch.nn import BatchNorm2d as bn 5 | 6 | def conv3x3(in_planes, out_planes, stride=1): 7 | """3x3 convolution with padding""" 8 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) 9 | 10 | 11 | def conv1x1(in_planes, out_planes, stride=1): 12 | """1x1 convolution""" 13 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 14 | 15 | 16 | class BasicBlock(nn.Module): 17 | expansion = 1 18 | 19 | def __init__(self, inplanes, planes, stride=1, downsample=None): 20 | super(BasicBlock, self).__init__() 21 | self.conv1 = conv3x3(inplanes, planes, stride) 22 | self.bn1 = bn(planes) 23 | self.relu = nn.ReLU(inplace=True) 24 | self.conv2 = conv3x3(planes, planes) 25 | self.bn2 = bn(planes) 26 | self.downsample = downsample 27 | self.stride = stride 28 | 29 | def forward(self, x): 30 | residual = x 31 | 32 | out = self.conv1(x) 33 | out = self.bn1(out) 34 | out = self.relu(out) 35 | 36 | out = self.conv2(out) 37 | out = self.bn2(out) 38 | 39 | if self.downsample is not None: 40 | residual = self.downsample(x) 41 | 42 | out += residual 43 | out = self.relu(out) 44 | 45 | return out 46 | 47 | 48 | class Bottleneck(nn.Module): 49 | expansion = 4 50 | 51 | def __init__(self, inplanes, planes, stride=1, downsample=None): 52 | super(Bottleneck, self).__init__() 53 | self.conv1 = conv1x1(inplanes, planes) 54 | self.bn1 = bn(planes) 55 | self.conv2 = conv3x3(planes, planes, stride) 56 | self.bn2 = bn(planes) 57 | self.conv3 = conv1x1(planes, planes * self.expansion) 58 | self.bn3 = bn(planes * self.expansion) 59 | self.relu = nn.ReLU(inplace=True) 60 | self.downsample = downsample 61 | self.stride = stride 62 | 63 | def forward(self, x): 64 | residual = x 65 | 66 | out = self.conv1(x) 67 | out = self.bn1(out) 68 | out = self.relu(out) 69 | 70 | out = self.conv2(out) 71 | out = self.bn2(out) 72 | out = self.relu(out) 73 | 74 | out = self.conv3(out) 75 | out = self.bn3(out) 76 | 77 | if self.downsample is not None: 78 | residual = self.downsample(x) 79 | 80 | out += residual 81 | out = self.relu(out) 82 | 83 | return out 84 | 85 | 86 | class ResNet(nn.Module): 87 | 88 | def __init__(self, block, layers, num_classes=1000): 89 | super(ResNet, self).__init__() 90 | self.inplanes = 64 91 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) 92 | self.bn1 = bn(64) 93 | self.relu = nn.ReLU(inplace=True) 94 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 95 | self.layer1 = self._make_layer(block, 64, layers[0]) 96 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 97 | self.layer3 = self._make_layer(block, 256, 
layers[2], stride=2) 98 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 99 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 100 | self.fc = nn.Linear(512 * block.expansion, num_classes) 101 | 102 | for m in self.modules(): 103 | if isinstance(m, nn.Conv2d): 104 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 105 | elif isinstance(m, bn): 106 | nn.init.constant_(m.weight, 1) 107 | nn.init.constant_(m.bias, 0) 108 | 109 | def _make_layer(self, block, planes, blocks, stride=1): 110 | downsample = None 111 | if stride != 1 or self.inplanes != planes * block.expansion: 112 | downsample = nn.Sequential( 113 | conv1x1(self.inplanes, planes * block.expansion, stride), 114 | bn(planes * block.expansion),) 115 | 116 | layers = [] 117 | layers.append(block(self.inplanes, planes, stride, downsample)) 118 | self.inplanes = planes * block.expansion 119 | for _ in range(1, blocks): 120 | layers.append(block(self.inplanes, planes)) 121 | return nn.Sequential(*layers) 122 | 123 | def forward(self, x): 124 | x = self.conv1(x) 125 | x = self.bn1(x) 126 | x = self.relu(x) 127 | x = self.maxpool(x) 128 | x = self.layer1(x) 129 | x = self.layer2(x) 130 | x = self.layer3(x) 131 | x = self.layer4(x) 132 | 133 | return x 134 | 135 | 136 | def resnet18(pretrained_path=None): 137 | """Constructs a ResNet-18 model. 138 | Args: 139 | pretrained (bool): If True, returns a model pre-trained on ImageNet 140 | """ 141 | model = ResNet(BasicBlock, [2, 2, 2, 2]) 142 | if pretrained_path is not None: 143 | model.load_state_dict(torch.load(pretrained_path)) 144 | print('Loaded pre-trained weights') 145 | return model 146 | 147 | 148 | def resnet34(pretrained_path=None, **kwargs): 149 | """Constructs a ResNet-34 model. 150 | Args: 151 | pretrained (bool): If True, returns a model pre-trained on ImageNet 152 | """ 153 | model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) 154 | if pretrained_path is not None: 155 | model.load_state_dict(torch.load(osp.join(pretrained_path, 'resnet34.pth'))) 156 | print('Loaded pre-trained weights') 157 | return model 158 | 159 | 160 | def resnet50(pretrained_path=None, **kwargs): 161 | """Constructs a ResNet-50 model. 162 | Args: 163 | pretrained (bool): If True, returns a model pre-trained on ImageNet 164 | """ 165 | model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) 166 | if pretrained_path is not None: 167 | model.load_state_dict(torch.load(osp.join(pretrained_path, 'resnet50.pth'))) 168 | print('Loaded pre-trained weights') 169 | return model 170 | 171 | 172 | def resnet101(pretrained_path=None, **kwargs): 173 | """Constructs a ResNet-101 model. 174 | Args: 175 | pretrained (bool): If True, returns a model pre-trained on ImageNet 176 | """ 177 | model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) 178 | if pretrained_path is not None: 179 | model.load_state_dict(torch.load(osp.join(pretrained_path, 'resnet101.pth'))) 180 | print('Loaded pre-trained weights') 181 | return model 182 | -------------------------------------------------------------------------------- /networks_dev/resnet_encoder.py: -------------------------------------------------------------------------------- 1 | # 2 | # This software is licensed under the terms of the Monodepth2 licence 3 | # which allows for non-commercial use only, the full terms of which are made 4 | # available in the LICENSE file. 
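# ---------------------------------------------------------------------------
# [Editor's note] Illustrative usage sketch for the backbones defined above in
# networks_dev/resnet.py; this block is an editorial addition and is NOT part
# of the original repository. It assumes the repository root is on PYTHONPATH
# (so networks_dev.resnet is importable) and uses the default 192x640 input
# resolution from options.py.
if __name__ == "__main__":  # demo only; never executed on import
    import torch
    from networks_dev.resnet import resnet18

    backbone = resnet18()                # random init; pass pretrained_path="..." to load a checkpoint
    dummy = torch.randn(1, 3, 192, 640)  # B x 3 x H x W
    with torch.no_grad():
        feat = backbone(dummy)
    # ResNet.forward() above returns only the final feature map (no avgpool/fc),
    # downsampled 32x relative to the input: (1, 512, 6, 20) for resnet18.
    print(feat.shape)
# ---------------------------------------------------------------------------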
5 | from __future__ import absolute_import, division, print_function 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | import torch.distributed as dist 10 | import torchvision.models as models 11 | import torch.utils.model_zoo as model_zoo 12 | 13 | class ResNetMultiImageInput(models.ResNet): 14 | def __init__(self, block, layers, num_input_images=1): 15 | super(ResNetMultiImageInput, self).__init__(block, layers) 16 | self.inplanes = 64 17 | self.conv1 = nn.Conv2d( 18 | num_input_images * 3, 64, kernel_size=7, stride=2, padding=3, bias=False) 19 | self.bn1 = nn.BatchNorm2d(64) 20 | self.relu = nn.ReLU(inplace=True) 21 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 22 | self.layer1 = self._make_layer(block, 64, layers[0]) 23 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 24 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 25 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 26 | for m in self.modules(): 27 | if isinstance(m, nn.Conv2d): 28 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 29 | elif isinstance(m, nn.BatchNorm2d): 30 | nn.init.constant_(m.weight, 1) 31 | nn.init.constant_(m.bias, 0) 32 | 33 | def resnet_multiimage_input(num_layers, pretrained=False, num_input_images=1, pretrained_path=None): 34 | assert num_layers in [18, 50], "Can only run with 18 or 50 layer resnet" 35 | blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers] 36 | block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers] 37 | model = ResNetMultiImageInput(block_type, blocks, num_input_images=num_input_images) 38 | if pretrained: 39 | loaded = torch.load(pretrained_path) 40 | loaded['conv1.weight'] = torch.cat([loaded['conv1.weight']] * num_input_images, 1) / num_input_images 41 | model.load_state_dict(loaded) 42 | return model 43 | 44 | class ResnetEncoder(nn.Module): 45 | def __init__(self, num_layers, pretrained, num_input_images=1, pretrained_path=None): 46 | super(ResnetEncoder, self).__init__() 47 | self.num_ch_enc = np.array([64, 64, 128, 256, 512]) 48 | resnets = {18: models.resnet18, 49 | 34: models.resnet34, 50 | 50: models.resnet50, 51 | 101: models.resnet101, 52 | 152: models.resnet152} 53 | if num_layers not in resnets: 54 | raise ValueError("{} is not a valid number of resnet layers".format(num_layers)) 55 | if num_input_images > 1: 56 | self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images, pretrained_path) 57 | else: 58 | self.encoder = resnets[num_layers]() 59 | if pretrained_path is not None: 60 | checkpoint = torch.load(pretrained_path) 61 | self.encoder.load_state_dict(checkpoint) 62 | if num_layers > 34: 63 | self.num_ch_enc[1:] *= 4 64 | print('DepthEncoder------------', pretrained_path) 65 | def forward(self, input_image): 66 | self.features = [] 67 | x = (input_image - 0.45) / 0.225 68 | x = self.encoder.conv1(x) 69 | x = self.encoder.bn1(x) 70 | conv1_out = self.encoder.relu(x) 71 | block1_out = self.encoder.layer1(self.encoder.maxpool(conv1_out)) 72 | block2_out = self.encoder.layer2(block1_out) 73 | block3_out = self.encoder.layer3(block2_out) 74 | block4_out = self.encoder.layer4(block3_out) 75 | return conv1_out, block1_out, block2_out, block3_out, block4_out 76 | -------------------------------------------------------------------------------- /networks_dev/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, 
print_function 2 | 3 | import numpy as np 4 | 5 | import torch 6 | from scipy.spatial.transform import Rotation 7 | 8 | # ----------- Modification ---------- 9 | class Embedder(object): 10 | # Positional encoding (section 5.1) 11 | def __init__(self, **kwargs): 12 | self.kwargs = kwargs 13 | self.create_embedding_fn() 14 | def create_embedding_fn(self): 15 | embed_fns = [] 16 | d = self.kwargs["input_dims"] 17 | out_dim = 0 18 | if self.kwargs["include_input"]: 19 | embed_fns.append(lambda x: x) 20 | out_dim += d 21 | max_freq = self.kwargs["max_freq_log2"] 22 | N_freqs = self.kwargs["num_freqs"] 23 | if self.kwargs["log_sampling"]: 24 | freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs) 25 | else: 26 | freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs) 27 | for freq in freq_bands: 28 | for p_fn in self.kwargs["periodic_fns"]: 29 | embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) 30 | out_dim += d 31 | self.embed_fns = embed_fns 32 | self.out_dim = out_dim 33 | def embed(self, inputs): 34 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1) 35 | 36 | def get_embedder(multires): 37 | embed_kwargs = {"include_input": True, 38 | "input_dims": 1, 39 | "max_freq_log2": multires - 1, 40 | "num_freqs": multires, 41 | "log_sampling": True, 42 | "periodic_fns": [torch.sin, torch.cos],} 43 | embedder_obj = Embedder(**embed_kwargs) 44 | def embed(x, eo=embedder_obj): 45 | return eo.embed(x) 46 | return embed, embedder_obj.out_dim 47 | 48 | # ---------- Modification ---------- 49 | class HomographySample: 50 | def __init__(self, H, W, device=None): 51 | if device is None: 52 | self.device = torch.device("cpu") 53 | else: 54 | self.device = device 55 | self.Height_tgt = H 56 | self.Width_tgt = W 57 | self.meshgrid = self.grid_generation(self.Height_tgt, self.Width_tgt, self.device) 58 | self.meshgrid = self.meshgrid.permute(2, 0, 1).contiguous() # 3xHxW 59 | self.n = self.plane_normal_generation(self.device) 60 | @staticmethod 61 | def grid_generation(H, W, device): 62 | x = np.linspace(0, W-1, W) 63 | y = np.linspace(0, H-1, H) 64 | # Inversing the order due to the usage of numpy 65 | xv, yv = np.meshgrid(x, y) # HxW 66 | xv = torch.from_numpy(xv.astype(np.float32)).to(dtype=torch.float32, device=device) 67 | yv = torch.from_numpy(yv.astype(np.float32)).to(dtype=torch.float32, device=device) 68 | ones = torch.ones_like(xv) 69 | meshgrid = torch.stack((xv, yv, ones), dim=2) # HxWx3 70 | return meshgrid #(H,W,3); x - [0, W]; y - [0, H] 71 | @staticmethod 72 | def plane_normal_generation(device): 73 | n = torch.tensor([0, 0, 1], dtype=torch.float32, device=device) 74 | return n 75 | def sample(self, src_BCHW, d_src_B, G_tgt_src, 76 | K_src_inv, K_tgt): 77 | """ 78 | Coordinate system: x, y are the image directions, z is pointing to depth direction 79 | :param src_BCHW: torch tensor float, 0-1, rgb/rgba. 
BxCxHxW 80 | Assume to be at position P=[I|0] 81 | :param d_src_B: distance of image plane to src camera origin 82 | :param G_tgt_src: Bx4x4 83 | :param K_src_inv: Bx3x3 84 | :param K_tgt: Bx3x3 85 | :return: tgt_BCHW 86 | """ 87 | # parameter processing ------ begin ------ 88 | B, channels, Height_src, Width_src = src_BCHW.size(0), src_BCHW.size(1), src_BCHW.size(2), src_BCHW.size(3) 89 | R_tgt_src = G_tgt_src[:, 0:3, 0:3] 90 | t_tgt_src = G_tgt_src[:, 0:3, 3] 91 | Height_tgt = self.Height_tgt 92 | Width_tgt = self.Width_tgt 93 | R_tgt_src = R_tgt_src.to(device=src_BCHW.device) 94 | t_tgt_src = t_tgt_src.to(device=src_BCHW.device) 95 | K_src_inv = K_src_inv.to(device=src_BCHW.device) 96 | K_tgt = K_tgt.to(device=src_BCHW.device) 97 | # the goal is compute H_src_tgt, that maps a tgt pixel to src pixel 98 | # so we compute H_tgt_src first, and then inverse 99 | n = self.n.to(device=src_BCHW.device) 100 | n = n.unsqueeze(0).repeat(B, 1) # Bx3 101 | # Bx3x3 - (Bx3x1 * Bx1x3) 102 | d_src_B33 = d_src_B.reshape(B, 1, 1).repeat(1, 3, 3) # B -> Bx3x3 103 | R_tnd = R_tgt_src - torch.matmul(t_tgt_src.unsqueeze(2), n.unsqueeze(1)) / -d_src_B33 104 | H_tgt_src = torch.matmul(K_tgt, 105 | torch.matmul(R_tnd, K_src_inv)) 106 | # From source to Target 107 | with torch.no_grad(): 108 | H_src_tgt = inverse(H_tgt_src) 109 | # create tgt image grid, and map to src 110 | meshgrid_tgt_homo = self.meshgrid.to(src_BCHW.device) 111 | # 3xHxW -> Bx3xHxW 112 | meshgrid_tgt_homo = meshgrid_tgt_homo.unsqueeze(0).expand(B, 3, Height_tgt, Width_tgt) 113 | # wrap meshgrid_tgt_homo to meshgrid_src 114 | meshgrid_tgt_homo_B3N = meshgrid_tgt_homo.view(B, 3, -1) # Bx3xHW 115 | meshgrid_src_homo_B3N = torch.matmul(H_src_tgt, meshgrid_tgt_homo_B3N) # Bx3x3 * Bx3xHW -> Bx3xHW 116 | # Bx3xHW -> Bx3xHxW -> BxHxWx3 117 | meshgrid_src_homo = meshgrid_src_homo_B3N.view(B, 3, Height_tgt, Width_tgt).permute(0, 2, 3, 1) 118 | meshgrid_src = meshgrid_src_homo[:, :, :, 0:2] / meshgrid_src_homo[:, :, :, 2:] # BxHxWx2 119 | np_meshgrid_src = meshgrid_src.cpu().detach().numpy() 120 | valid_mask_x = np.logical_and(np_meshgrid_src[:, :, :, 0] < Width_src, np_meshgrid_src[:, :, :, 0] > -1) 121 | valid_mask_y = np.logical_and(np_meshgrid_src[:, :, :, 1] < Height_src, np_meshgrid_src[:, :, :, 1] > -1) 122 | valid_mask = np.logical_and(valid_mask_x, valid_mask_y) # BxHxW 123 | valid_mask = torch.tensor(valid_mask).to(src_BCHW.device) 124 | # sample from src_BCHW 125 | # normalize meshgrid_src to [-1,1] 126 | meshgrid_src[:, :, :, 0] = (meshgrid_src[:, :, :, 0]+0.5) / (Width_src * 0.5) - 1 127 | meshgrid_src[:, :, :, 1] = (meshgrid_src[:, :, :, 1]+0.5) / (Height_src * 0.5) - 1 128 | tgt_BCHW = torch.nn.functional.grid_sample(src_BCHW, grid=meshgrid_src, padding_mode='border', 129 | align_corners=False) 130 | # BxCxHxW, BxHxW 131 | return tgt_BCHW, valid_mask 132 | 133 | def inverse(matrices): 134 | inverse = None 135 | max_tries = 5 136 | while (inverse is None) or (torch.isnan(inverse)).any(): 137 | torch.cuda.synchronize() 138 | inverse = torch.inverse(matrices) 139 | # Break out of the loop when the inverse is successful or there"re no more tries 140 | max_tries -= 1 141 | if max_tries == 0: 142 | break 143 | # Raise an Exception if the inverse contains nan 144 | if (torch.isnan(inverse)).any(): 145 | raise Exception("Matrix inverse contains nan!") 146 | return inverse 147 | 148 | def get_disparity_list(opt, B, device): 149 | S_coarse = opt.num_bins 150 | disparity_start, disparity_end = opt.disparity_start, opt.disparity_end 151 | if not 
opt.uniform_disparity: 152 | disparity_coarse = torch.linspace( 153 | disparity_start, disparity_end, S_coarse, dtype=torch.float32, 154 | device=device).unsqueeze(0).repeat(B, 1) 155 | return disparity_coarse # B, S 156 | elif opt.uniform_disparity: 157 | disparity_coarse = uniformly_sample_disparity_from_linspace_bins( 158 | batch_size=B, num_bins=S_coarse, 159 | start=disparity_start, 160 | end=disparity_end, device=device) 161 | return disparity_coarse # B, S 162 | 163 | def uniformly_sample_disparity_from_linspace_bins(batch_size, num_bins, start, end, device): 164 | assert start > end 165 | B, S = batch_size, num_bins 166 | bin_edges = torch.linspace(start, end, num_bins+1, dtype=torch.float32, device=device) # S+1 167 | interval = bin_edges[1] - bin_edges[0] # scalar 168 | bin_edges_start = bin_edges[0:-1].unsqueeze(0).repeat(B, 1) # S -> BxS 169 | random_float = torch.rand((B, S), dtype=torch.float32, device=device) # BxS 170 | disparity_array = bin_edges_start + interval * random_float 171 | return disparity_array # BxS 172 | 173 | '''----------------Transformation for pose matrix''' 174 | def transformation_from_parameters(axisangle, translation, invert=False): 175 | R = rot_from_axisangle(axisangle) 176 | t = translation.clone() 177 | if invert: 178 | R = R.transpose(1, 2) 179 | t *= -1 180 | T = get_translation_matrix(t) 181 | if invert: 182 | M = torch.matmul(R, T) 183 | else: 184 | M = torch.matmul(T, R) 185 | return M 186 | 187 | def get_translation_matrix(translation_vector): 188 | T = torch.zeros(translation_vector.shape[0], 4, 4).cuda() 189 | t = translation_vector.contiguous().view(-1, 3, 1) 190 | T[:, 0, 0] = 1 191 | T[:, 1, 1] = 1 192 | T[:, 2, 2] = 1 193 | T[:, 3, 3] = 1 194 | T[:, :3, 3, None] = t 195 | return T 196 | 197 | def rot_from_axisangle(vec): 198 | angle = torch.norm(vec, 2, 2, True) 199 | axis = vec / (angle + 1e-7) 200 | ca = torch.cos(angle) 201 | sa = torch.sin(angle) 202 | C = 1 - ca 203 | x = axis[..., 0].unsqueeze(1) 204 | y = axis[..., 1].unsqueeze(1) 205 | z = axis[..., 2].unsqueeze(1) 206 | xs = x * sa 207 | ys = y * sa 208 | zs = z * sa 209 | xC = x * C 210 | yC = y * C 211 | zC = z * C 212 | xyC = x * yC 213 | yzC = y * zC 214 | zxC = z * xC 215 | rot = torch.zeros((vec.shape[0], 4, 4)).cuda() 216 | rot[:, 0, 0] = torch.squeeze(x * xC + ca) 217 | rot[:, 0, 1] = torch.squeeze(xyC - zs) 218 | rot[:, 0, 2] = torch.squeeze(zxC + ys) 219 | rot[:, 1, 0] = torch.squeeze(xyC + zs) 220 | rot[:, 1, 1] = torch.squeeze(y * yC + ca) 221 | rot[:, 1, 2] = torch.squeeze(yzC - xs) 222 | rot[:, 2, 0] = torch.squeeze(zxC - ys) 223 | rot[:, 2, 1] = torch.squeeze(yzC + xs) 224 | rot[:, 2, 2] = torch.squeeze(z * zC + ca) 225 | rot[:, 3, 3] = 1 226 | return rot -------------------------------------------------------------------------------- /options.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import os 4 | import argparse 5 | 6 | file_dir = os.path.dirname(__file__) # the directory that options.py resides in 7 | 8 | 9 | class MonodepthOptions: 10 | def __init__(self): 11 | self.parser = argparse.ArgumentParser(description="Monodepthv2 options") 12 | # ----------- Devnet 13 | self.parser.add_argument("--seed", 14 | type=int, 15 | default=256) 16 | self.parser.add_argument("--pos_encoding_multires", 17 | default=3) 18 | self.parser.add_argument("--resnet_num_layers", 19 | type=int, 20 | help="number of resnet layers", 21 | default=18, 22 | choices=[18, 
34, 50, 101, 152]) 23 | self.parser.add_argument("--imagenet_pretrained", 24 | default=True) 25 | self.parser.add_argument("--depth_pretrained_path", 26 | default="/mnt/nas/kaichen/eng/DEV/DEV/dif/models/resnet/resnet18.pth") 27 | self.parser.add_argument("--num_bins", 28 | default=32) 29 | self.parser.add_argument("--use_alpha", 30 | default=None) 31 | 32 | self.parser.add_argument("--use_var_loss", 33 | default=False) 34 | self.parser.add_argument("--var_weight", 35 | default=1e-03) 36 | self.parser.add_argument("--normal_rank_weight", 37 | default=1e-01) 38 | self.parser.add_argument("--normal_matching_weight", 39 | default=1e-03) 40 | self.parser.add_argument("--disparity_start", 41 | default=0.01) 42 | self.parser.add_argument("--disparity_end", 43 | default=10) 44 | self.parser.add_argument("--uniform_disparity", 45 | default=False) 46 | 47 | self.parser.add_argument("--use_freeze_epoch", 48 | type=int, 49 | default=20) 50 | self.parser.add_argument("--occlusion_mask", 51 | type=bool, 52 | default=False) 53 | self.parser.add_argument("--depth_weight", 54 | type=float, 55 | default=1e-3) 56 | self.parser.add_argument("--cutmix", 57 | type=bool, 58 | default=False) 59 | self.parser.add_argument("--l1_fake", 60 | type=bool, 61 | default=False) 62 | self.parser.add_argument("--rank_fake", 63 | type=bool, 64 | default=False) 65 | 66 | # PATHS 67 | self.parser.add_argument("--data_path", 68 | type=str, 69 | help="path to the training data", 70 | #default=os.path.join(file_dir, "train_val")) 71 | default='/mnt/nas/kaichen/kitti') 72 | self.parser.add_argument("--log_dir", 73 | type=str, 74 | help="log directory", 75 | default='/mnt/nas/kaichen/eng/DEV/DEV/dif/logs/') 76 | 77 | # TRAINING options 78 | self.parser.add_argument("--model_name", 79 | type=str, 80 | help="the name of the folder to save the model in", 81 | default="mdp") 82 | self.parser.add_argument("--split", 83 | type=str, 84 | help="which training split to use", 85 | choices=["eigen_zhou", "eigen_full", "odom", "benchmark", "cityscapes_preprocessed"], 86 | default="eigen_zhou") 87 | self.parser.add_argument("--num_layers", 88 | type=int, 89 | help="number of resnet layers", 90 | default=18, 91 | choices=[18, 34, 50, 101, 152]) 92 | self.parser.add_argument("--dataset", 93 | type=str, 94 | help="dataset to train on", 95 | default="kitti", 96 | choices=["vk2", "kitti", "kitti_odom", "kitti_depth", "kitti_test","cityscapes_preprocessed"]) 97 | self.parser.add_argument("--png", 98 | help="if set, trains from raw KITTI png files (instead of jpgs)", 99 | action="store_true") 100 | self.parser.add_argument("--height", 101 | type=int, 102 | help="input image height", 103 | default=192) 104 | self.parser.add_argument("--width", 105 | type=int, 106 | help="input image width", 107 | default=640) 108 | self.parser.add_argument("--disparity_smoothness", 109 | type=float, 110 | help="disparity smoothness weight", 111 | default=1e-3) 112 | self.parser.add_argument("--scales", 113 | nargs="+", 114 | type=int, 115 | help="scales used in the loss", 116 | default=[0, 1, 2, 3]) 117 | self.parser.add_argument("--min_depth", 118 | type=float, 119 | help="minimum depth", 120 | default=0.1) 121 | self.parser.add_argument("--max_depth", 122 | type=float, 123 | help="maximum depth", 124 | default=100.0) 125 | self.parser.add_argument("--use_stereo", 126 | help="if set, uses stereo pair for training", 127 | action="store_true") 128 | self.parser.add_argument("--frame_ids", 129 | nargs="+", 130 | type=int, 131 | help="frames to load", 132 | default=[0, 
-1, 1]) 133 | 134 | # OPTIMIZATION options 135 | self.parser.add_argument("--batch_size", 136 | type=int, 137 | help="batch size", 138 | default=12) 139 | self.parser.add_argument("--learning_rate", 140 | type=float, 141 | help="learning rate", 142 | default=1e-4) 143 | self.parser.add_argument("--num_epochs", 144 | type=int, 145 | help="number of epochs", 146 | default=20) 147 | self.parser.add_argument("--scheduler_step_size", 148 | type=int, 149 | help="step size of the scheduler", 150 | default=15)#default = 15 151 | 152 | # ABLATION options 153 | self.parser.add_argument("--v1_multiscale", 154 | help="if set, uses monodepth v1 multiscale", 155 | action="store_true") 156 | self.parser.add_argument("--avg_reprojection", 157 | help="if set, uses average reprojection loss", 158 | action="store_true") 159 | self.parser.add_argument("--disable_automasking", 160 | help="if set, doesn't do auto-masking", 161 | action="store_true") 162 | self.parser.add_argument("--predictive_mask", 163 | help="if set, uses a predictive masking scheme as in Zhou et al", 164 | action="store_true") 165 | self.parser.add_argument("--no_ssim", 166 | help="if set, disables ssim in the loss", 167 | action="store_true") 168 | self.parser.add_argument("--weights_init", 169 | type=str, 170 | help="pretrained or scratch", 171 | default="pretrained", 172 | choices=["pretrained", "scratch"]) 173 | self.parser.add_argument("--pose_model_input", 174 | type=str, 175 | help="how many images the pose network gets", 176 | default="pairs", 177 | choices=["pairs", "all"]) 178 | self.parser.add_argument("--pose_model_type", 179 | type=str, 180 | help="normal or shared", 181 | default="separate_resnet", 182 | choices=["posecnn", "separate_resnet", "shared"]) 183 | 184 | # SYSTEM options 185 | self.parser.add_argument("--no_cuda", 186 | help="if set disables CUDA", 187 | action="store_true") 188 | self.parser.add_argument("--num_workers", 189 | type=int, 190 | help="number of dataloader workers", 191 | default=12) 192 | 193 | # LOADING options 194 | self.parser.add_argument("--load_weights_folder", 195 | type=str, 196 | help="name of model to load") 197 | self.parser.add_argument("--models_to_load", 198 | nargs="+", 199 | type=str, 200 | help="models to load", 201 | #default=["pose_encoder", "pose"]) 202 | default=["encoder", "depth", "pose_encoder", "pose"]) 203 | 204 | # LOGGING options 205 | self.parser.add_argument("--log_frequency", 206 | type=int, 207 | help="number of batches between each tensorboard log", 208 | default=250) 209 | self.parser.add_argument("--save_frequency", 210 | type=int, 211 | help="number of epochs between each save", 212 | default=1) 213 | 214 | # EVALUATION options 215 | self.parser.add_argument("--eval_stereo", 216 | help="if set evaluates in stereo mode", 217 | action="store_true") 218 | self.parser.add_argument("--eval_mono", 219 | help="if set evaluates in mono mode", 220 | action="store_true") 221 | self.parser.add_argument("--disable_median_scaling", 222 | help="if set disables median scaling in evaluation", 223 | action="store_true") 224 | self.parser.add_argument("--pred_depth_scale_factor", 225 | help="if set multiplies predictions by this number", 226 | type=float, 227 | default=1) 228 | self.parser.add_argument("--ext_disp_to_eval", 229 | type=str, 230 | help="optional path to a .npy disparities file to evaluate") 231 | self.parser.add_argument("--eval_split", 232 | type=str, 233 | default="eigen", 234 | choices=[ 235 | "cityscapes", "eigen", "eigen_benchmark", "benchmark", "odom_9", 
"odom_10"], 236 | help="which split to run eval on") 237 | self.parser.add_argument("--save_pred_disps", 238 | help="if set saves predicted disparities", 239 | action="store_true") 240 | self.parser.add_argument("--no_eval", 241 | help="if set disables evaluation", 242 | action="store_true") 243 | self.parser.add_argument("--eval_eigen_to_benchmark", 244 | help="if set assume we are loading eigen results from npy but " 245 | "we want to evaluate using the new benchmark.", 246 | action="store_true") 247 | self.parser.add_argument("--eval_out_dir", 248 | help="if set will output the disparities to this folder", 249 | type=str) 250 | self.parser.add_argument("--post_process", 251 | help="if set will perform the flipping post processing " 252 | "from the original monodepth paper", 253 | action="store_true") 254 | 255 | def parse(self): 256 | self.options = self.parser.parse_args() 257 | return self.options 258 | -------------------------------------------------------------------------------- /splits/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaichen-z/DevNet/f71499dc078fec5fa65bd69a757f58df341aae93/splits/.DS_Store -------------------------------------------------------------------------------- /splits/benchmark/eigen_to_benchmark_ids.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaichen-z/DevNet/f71499dc078fec5fa65bd69a757f58df341aae93/splits/benchmark/eigen_to_benchmark_ids.npy -------------------------------------------------------------------------------- /splits/benchmark/test_files.txt: -------------------------------------------------------------------------------- 1 | image 0 2 | image 1 3 | image 2 4 | image 3 5 | image 4 6 | image 5 7 | image 6 8 | image 7 9 | image 8 10 | image 9 11 | image 10 12 | image 11 13 | image 12 14 | image 13 15 | image 14 16 | image 15 17 | image 16 18 | image 17 19 | image 18 20 | image 19 21 | image 20 22 | image 21 23 | image 22 24 | image 23 25 | image 24 26 | image 25 27 | image 26 28 | image 27 29 | image 28 30 | image 29 31 | image 30 32 | image 31 33 | image 32 34 | image 33 35 | image 34 36 | image 35 37 | image 36 38 | image 37 39 | image 38 40 | image 39 41 | image 40 42 | image 41 43 | image 42 44 | image 43 45 | image 44 46 | image 45 47 | image 46 48 | image 47 49 | image 48 50 | image 49 51 | image 50 52 | image 51 53 | image 52 54 | image 53 55 | image 54 56 | image 55 57 | image 56 58 | image 57 59 | image 58 60 | image 59 61 | image 60 62 | image 61 63 | image 62 64 | image 63 65 | image 64 66 | image 65 67 | image 66 68 | image 67 69 | image 68 70 | image 69 71 | image 70 72 | image 71 73 | image 72 74 | image 73 75 | image 74 76 | image 75 77 | image 76 78 | image 77 79 | image 78 80 | image 79 81 | image 80 82 | image 81 83 | image 82 84 | image 83 85 | image 84 86 | image 85 87 | image 86 88 | image 87 89 | image 88 90 | image 89 91 | image 90 92 | image 91 93 | image 92 94 | image 93 95 | image 94 96 | image 95 97 | image 96 98 | image 97 99 | image 98 100 | image 99 101 | image 100 102 | image 101 103 | image 102 104 | image 103 105 | image 104 106 | image 105 107 | image 106 108 | image 107 109 | image 108 110 | image 109 111 | image 110 112 | image 111 113 | image 112 114 | image 113 115 | image 114 116 | image 115 117 | image 116 118 | image 117 119 | image 118 120 | image 119 121 | image 120 122 | image 121 123 | image 122 124 | image 123 125 | image 124 126 | image 125 127 
| image 126 128 | image 127 129 | image 128 130 | image 129 131 | image 130 132 | image 131 133 | image 132 134 | image 133 135 | image 134 136 | image 135 137 | image 136 138 | image 137 139 | image 138 140 | image 139 141 | image 140 142 | image 141 143 | image 142 144 | image 143 145 | image 144 146 | image 145 147 | image 146 148 | image 147 149 | image 148 150 | image 149 151 | image 150 152 | image 151 153 | image 152 154 | image 153 155 | image 154 156 | image 155 157 | image 156 158 | image 157 159 | image 158 160 | image 159 161 | image 160 162 | image 161 163 | image 162 164 | image 163 165 | image 164 166 | image 165 167 | image 166 168 | image 167 169 | image 168 170 | image 169 171 | image 170 172 | image 171 173 | image 172 174 | image 173 175 | image 174 176 | image 175 177 | image 176 178 | image 177 179 | image 178 180 | image 179 181 | image 180 182 | image 181 183 | image 182 184 | image 183 185 | image 184 186 | image 185 187 | image 186 188 | image 187 189 | image 188 190 | image 189 191 | image 190 192 | image 191 193 | image 192 194 | image 193 195 | image 194 196 | image 195 197 | image 196 198 | image 197 199 | image 198 200 | image 199 201 | image 200 202 | image 201 203 | image 202 204 | image 203 205 | image 204 206 | image 205 207 | image 206 208 | image 207 209 | image 208 210 | image 209 211 | image 210 212 | image 211 213 | image 212 214 | image 213 215 | image 214 216 | image 215 217 | image 216 218 | image 217 219 | image 218 220 | image 219 221 | image 220 222 | image 221 223 | image 222 224 | image 223 225 | image 224 226 | image 225 227 | image 226 228 | image 227 229 | image 228 230 | image 229 231 | image 230 232 | image 231 233 | image 232 234 | image 233 235 | image 234 236 | image 235 237 | image 236 238 | image 237 239 | image 238 240 | image 239 241 | image 240 242 | image 241 243 | image 242 244 | image 243 245 | image 244 246 | image 245 247 | image 246 248 | image 247 249 | image 248 250 | image 249 251 | image 250 252 | image 251 253 | image 252 254 | image 253 255 | image 254 256 | image 255 257 | image 256 258 | image 257 259 | image 258 260 | image 259 261 | image 260 262 | image 261 263 | image 262 264 | image 263 265 | image 264 266 | image 265 267 | image 266 268 | image 267 269 | image 268 270 | image 269 271 | image 270 272 | image 271 273 | image 272 274 | image 273 275 | image 274 276 | image 275 277 | image 276 278 | image 277 279 | image 278 280 | image 279 281 | image 280 282 | image 281 283 | image 282 284 | image 283 285 | image 284 286 | image 285 287 | image 286 288 | image 287 289 | image 288 290 | image 289 291 | image 290 292 | image 291 293 | image 292 294 | image 293 295 | image 294 296 | image 295 297 | image 296 298 | image 297 299 | image 298 300 | image 299 301 | image 300 302 | image 301 303 | image 302 304 | image 303 305 | image 304 306 | image 305 307 | image 306 308 | image 307 309 | image 308 310 | image 309 311 | image 310 312 | image 311 313 | image 312 314 | image 313 315 | image 314 316 | image 315 317 | image 316 318 | image 317 319 | image 318 320 | image 319 321 | image 320 322 | image 321 323 | image 322 324 | image 323 325 | image 324 326 | image 325 327 | image 326 328 | image 327 329 | image 328 330 | image 329 331 | image 330 332 | image 331 333 | image 332 334 | image 333 335 | image 334 336 | image 335 337 | image 336 338 | image 337 339 | image 338 340 | image 339 341 | image 340 342 | image 341 343 | image 342 344 | image 343 345 | image 344 346 | image 345 347 | image 346 348 | image 347 349 | 
image 348 350 | image 349 351 | image 350 352 | image 351 353 | image 352 354 | image 353 355 | image 354 356 | image 355 357 | image 356 358 | image 357 359 | image 358 360 | image 359 361 | image 360 362 | image 361 363 | image 362 364 | image 363 365 | image 364 366 | image 365 367 | image 366 368 | image 367 369 | image 368 370 | image 369 371 | image 370 372 | image 371 373 | image 372 374 | image 373 375 | image 374 376 | image 375 377 | image 376 378 | image 377 379 | image 378 380 | image 379 381 | image 380 382 | image 381 383 | image 382 384 | image 383 385 | image 384 386 | image 385 387 | image 386 388 | image 387 389 | image 388 390 | image 389 391 | image 390 392 | image 391 393 | image 392 394 | image 393 395 | image 394 396 | image 395 397 | image 396 398 | image 397 399 | image 398 400 | image 399 401 | image 400 402 | image 401 403 | image 402 404 | image 403 405 | image 404 406 | image 405 407 | image 406 408 | image 407 409 | image 408 410 | image 409 411 | image 410 412 | image 411 413 | image 412 414 | image 413 415 | image 414 416 | image 415 417 | image 416 418 | image 417 419 | image 418 420 | image 419 421 | image 420 422 | image 421 423 | image 422 424 | image 423 425 | image 424 426 | image 425 427 | image 426 428 | image 427 429 | image 428 430 | image 429 431 | image 430 432 | image 431 433 | image 432 434 | image 433 435 | image 434 436 | image 435 437 | image 436 438 | image 437 439 | image 438 440 | image 439 441 | image 440 442 | image 441 443 | image 442 444 | image 443 445 | image 444 446 | image 445 447 | image 446 448 | image 447 449 | image 448 450 | image 449 451 | image 450 452 | image 451 453 | image 452 454 | image 453 455 | image 454 456 | image 455 457 | image 456 458 | image 457 459 | image 458 460 | image 459 461 | image 460 462 | image 461 463 | image 462 464 | image 463 465 | image 464 466 | image 465 467 | image 466 468 | image 467 469 | image 468 470 | image 469 471 | image 470 472 | image 471 473 | image 472 474 | image 473 475 | image 474 476 | image 475 477 | image 476 478 | image 477 479 | image 478 480 | image 479 481 | image 480 482 | image 481 483 | image 482 484 | image 483 485 | image 484 486 | image 485 487 | image 486 488 | image 487 489 | image 488 490 | image 489 491 | image 490 492 | image 491 493 | image 492 494 | image 493 495 | image 494 496 | image 495 497 | image 496 498 | image 497 499 | image 498 500 | image 499 501 | -------------------------------------------------------------------------------- /splits/eigen/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaichen-z/DevNet/f71499dc078fec5fa65bd69a757f58df341aae93/splits/eigen/.DS_Store -------------------------------------------------------------------------------- /splits/eigen/gt_depths.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaichen-z/DevNet/f71499dc078fec5fa65bd69a757f58df341aae93/splits/eigen/gt_depths.npz -------------------------------------------------------------------------------- /splits/odom/test_files_09.txt: -------------------------------------------------------------------------------- 1 | 9 0 l 2 | 9 1 l 3 | 9 2 l 4 | 9 3 l 5 | 9 4 l 6 | 9 5 l 7 | 9 6 l 8 | 9 7 l 9 | 9 8 l 10 | 9 9 l 11 | 9 10 l 12 | 9 11 l 13 | 9 12 l 14 | 9 13 l 15 | 9 14 l 16 | 9 15 l 17 | 9 16 l 18 | 9 17 l 19 | 9 18 l 20 | 9 19 l 21 | 9 20 l 22 | 9 21 l 23 | 9 22 l 24 | 9 23 l 25 | 9 24 l 26 | 9 25 l 27 | 9 26 l 28 | 9 27 l 
29 | 9 28 l 30 | 9 29 l 31 | 9 30 l 32 | 9 31 l 33 | 9 32 l 34 | 9 33 l 35 | 9 34 l 36 | 9 35 l 37 | 9 36 l 38 | 9 37 l 39 | 9 38 l 40 | 9 39 l 41 | 9 40 l 42 | 9 41 l 43 | 9 42 l 44 | 9 43 l 45 | 9 44 l 46 | 9 45 l 47 | 9 46 l 48 | 9 47 l 49 | 9 48 l 50 | 9 49 l 51 | 9 50 l 52 | 9 51 l 53 | 9 52 l 54 | 9 53 l 55 | 9 54 l 56 | 9 55 l 57 | 9 56 l 58 | 9 57 l 59 | 9 58 l 60 | 9 59 l 61 | 9 60 l 62 | 9 61 l 63 | 9 62 l 64 | 9 63 l 65 | 9 64 l 66 | 9 65 l 67 | 9 66 l 68 | 9 67 l 69 | 9 68 l 70 | 9 69 l 71 | 9 70 l 72 | 9 71 l 73 | 9 72 l 74 | 9 73 l 75 | 9 74 l 76 | 9 75 l 77 | 9 76 l 78 | 9 77 l 79 | 9 78 l 80 | 9 79 l 81 | 9 80 l 82 | 9 81 l 83 | 9 82 l 84 | 9 83 l 85 | 9 84 l 86 | 9 85 l 87 | 9 86 l 88 | 9 87 l 89 | 9 88 l 90 | 9 89 l 91 | 9 90 l 92 | 9 91 l 93 | 9 92 l 94 | 9 93 l 95 | 9 94 l 96 | 9 95 l 97 | 9 96 l 98 | 9 97 l 99 | 9 98 l 100 | 9 99 l 101 | 9 100 l 102 | 9 101 l 103 | 9 102 l 104 | 9 103 l 105 | 9 104 l 106 | 9 105 l 107 | 9 106 l 108 | 9 107 l 109 | 9 108 l 110 | 9 109 l 111 | 9 110 l 112 | 9 111 l 113 | 9 112 l 114 | 9 113 l 115 | 9 114 l 116 | 9 115 l 117 | 9 116 l 118 | 9 117 l 119 | 9 118 l 120 | 9 119 l 121 | 9 120 l 122 | 9 121 l 123 | 9 122 l 124 | 9 123 l 125 | 9 124 l 126 | 9 125 l 127 | 9 126 l 128 | 9 127 l 129 | 9 128 l 130 | 9 129 l 131 | 9 130 l 132 | 9 131 l 133 | 9 132 l 134 | 9 133 l 135 | 9 134 l 136 | 9 135 l 137 | 9 136 l 138 | 9 137 l 139 | 9 138 l 140 | 9 139 l 141 | 9 140 l 142 | 9 141 l 143 | 9 142 l 144 | 9 143 l 145 | 9 144 l 146 | 9 145 l 147 | 9 146 l 148 | 9 147 l 149 | 9 148 l 150 | 9 149 l 151 | 9 150 l 152 | 9 151 l 153 | 9 152 l 154 | 9 153 l 155 | 9 154 l 156 | 9 155 l 157 | 9 156 l 158 | 9 157 l 159 | 9 158 l 160 | 9 159 l 161 | 9 160 l 162 | 9 161 l 163 | 9 162 l 164 | 9 163 l 165 | 9 164 l 166 | 9 165 l 167 | 9 166 l 168 | 9 167 l 169 | 9 168 l 170 | 9 169 l 171 | 9 170 l 172 | 9 171 l 173 | 9 172 l 174 | 9 173 l 175 | 9 174 l 176 | 9 175 l 177 | 9 176 l 178 | 9 177 l 179 | 9 178 l 180 | 9 179 l 181 | 9 180 l 182 | 9 181 l 183 | 9 182 l 184 | 9 183 l 185 | 9 184 l 186 | 9 185 l 187 | 9 186 l 188 | 9 187 l 189 | 9 188 l 190 | 9 189 l 191 | 9 190 l 192 | 9 191 l 193 | 9 192 l 194 | 9 193 l 195 | 9 194 l 196 | 9 195 l 197 | 9 196 l 198 | 9 197 l 199 | 9 198 l 200 | 9 199 l 201 | 9 200 l 202 | 9 201 l 203 | 9 202 l 204 | 9 203 l 205 | 9 204 l 206 | 9 205 l 207 | 9 206 l 208 | 9 207 l 209 | 9 208 l 210 | 9 209 l 211 | 9 210 l 212 | 9 211 l 213 | 9 212 l 214 | 9 213 l 215 | 9 214 l 216 | 9 215 l 217 | 9 216 l 218 | 9 217 l 219 | 9 218 l 220 | 9 219 l 221 | 9 220 l 222 | 9 221 l 223 | 9 222 l 224 | 9 223 l 225 | 9 224 l 226 | 9 225 l 227 | 9 226 l 228 | 9 227 l 229 | 9 228 l 230 | 9 229 l 231 | 9 230 l 232 | 9 231 l 233 | 9 232 l 234 | 9 233 l 235 | 9 234 l 236 | 9 235 l 237 | 9 236 l 238 | 9 237 l 239 | 9 238 l 240 | 9 239 l 241 | 9 240 l 242 | 9 241 l 243 | 9 242 l 244 | 9 243 l 245 | 9 244 l 246 | 9 245 l 247 | 9 246 l 248 | 9 247 l 249 | 9 248 l 250 | 9 249 l 251 | 9 250 l 252 | 9 251 l 253 | 9 252 l 254 | 9 253 l 255 | 9 254 l 256 | 9 255 l 257 | 9 256 l 258 | 9 257 l 259 | 9 258 l 260 | 9 259 l 261 | 9 260 l 262 | 9 261 l 263 | 9 262 l 264 | 9 263 l 265 | 9 264 l 266 | 9 265 l 267 | 9 266 l 268 | 9 267 l 269 | 9 268 l 270 | 9 269 l 271 | 9 270 l 272 | 9 271 l 273 | 9 272 l 274 | 9 273 l 275 | 9 274 l 276 | 9 275 l 277 | 9 276 l 278 | 9 277 l 279 | 9 278 l 280 | 9 279 l 281 | 9 280 l 282 | 9 281 l 283 | 9 282 l 284 | 9 283 l 285 | 9 284 l 286 | 9 285 l 287 | 9 286 l 288 | 9 287 l 289 | 9 288 l 290 | 9 289 l 291 | 9 290 l 292 | 9 291 l 
293 | 9 292 l 294 | 9 293 l 295 | 9 294 l 296 | 9 295 l 297 | 9 296 l 298 | 9 297 l 299 | 9 298 l 300 | 9 299 l 301 | 9 300 l 302 | 9 301 l 303 | 9 302 l 304 | 9 303 l 305 | 9 304 l 306 | 9 305 l 307 | 9 306 l 308 | 9 307 l 309 | 9 308 l 310 | 9 309 l 311 | 9 310 l 312 | 9 311 l 313 | 9 312 l 314 | 9 313 l 315 | 9 314 l 316 | 9 315 l 317 | 9 316 l 318 | 9 317 l 319 | 9 318 l 320 | 9 319 l 321 | 9 320 l 322 | 9 321 l 323 | 9 322 l 324 | 9 323 l 325 | 9 324 l 326 | 9 325 l 327 | 9 326 l 328 | 9 327 l 329 | 9 328 l 330 | 9 329 l 331 | 9 330 l 332 | 9 331 l 333 | 9 332 l 334 | 9 333 l 335 | 9 334 l 336 | 9 335 l 337 | 9 336 l 338 | 9 337 l 339 | 9 338 l 340 | 9 339 l 341 | 9 340 l 342 | 9 341 l 343 | 9 342 l 344 | 9 343 l 345 | 9 344 l 346 | 9 345 l 347 | 9 346 l 348 | 9 347 l 349 | 9 348 l 350 | 9 349 l 351 | 9 350 l 352 | 9 351 l 353 | 9 352 l 354 | 9 353 l 355 | 9 354 l 356 | 9 355 l 357 | 9 356 l 358 | 9 357 l 359 | 9 358 l 360 | 9 359 l 361 | 9 360 l 362 | 9 361 l 363 | 9 362 l 364 | 9 363 l 365 | 9 364 l 366 | 9 365 l 367 | 9 366 l 368 | 9 367 l 369 | 9 368 l 370 | 9 369 l 371 | 9 370 l 372 | 9 371 l 373 | 9 372 l 374 | 9 373 l 375 | 9 374 l 376 | 9 375 l 377 | 9 376 l 378 | 9 377 l 379 | 9 378 l 380 | 9 379 l 381 | 9 380 l 382 | 9 381 l 383 | 9 382 l 384 | 9 383 l 385 | 9 384 l 386 | 9 385 l 387 | 9 386 l 388 | 9 387 l 389 | 9 388 l 390 | 9 389 l 391 | 9 390 l 392 | 9 391 l 393 | 9 392 l 394 | 9 393 l 395 | 9 394 l 396 | 9 395 l 397 | 9 396 l 398 | 9 397 l 399 | 9 398 l 400 | 9 399 l 401 | 9 400 l 402 | 9 401 l 403 | 9 402 l 404 | 9 403 l 405 | 9 404 l 406 | 9 405 l 407 | 9 406 l 408 | 9 407 l 409 | 9 408 l 410 | 9 409 l 411 | 9 410 l 412 | 9 411 l 413 | 9 412 l 414 | 9 413 l 415 | 9 414 l 416 | 9 415 l 417 | 9 416 l 418 | 9 417 l 419 | 9 418 l 420 | 9 419 l 421 | 9 420 l 422 | 9 421 l 423 | 9 422 l 424 | 9 423 l 425 | 9 424 l 426 | 9 425 l 427 | 9 426 l 428 | 9 427 l 429 | 9 428 l 430 | 9 429 l 431 | 9 430 l 432 | 9 431 l 433 | 9 432 l 434 | 9 433 l 435 | 9 434 l 436 | 9 435 l 437 | 9 436 l 438 | 9 437 l 439 | 9 438 l 440 | 9 439 l 441 | 9 440 l 442 | 9 441 l 443 | 9 442 l 444 | 9 443 l 445 | 9 444 l 446 | 9 445 l 447 | 9 446 l 448 | 9 447 l 449 | 9 448 l 450 | 9 449 l 451 | 9 450 l 452 | 9 451 l 453 | 9 452 l 454 | 9 453 l 455 | 9 454 l 456 | 9 455 l 457 | 9 456 l 458 | 9 457 l 459 | 9 458 l 460 | 9 459 l 461 | 9 460 l 462 | 9 461 l 463 | 9 462 l 464 | 9 463 l 465 | 9 464 l 466 | 9 465 l 467 | 9 466 l 468 | 9 467 l 469 | 9 468 l 470 | 9 469 l 471 | 9 470 l 472 | 9 471 l 473 | 9 472 l 474 | 9 473 l 475 | 9 474 l 476 | 9 475 l 477 | 9 476 l 478 | 9 477 l 479 | 9 478 l 480 | 9 479 l 481 | 9 480 l 482 | 9 481 l 483 | 9 482 l 484 | 9 483 l 485 | 9 484 l 486 | 9 485 l 487 | 9 486 l 488 | 9 487 l 489 | 9 488 l 490 | 9 489 l 491 | 9 490 l 492 | 9 491 l 493 | 9 492 l 494 | 9 493 l 495 | 9 494 l 496 | 9 495 l 497 | 9 496 l 498 | 9 497 l 499 | 9 498 l 500 | 9 499 l 501 | 9 500 l 502 | 9 501 l 503 | 9 502 l 504 | 9 503 l 505 | 9 504 l 506 | 9 505 l 507 | 9 506 l 508 | 9 507 l 509 | 9 508 l 510 | 9 509 l 511 | 9 510 l 512 | 9 511 l 513 | 9 512 l 514 | 9 513 l 515 | 9 514 l 516 | 9 515 l 517 | 9 516 l 518 | 9 517 l 519 | 9 518 l 520 | 9 519 l 521 | 9 520 l 522 | 9 521 l 523 | 9 522 l 524 | 9 523 l 525 | 9 524 l 526 | 9 525 l 527 | 9 526 l 528 | 9 527 l 529 | 9 528 l 530 | 9 529 l 531 | 9 530 l 532 | 9 531 l 533 | 9 532 l 534 | 9 533 l 535 | 9 534 l 536 | 9 535 l 537 | 9 536 l 538 | 9 537 l 539 | 9 538 l 540 | 9 539 l 541 | 9 540 l 542 | 9 541 l 543 | 9 542 l 544 | 9 543 l 545 | 9 544 l 546 | 9 545 
l 547 | 9 546 l 548 | 9 547 l 549 | 9 548 l 550 | 9 549 l 551 | 9 550 l 552 | 9 551 l 553 | 9 552 l 554 | 9 553 l 555 | 9 554 l 556 | 9 555 l 557 | 9 556 l 558 | 9 557 l 559 | 9 558 l 560 | 9 559 l 561 | 9 560 l 562 | 9 561 l 563 | 9 562 l 564 | 9 563 l 565 | 9 564 l 566 | 9 565 l 567 | 9 566 l 568 | 9 567 l 569 | 9 568 l 570 | 9 569 l 571 | 9 570 l 572 | 9 571 l 573 | 9 572 l 574 | 9 573 l 575 | 9 574 l 576 | 9 575 l 577 | 9 576 l 578 | 9 577 l 579 | 9 578 l 580 | 9 579 l 581 | 9 580 l 582 | 9 581 l 583 | 9 582 l 584 | 9 583 l 585 | 9 584 l 586 | 9 585 l 587 | 9 586 l 588 | 9 587 l 589 | 9 588 l 590 | 9 589 l 591 | 9 590 l 592 | 9 591 l 593 | 9 592 l 594 | 9 593 l 595 | 9 594 l 596 | 9 595 l 597 | 9 596 l 598 | 9 597 l 599 | 9 598 l 600 | 9 599 l 601 | 9 600 l 602 | 9 601 l 603 | 9 602 l 604 | 9 603 l 605 | 9 604 l 606 | 9 605 l 607 | 9 606 l 608 | 9 607 l 609 | 9 608 l 610 | 9 609 l 611 | 9 610 l 612 | 9 611 l 613 | 9 612 l 614 | 9 613 l 615 | 9 614 l 616 | 9 615 l 617 | 9 616 l 618 | 9 617 l 619 | 9 618 l 620 | 9 619 l 621 | 9 620 l 622 | 9 621 l 623 | 9 622 l 624 | 9 623 l 625 | 9 624 l 626 | 9 625 l 627 | 9 626 l 628 | 9 627 l 629 | 9 628 l 630 | 9 629 l 631 | 9 630 l 632 | 9 631 l 633 | 9 632 l 634 | 9 633 l 635 | 9 634 l 636 | 9 635 l 637 | 9 636 l 638 | 9 637 l 639 | 9 638 l 640 | 9 639 l 641 | 9 640 l 642 | 9 641 l 643 | 9 642 l 644 | 9 643 l 645 | 9 644 l 646 | 9 645 l 647 | 9 646 l 648 | 9 647 l 649 | 9 648 l 650 | 9 649 l 651 | 9 650 l 652 | 9 651 l 653 | 9 652 l 654 | 9 653 l 655 | 9 654 l 656 | 9 655 l 657 | 9 656 l 658 | 9 657 l 659 | 9 658 l 660 | 9 659 l 661 | 9 660 l 662 | 9 661 l 663 | 9 662 l 664 | 9 663 l 665 | 9 664 l 666 | 9 665 l 667 | 9 666 l 668 | 9 667 l 669 | 9 668 l 670 | 9 669 l 671 | 9 670 l 672 | 9 671 l 673 | 9 672 l 674 | 9 673 l 675 | 9 674 l 676 | 9 675 l 677 | 9 676 l 678 | 9 677 l 679 | 9 678 l 680 | 9 679 l 681 | 9 680 l 682 | 9 681 l 683 | 9 682 l 684 | 9 683 l 685 | 9 684 l 686 | 9 685 l 687 | 9 686 l 688 | 9 687 l 689 | 9 688 l 690 | 9 689 l 691 | 9 690 l 692 | 9 691 l 693 | 9 692 l 694 | 9 693 l 695 | 9 694 l 696 | 9 695 l 697 | 9 696 l 698 | 9 697 l 699 | 9 698 l 700 | 9 699 l 701 | 9 700 l 702 | 9 701 l 703 | 9 702 l 704 | 9 703 l 705 | 9 704 l 706 | 9 705 l 707 | 9 706 l 708 | 9 707 l 709 | 9 708 l 710 | 9 709 l 711 | 9 710 l 712 | 9 711 l 713 | 9 712 l 714 | 9 713 l 715 | 9 714 l 716 | 9 715 l 717 | 9 716 l 718 | 9 717 l 719 | 9 718 l 720 | 9 719 l 721 | 9 720 l 722 | 9 721 l 723 | 9 722 l 724 | 9 723 l 725 | 9 724 l 726 | 9 725 l 727 | 9 726 l 728 | 9 727 l 729 | 9 728 l 730 | 9 729 l 731 | 9 730 l 732 | 9 731 l 733 | 9 732 l 734 | 9 733 l 735 | 9 734 l 736 | 9 735 l 737 | 9 736 l 738 | 9 737 l 739 | 9 738 l 740 | 9 739 l 741 | 9 740 l 742 | 9 741 l 743 | 9 742 l 744 | 9 743 l 745 | 9 744 l 746 | 9 745 l 747 | 9 746 l 748 | 9 747 l 749 | 9 748 l 750 | 9 749 l 751 | 9 750 l 752 | 9 751 l 753 | 9 752 l 754 | 9 753 l 755 | 9 754 l 756 | 9 755 l 757 | 9 756 l 758 | 9 757 l 759 | 9 758 l 760 | 9 759 l 761 | 9 760 l 762 | 9 761 l 763 | 9 762 l 764 | 9 763 l 765 | 9 764 l 766 | 9 765 l 767 | 9 766 l 768 | 9 767 l 769 | 9 768 l 770 | 9 769 l 771 | 9 770 l 772 | 9 771 l 773 | 9 772 l 774 | 9 773 l 775 | 9 774 l 776 | 9 775 l 777 | 9 776 l 778 | 9 777 l 779 | 9 778 l 780 | 9 779 l 781 | 9 780 l 782 | 9 781 l 783 | 9 782 l 784 | 9 783 l 785 | 9 784 l 786 | 9 785 l 787 | 9 786 l 788 | 9 787 l 789 | 9 788 l 790 | 9 789 l 791 | 9 790 l 792 | 9 791 l 793 | 9 792 l 794 | 9 793 l 795 | 9 794 l 796 | 9 795 l 797 | 9 796 l 798 | 9 797 l 799 | 9 798 l 800 | 9 
799 l 801 | 9 800 l 802 | 9 801 l 803 | 9 802 l 804 | 9 803 l 805 | 9 804 l 806 | 9 805 l 807 | 9 806 l 808 | 9 807 l 809 | 9 808 l 810 | 9 809 l 811 | 9 810 l 812 | 9 811 l 813 | 9 812 l 814 | 9 813 l 815 | 9 814 l 816 | 9 815 l 817 | 9 816 l 818 | 9 817 l 819 | 9 818 l 820 | 9 819 l 821 | 9 820 l 822 | 9 821 l 823 | 9 822 l 824 | 9 823 l 825 | 9 824 l 826 | 9 825 l 827 | 9 826 l 828 | 9 827 l 829 | 9 828 l 830 | 9 829 l 831 | 9 830 l 832 | 9 831 l 833 | 9 832 l 834 | 9 833 l 835 | 9 834 l 836 | 9 835 l 837 | 9 836 l 838 | 9 837 l 839 | 9 838 l 840 | 9 839 l 841 | 9 840 l 842 | 9 841 l 843 | 9 842 l 844 | 9 843 l 845 | 9 844 l 846 | 9 845 l 847 | 9 846 l 848 | 9 847 l 849 | 9 848 l 850 | 9 849 l 851 | 9 850 l 852 | 9 851 l 853 | 9 852 l 854 | 9 853 l 855 | 9 854 l 856 | 9 855 l 857 | 9 856 l 858 | 9 857 l 859 | 9 858 l 860 | 9 859 l 861 | 9 860 l 862 | 9 861 l 863 | 9 862 l 864 | 9 863 l 865 | 9 864 l 866 | 9 865 l 867 | 9 866 l 868 | 9 867 l 869 | 9 868 l 870 | 9 869 l 871 | 9 870 l 872 | 9 871 l 873 | 9 872 l 874 | 9 873 l 875 | 9 874 l 876 | 9 875 l 877 | 9 876 l 878 | 9 877 l 879 | 9 878 l 880 | 9 879 l 881 | 9 880 l 882 | 9 881 l 883 | 9 882 l 884 | 9 883 l 885 | 9 884 l 886 | 9 885 l 887 | 9 886 l 888 | 9 887 l 889 | 9 888 l 890 | 9 889 l 891 | 9 890 l 892 | 9 891 l 893 | 9 892 l 894 | 9 893 l 895 | 9 894 l 896 | 9 895 l 897 | 9 896 l 898 | 9 897 l 899 | 9 898 l 900 | 9 899 l 901 | 9 900 l 902 | 9 901 l 903 | 9 902 l 904 | 9 903 l 905 | 9 904 l 906 | 9 905 l 907 | 9 906 l 908 | 9 907 l 909 | 9 908 l 910 | 9 909 l 911 | 9 910 l 912 | 9 911 l 913 | 9 912 l 914 | 9 913 l 915 | 9 914 l 916 | 9 915 l 917 | 9 916 l 918 | 9 917 l 919 | 9 918 l 920 | 9 919 l 921 | 9 920 l 922 | 9 921 l 923 | 9 922 l 924 | 9 923 l 925 | 9 924 l 926 | 9 925 l 927 | 9 926 l 928 | 9 927 l 929 | 9 928 l 930 | 9 929 l 931 | 9 930 l 932 | 9 931 l 933 | 9 932 l 934 | 9 933 l 935 | 9 934 l 936 | 9 935 l 937 | 9 936 l 938 | 9 937 l 939 | 9 938 l 940 | 9 939 l 941 | 9 940 l 942 | 9 941 l 943 | 9 942 l 944 | 9 943 l 945 | 9 944 l 946 | 9 945 l 947 | 9 946 l 948 | 9 947 l 949 | 9 948 l 950 | 9 949 l 951 | 9 950 l 952 | 9 951 l 953 | 9 952 l 954 | 9 953 l 955 | 9 954 l 956 | 9 955 l 957 | 9 956 l 958 | 9 957 l 959 | 9 958 l 960 | 9 959 l 961 | 9 960 l 962 | 9 961 l 963 | 9 962 l 964 | 9 963 l 965 | 9 964 l 966 | 9 965 l 967 | 9 966 l 968 | 9 967 l 969 | 9 968 l 970 | 9 969 l 971 | 9 970 l 972 | 9 971 l 973 | 9 972 l 974 | 9 973 l 975 | 9 974 l 976 | 9 975 l 977 | 9 976 l 978 | 9 977 l 979 | 9 978 l 980 | 9 979 l 981 | 9 980 l 982 | 9 981 l 983 | 9 982 l 984 | 9 983 l 985 | 9 984 l 986 | 9 985 l 987 | 9 986 l 988 | 9 987 l 989 | 9 988 l 990 | 9 989 l 991 | 9 990 l 992 | 9 991 l 993 | 9 992 l 994 | 9 993 l 995 | 9 994 l 996 | 9 995 l 997 | 9 996 l 998 | 9 997 l 999 | 9 998 l 1000 | 9 999 l 1001 | 9 1000 l 1002 | 9 1001 l 1003 | 9 1002 l 1004 | 9 1003 l 1005 | 9 1004 l 1006 | 9 1005 l 1007 | 9 1006 l 1008 | 9 1007 l 1009 | 9 1008 l 1010 | 9 1009 l 1011 | 9 1010 l 1012 | 9 1011 l 1013 | 9 1012 l 1014 | 9 1013 l 1015 | 9 1014 l 1016 | 9 1015 l 1017 | 9 1016 l 1018 | 9 1017 l 1019 | 9 1018 l 1020 | 9 1019 l 1021 | 9 1020 l 1022 | 9 1021 l 1023 | 9 1022 l 1024 | 9 1023 l 1025 | 9 1024 l 1026 | 9 1025 l 1027 | 9 1026 l 1028 | 9 1027 l 1029 | 9 1028 l 1030 | 9 1029 l 1031 | 9 1030 l 1032 | 9 1031 l 1033 | 9 1032 l 1034 | 9 1033 l 1035 | 9 1034 l 1036 | 9 1035 l 1037 | 9 1036 l 1038 | 9 1037 l 1039 | 9 1038 l 1040 | 9 1039 l 1041 | 9 1040 l 1042 | 9 1041 l 1043 | 9 1042 l 1044 | 9 1043 l 1045 | 9 1044 l 1046 | 9 1045 l 1047 | 9 
1046 l 1048 | 9 1047 l 1049 | 9 1048 l 1050 | 9 1049 l 1051 | 9 1050 l 1052 | 9 1051 l 1053 | 9 1052 l 1054 | 9 1053 l 1055 | 9 1054 l 1056 | 9 1055 l 1057 | 9 1056 l 1058 | 9 1057 l 1059 | 9 1058 l 1060 | 9 1059 l 1061 | 9 1060 l 1062 | 9 1061 l 1063 | 9 1062 l 1064 | 9 1063 l 1065 | 9 1064 l 1066 | 9 1065 l 1067 | 9 1066 l 1068 | 9 1067 l 1069 | 9 1068 l 1070 | 9 1069 l 1071 | 9 1070 l 1072 | 9 1071 l 1073 | 9 1072 l 1074 | 9 1073 l 1075 | 9 1074 l 1076 | 9 1075 l 1077 | 9 1076 l 1078 | 9 1077 l 1079 | 9 1078 l 1080 | 9 1079 l 1081 | 9 1080 l 1082 | 9 1081 l 1083 | 9 1082 l 1084 | 9 1083 l 1085 | 9 1084 l 1086 | 9 1085 l 1087 | 9 1086 l 1088 | 9 1087 l 1089 | 9 1088 l 1090 | 9 1089 l 1091 | 9 1090 l 1092 | 9 1091 l 1093 | 9 1092 l 1094 | 9 1093 l 1095 | 9 1094 l 1096 | 9 1095 l 1097 | 9 1096 l 1098 | 9 1097 l 1099 | 9 1098 l 1100 | 9 1099 l 1101 | 9 1100 l 1102 | 9 1101 l 1103 | 9 1102 l 1104 | 9 1103 l 1105 | 9 1104 l 1106 | 9 1105 l 1107 | 9 1106 l 1108 | 9 1107 l 1109 | 9 1108 l 1110 | 9 1109 l 1111 | 9 1110 l 1112 | 9 1111 l 1113 | 9 1112 l 1114 | 9 1113 l 1115 | 9 1114 l 1116 | 9 1115 l 1117 | 9 1116 l 1118 | 9 1117 l 1119 | 9 1118 l 1120 | 9 1119 l 1121 | 9 1120 l 1122 | 9 1121 l 1123 | 9 1122 l 1124 | 9 1123 l 1125 | 9 1124 l 1126 | 9 1125 l 1127 | 9 1126 l 1128 | 9 1127 l 1129 | 9 1128 l 1130 | 9 1129 l 1131 | 9 1130 l 1132 | 9 1131 l 1133 | 9 1132 l 1134 | 9 1133 l 1135 | 9 1134 l 1136 | 9 1135 l 1137 | 9 1136 l 1138 | 9 1137 l 1139 | 9 1138 l 1140 | 9 1139 l 1141 | 9 1140 l 1142 | 9 1141 l 1143 | 9 1142 l 1144 | 9 1143 l 1145 | 9 1144 l 1146 | 9 1145 l 1147 | 9 1146 l 1148 | 9 1147 l 1149 | 9 1148 l 1150 | 9 1149 l 1151 | 9 1150 l 1152 | 9 1151 l 1153 | 9 1152 l 1154 | 9 1153 l 1155 | 9 1154 l 1156 | 9 1155 l 1157 | 9 1156 l 1158 | 9 1157 l 1159 | 9 1158 l 1160 | 9 1159 l 1161 | 9 1160 l 1162 | 9 1161 l 1163 | 9 1162 l 1164 | 9 1163 l 1165 | 9 1164 l 1166 | 9 1165 l 1167 | 9 1166 l 1168 | 9 1167 l 1169 | 9 1168 l 1170 | 9 1169 l 1171 | 9 1170 l 1172 | 9 1171 l 1173 | 9 1172 l 1174 | 9 1173 l 1175 | 9 1174 l 1176 | 9 1175 l 1177 | 9 1176 l 1178 | 9 1177 l 1179 | 9 1178 l 1180 | 9 1179 l 1181 | 9 1180 l 1182 | 9 1181 l 1183 | 9 1182 l 1184 | 9 1183 l 1185 | 9 1184 l 1186 | 9 1185 l 1187 | 9 1186 l 1188 | 9 1187 l 1189 | 9 1188 l 1190 | 9 1189 l 1191 | 9 1190 l 1192 | 9 1191 l 1193 | 9 1192 l 1194 | 9 1193 l 1195 | 9 1194 l 1196 | 9 1195 l 1197 | 9 1196 l 1198 | 9 1197 l 1199 | 9 1198 l 1200 | 9 1199 l 1201 | 9 1200 l 1202 | 9 1201 l 1203 | 9 1202 l 1204 | 9 1203 l 1205 | 9 1204 l 1206 | 9 1205 l 1207 | 9 1206 l 1208 | 9 1207 l 1209 | 9 1208 l 1210 | 9 1209 l 1211 | 9 1210 l 1212 | 9 1211 l 1213 | 9 1212 l 1214 | 9 1213 l 1215 | 9 1214 l 1216 | 9 1215 l 1217 | 9 1216 l 1218 | 9 1217 l 1219 | 9 1218 l 1220 | 9 1219 l 1221 | 9 1220 l 1222 | 9 1221 l 1223 | 9 1222 l 1224 | 9 1223 l 1225 | 9 1224 l 1226 | 9 1225 l 1227 | 9 1226 l 1228 | 9 1227 l 1229 | 9 1228 l 1230 | 9 1229 l 1231 | 9 1230 l 1232 | 9 1231 l 1233 | 9 1232 l 1234 | 9 1233 l 1235 | 9 1234 l 1236 | 9 1235 l 1237 | 9 1236 l 1238 | 9 1237 l 1239 | 9 1238 l 1240 | 9 1239 l 1241 | 9 1240 l 1242 | 9 1241 l 1243 | 9 1242 l 1244 | 9 1243 l 1245 | 9 1244 l 1246 | 9 1245 l 1247 | 9 1246 l 1248 | 9 1247 l 1249 | 9 1248 l 1250 | 9 1249 l 1251 | 9 1250 l 1252 | 9 1251 l 1253 | 9 1252 l 1254 | 9 1253 l 1255 | 9 1254 l 1256 | 9 1255 l 1257 | 9 1256 l 1258 | 9 1257 l 1259 | 9 1258 l 1260 | 9 1259 l 1261 | 9 1260 l 1262 | 9 1261 l 1263 | 9 1262 l 1264 | 9 1263 l 1265 | 9 1264 l 1266 | 9 1265 l 1267 | 9 1266 l 1268 | 9 1267 l 1269 | 9 
1268 l 1270 | 9 1269 l 1271 | 9 1270 l 1272 | 9 1271 l 1273 | 9 1272 l 1274 | 9 1273 l 1275 | 9 1274 l 1276 | 9 1275 l 1277 | 9 1276 l 1278 | 9 1277 l 1279 | 9 1278 l 1280 | 9 1279 l 1281 | 9 1280 l 1282 | 9 1281 l 1283 | 9 1282 l 1284 | 9 1283 l 1285 | 9 1284 l 1286 | 9 1285 l 1287 | 9 1286 l 1288 | 9 1287 l 1289 | 9 1288 l 1290 | 9 1289 l 1291 | 9 1290 l 1292 | 9 1291 l 1293 | 9 1292 l 1294 | 9 1293 l 1295 | 9 1294 l 1296 | 9 1295 l 1297 | 9 1296 l 1298 | 9 1297 l 1299 | 9 1298 l 1300 | 9 1299 l 1301 | 9 1300 l 1302 | 9 1301 l 1303 | 9 1302 l 1304 | 9 1303 l 1305 | 9 1304 l 1306 | 9 1305 l 1307 | 9 1306 l 1308 | 9 1307 l 1309 | 9 1308 l 1310 | 9 1309 l 1311 | 9 1310 l 1312 | 9 1311 l 1313 | 9 1312 l 1314 | 9 1313 l 1315 | 9 1314 l 1316 | 9 1315 l 1317 | 9 1316 l 1318 | 9 1317 l 1319 | 9 1318 l 1320 | 9 1319 l 1321 | 9 1320 l 1322 | 9 1321 l 1323 | 9 1322 l 1324 | 9 1323 l 1325 | 9 1324 l 1326 | 9 1325 l 1327 | 9 1326 l 1328 | 9 1327 l 1329 | 9 1328 l 1330 | 9 1329 l 1331 | 9 1330 l 1332 | 9 1331 l 1333 | 9 1332 l 1334 | 9 1333 l 1335 | 9 1334 l 1336 | 9 1335 l 1337 | 9 1336 l 1338 | 9 1337 l 1339 | 9 1338 l 1340 | 9 1339 l 1341 | 9 1340 l 1342 | 9 1341 l 1343 | 9 1342 l 1344 | 9 1343 l 1345 | 9 1344 l 1346 | 9 1345 l 1347 | 9 1346 l 1348 | 9 1347 l 1349 | 9 1348 l 1350 | 9 1349 l 1351 | 9 1350 l 1352 | 9 1351 l 1353 | 9 1352 l 1354 | 9 1353 l 1355 | 9 1354 l 1356 | 9 1355 l 1357 | 9 1356 l 1358 | 9 1357 l 1359 | 9 1358 l 1360 | 9 1359 l 1361 | 9 1360 l 1362 | 9 1361 l 1363 | 9 1362 l 1364 | 9 1363 l 1365 | 9 1364 l 1366 | 9 1365 l 1367 | 9 1366 l 1368 | 9 1367 l 1369 | 9 1368 l 1370 | 9 1369 l 1371 | 9 1370 l 1372 | 9 1371 l 1373 | 9 1372 l 1374 | 9 1373 l 1375 | 9 1374 l 1376 | 9 1375 l 1377 | 9 1376 l 1378 | 9 1377 l 1379 | 9 1378 l 1380 | 9 1379 l 1381 | 9 1380 l 1382 | 9 1381 l 1383 | 9 1382 l 1384 | 9 1383 l 1385 | 9 1384 l 1386 | 9 1385 l 1387 | 9 1386 l 1388 | 9 1387 l 1389 | 9 1388 l 1390 | 9 1389 l 1391 | 9 1390 l 1392 | 9 1391 l 1393 | 9 1392 l 1394 | 9 1393 l 1395 | 9 1394 l 1396 | 9 1395 l 1397 | 9 1396 l 1398 | 9 1397 l 1399 | 9 1398 l 1400 | 9 1399 l 1401 | 9 1400 l 1402 | 9 1401 l 1403 | 9 1402 l 1404 | 9 1403 l 1405 | 9 1404 l 1406 | 9 1405 l 1407 | 9 1406 l 1408 | 9 1407 l 1409 | 9 1408 l 1410 | 9 1409 l 1411 | 9 1410 l 1412 | 9 1411 l 1413 | 9 1412 l 1414 | 9 1413 l 1415 | 9 1414 l 1416 | 9 1415 l 1417 | 9 1416 l 1418 | 9 1417 l 1419 | 9 1418 l 1420 | 9 1419 l 1421 | 9 1420 l 1422 | 9 1421 l 1423 | 9 1422 l 1424 | 9 1423 l 1425 | 9 1424 l 1426 | 9 1425 l 1427 | 9 1426 l 1428 | 9 1427 l 1429 | 9 1428 l 1430 | 9 1429 l 1431 | 9 1430 l 1432 | 9 1431 l 1433 | 9 1432 l 1434 | 9 1433 l 1435 | 9 1434 l 1436 | 9 1435 l 1437 | 9 1436 l 1438 | 9 1437 l 1439 | 9 1438 l 1440 | 9 1439 l 1441 | 9 1440 l 1442 | 9 1441 l 1443 | 9 1442 l 1444 | 9 1443 l 1445 | 9 1444 l 1446 | 9 1445 l 1447 | 9 1446 l 1448 | 9 1447 l 1449 | 9 1448 l 1450 | 9 1449 l 1451 | 9 1450 l 1452 | 9 1451 l 1453 | 9 1452 l 1454 | 9 1453 l 1455 | 9 1454 l 1456 | 9 1455 l 1457 | 9 1456 l 1458 | 9 1457 l 1459 | 9 1458 l 1460 | 9 1459 l 1461 | 9 1460 l 1462 | 9 1461 l 1463 | 9 1462 l 1464 | 9 1463 l 1465 | 9 1464 l 1466 | 9 1465 l 1467 | 9 1466 l 1468 | 9 1467 l 1469 | 9 1468 l 1470 | 9 1469 l 1471 | 9 1470 l 1472 | 9 1471 l 1473 | 9 1472 l 1474 | 9 1473 l 1475 | 9 1474 l 1476 | 9 1475 l 1477 | 9 1476 l 1478 | 9 1477 l 1479 | 9 1478 l 1480 | 9 1479 l 1481 | 9 1480 l 1482 | 9 1481 l 1483 | 9 1482 l 1484 | 9 1483 l 1485 | 9 1484 l 1486 | 9 1485 l 1487 | 9 1486 l 1488 | 9 1487 l 1489 | 9 1488 l 1490 | 9 1489 l 1491 | 9 
1490 l 1492 | 9 1491 l 1493 | 9 1492 l 1494 | 9 1493 l 1495 | 9 1494 l 1496 | 9 1495 l 1497 | 9 1496 l 1498 | 9 1497 l 1499 | 9 1498 l 1500 | 9 1499 l 1501 | 9 1500 l 1502 | 9 1501 l 1503 | 9 1502 l 1504 | 9 1503 l 1505 | 9 1504 l 1506 | 9 1505 l 1507 | 9 1506 l 1508 | 9 1507 l 1509 | 9 1508 l 1510 | 9 1509 l 1511 | 9 1510 l 1512 | 9 1511 l 1513 | 9 1512 l 1514 | 9 1513 l 1515 | 9 1514 l 1516 | 9 1515 l 1517 | 9 1516 l 1518 | 9 1517 l 1519 | 9 1518 l 1520 | 9 1519 l 1521 | 9 1520 l 1522 | 9 1521 l 1523 | 9 1522 l 1524 | 9 1523 l 1525 | 9 1524 l 1526 | 9 1525 l 1527 | 9 1526 l 1528 | 9 1527 l 1529 | 9 1528 l 1530 | 9 1529 l 1531 | 9 1530 l 1532 | 9 1531 l 1533 | 9 1532 l 1534 | 9 1533 l 1535 | 9 1534 l 1536 | 9 1535 l 1537 | 9 1536 l 1538 | 9 1537 l 1539 | 9 1538 l 1540 | 9 1539 l 1541 | 9 1540 l 1542 | 9 1541 l 1543 | 9 1542 l 1544 | 9 1543 l 1545 | 9 1544 l 1546 | 9 1545 l 1547 | 9 1546 l 1548 | 9 1547 l 1549 | 9 1548 l 1550 | 9 1549 l 1551 | 9 1550 l 1552 | 9 1551 l 1553 | 9 1552 l 1554 | 9 1553 l 1555 | 9 1554 l 1556 | 9 1555 l 1557 | 9 1556 l 1558 | 9 1557 l 1559 | 9 1558 l 1560 | 9 1559 l 1561 | 9 1560 l 1562 | 9 1561 l 1563 | 9 1562 l 1564 | 9 1563 l 1565 | 9 1564 l 1566 | 9 1565 l 1567 | 9 1566 l 1568 | 9 1567 l 1569 | 9 1568 l 1570 | 9 1569 l 1571 | 9 1570 l 1572 | 9 1571 l 1573 | 9 1572 l 1574 | 9 1573 l 1575 | 9 1574 l 1576 | 9 1575 l 1577 | 9 1576 l 1578 | 9 1577 l 1579 | 9 1578 l 1580 | 9 1579 l 1581 | 9 1580 l 1582 | 9 1581 l 1583 | 9 1582 l 1584 | 9 1583 l 1585 | 9 1584 l 1586 | 9 1585 l 1587 | 9 1586 l 1588 | 9 1587 l 1589 | 9 1588 l 1590 | 9 1589 l 1591 | -------------------------------------------------------------------------------- /splits/odom/test_files_10.txt: -------------------------------------------------------------------------------- 1 | 10 0 l 2 | 10 1 l 3 | 10 2 l 4 | 10 3 l 5 | 10 4 l 6 | 10 5 l 7 | 10 6 l 8 | 10 7 l 9 | 10 8 l 10 | 10 9 l 11 | 10 10 l 12 | 10 11 l 13 | 10 12 l 14 | 10 13 l 15 | 10 14 l 16 | 10 15 l 17 | 10 16 l 18 | 10 17 l 19 | 10 18 l 20 | 10 19 l 21 | 10 20 l 22 | 10 21 l 23 | 10 22 l 24 | 10 23 l 25 | 10 24 l 26 | 10 25 l 27 | 10 26 l 28 | 10 27 l 29 | 10 28 l 30 | 10 29 l 31 | 10 30 l 32 | 10 31 l 33 | 10 32 l 34 | 10 33 l 35 | 10 34 l 36 | 10 35 l 37 | 10 36 l 38 | 10 37 l 39 | 10 38 l 40 | 10 39 l 41 | 10 40 l 42 | 10 41 l 43 | 10 42 l 44 | 10 43 l 45 | 10 44 l 46 | 10 45 l 47 | 10 46 l 48 | 10 47 l 49 | 10 48 l 50 | 10 49 l 51 | 10 50 l 52 | 10 51 l 53 | 10 52 l 54 | 10 53 l 55 | 10 54 l 56 | 10 55 l 57 | 10 56 l 58 | 10 57 l 59 | 10 58 l 60 | 10 59 l 61 | 10 60 l 62 | 10 61 l 63 | 10 62 l 64 | 10 63 l 65 | 10 64 l 66 | 10 65 l 67 | 10 66 l 68 | 10 67 l 69 | 10 68 l 70 | 10 69 l 71 | 10 70 l 72 | 10 71 l 73 | 10 72 l 74 | 10 73 l 75 | 10 74 l 76 | 10 75 l 77 | 10 76 l 78 | 10 77 l 79 | 10 78 l 80 | 10 79 l 81 | 10 80 l 82 | 10 81 l 83 | 10 82 l 84 | 10 83 l 85 | 10 84 l 86 | 10 85 l 87 | 10 86 l 88 | 10 87 l 89 | 10 88 l 90 | 10 89 l 91 | 10 90 l 92 | 10 91 l 93 | 10 92 l 94 | 10 93 l 95 | 10 94 l 96 | 10 95 l 97 | 10 96 l 98 | 10 97 l 99 | 10 98 l 100 | 10 99 l 101 | 10 100 l 102 | 10 101 l 103 | 10 102 l 104 | 10 103 l 105 | 10 104 l 106 | 10 105 l 107 | 10 106 l 108 | 10 107 l 109 | 10 108 l 110 | 10 109 l 111 | 10 110 l 112 | 10 111 l 113 | 10 112 l 114 | 10 113 l 115 | 10 114 l 116 | 10 115 l 117 | 10 116 l 118 | 10 117 l 119 | 10 118 l 120 | 10 119 l 121 | 10 120 l 122 | 10 121 l 123 | 10 122 l 124 | 10 123 l 125 | 10 124 l 126 | 10 125 l 127 | 10 126 l 128 | 10 127 l 129 | 10 128 l 130 | 10 129 l 131 | 10 130 l 132 | 10 131 l 
133 | 10 132 l 134 | 10 133 l 135 | 10 134 l 136 | 10 135 l 137 | 10 136 l 138 | 10 137 l 139 | 10 138 l 140 | 10 139 l 141 | 10 140 l 142 | 10 141 l 143 | 10 142 l 144 | 10 143 l 145 | 10 144 l 146 | 10 145 l 147 | 10 146 l 148 | 10 147 l 149 | 10 148 l 150 | 10 149 l 151 | 10 150 l 152 | 10 151 l 153 | 10 152 l 154 | 10 153 l 155 | 10 154 l 156 | 10 155 l 157 | 10 156 l 158 | 10 157 l 159 | 10 158 l 160 | 10 159 l 161 | 10 160 l 162 | 10 161 l 163 | 10 162 l 164 | 10 163 l 165 | 10 164 l 166 | 10 165 l 167 | 10 166 l 168 | 10 167 l 169 | 10 168 l 170 | 10 169 l 171 | 10 170 l 172 | 10 171 l 173 | 10 172 l 174 | 10 173 l 175 | 10 174 l 176 | 10 175 l 177 | 10 176 l 178 | 10 177 l 179 | 10 178 l 180 | 10 179 l 181 | 10 180 l 182 | 10 181 l 183 | 10 182 l 184 | 10 183 l 185 | 10 184 l 186 | 10 185 l 187 | 10 186 l 188 | 10 187 l 189 | 10 188 l 190 | 10 189 l 191 | 10 190 l 192 | 10 191 l 193 | 10 192 l 194 | 10 193 l 195 | 10 194 l 196 | 10 195 l 197 | 10 196 l 198 | 10 197 l 199 | 10 198 l 200 | 10 199 l 201 | 10 200 l 202 | 10 201 l 203 | 10 202 l 204 | 10 203 l 205 | 10 204 l 206 | 10 205 l 207 | 10 206 l 208 | 10 207 l 209 | 10 208 l 210 | 10 209 l 211 | 10 210 l 212 | 10 211 l 213 | 10 212 l 214 | 10 213 l 215 | 10 214 l 216 | 10 215 l 217 | 10 216 l 218 | 10 217 l 219 | 10 218 l 220 | 10 219 l 221 | 10 220 l 222 | 10 221 l 223 | 10 222 l 224 | 10 223 l 225 | 10 224 l 226 | 10 225 l 227 | 10 226 l 228 | 10 227 l 229 | 10 228 l 230 | 10 229 l 231 | 10 230 l 232 | 10 231 l 233 | 10 232 l 234 | 10 233 l 235 | 10 234 l 236 | 10 235 l 237 | 10 236 l 238 | 10 237 l 239 | 10 238 l 240 | 10 239 l 241 | 10 240 l 242 | 10 241 l 243 | 10 242 l 244 | 10 243 l 245 | 10 244 l 246 | 10 245 l 247 | 10 246 l 248 | 10 247 l 249 | 10 248 l 250 | 10 249 l 251 | 10 250 l 252 | 10 251 l 253 | 10 252 l 254 | 10 253 l 255 | 10 254 l 256 | 10 255 l 257 | 10 256 l 258 | 10 257 l 259 | 10 258 l 260 | 10 259 l 261 | 10 260 l 262 | 10 261 l 263 | 10 262 l 264 | 10 263 l 265 | 10 264 l 266 | 10 265 l 267 | 10 266 l 268 | 10 267 l 269 | 10 268 l 270 | 10 269 l 271 | 10 270 l 272 | 10 271 l 273 | 10 272 l 274 | 10 273 l 275 | 10 274 l 276 | 10 275 l 277 | 10 276 l 278 | 10 277 l 279 | 10 278 l 280 | 10 279 l 281 | 10 280 l 282 | 10 281 l 283 | 10 282 l 284 | 10 283 l 285 | 10 284 l 286 | 10 285 l 287 | 10 286 l 288 | 10 287 l 289 | 10 288 l 290 | 10 289 l 291 | 10 290 l 292 | 10 291 l 293 | 10 292 l 294 | 10 293 l 295 | 10 294 l 296 | 10 295 l 297 | 10 296 l 298 | 10 297 l 299 | 10 298 l 300 | 10 299 l 301 | 10 300 l 302 | 10 301 l 303 | 10 302 l 304 | 10 303 l 305 | 10 304 l 306 | 10 305 l 307 | 10 306 l 308 | 10 307 l 309 | 10 308 l 310 | 10 309 l 311 | 10 310 l 312 | 10 311 l 313 | 10 312 l 314 | 10 313 l 315 | 10 314 l 316 | 10 315 l 317 | 10 316 l 318 | 10 317 l 319 | 10 318 l 320 | 10 319 l 321 | 10 320 l 322 | 10 321 l 323 | 10 322 l 324 | 10 323 l 325 | 10 324 l 326 | 10 325 l 327 | 10 326 l 328 | 10 327 l 329 | 10 328 l 330 | 10 329 l 331 | 10 330 l 332 | 10 331 l 333 | 10 332 l 334 | 10 333 l 335 | 10 334 l 336 | 10 335 l 337 | 10 336 l 338 | 10 337 l 339 | 10 338 l 340 | 10 339 l 341 | 10 340 l 342 | 10 341 l 343 | 10 342 l 344 | 10 343 l 345 | 10 344 l 346 | 10 345 l 347 | 10 346 l 348 | 10 347 l 349 | 10 348 l 350 | 10 349 l 351 | 10 350 l 352 | 10 351 l 353 | 10 352 l 354 | 10 353 l 355 | 10 354 l 356 | 10 355 l 357 | 10 356 l 358 | 10 357 l 359 | 10 358 l 360 | 10 359 l 361 | 10 360 l 362 | 10 361 l 363 | 10 362 l 364 | 10 363 l 365 | 10 364 l 366 | 10 365 l 367 | 10 366 l 368 | 10 367 l 369 | 10 368 l 
370 | 10 369 l 371 | 10 370 l 372 | 10 371 l 373 | 10 372 l 374 | 10 373 l 375 | 10 374 l 376 | 10 375 l 377 | 10 376 l 378 | 10 377 l 379 | 10 378 l 380 | 10 379 l 381 | 10 380 l 382 | 10 381 l 383 | 10 382 l 384 | 10 383 l 385 | 10 384 l 386 | 10 385 l 387 | 10 386 l 388 | 10 387 l 389 | 10 388 l 390 | 10 389 l 391 | 10 390 l 392 | 10 391 l 393 | 10 392 l 394 | 10 393 l 395 | 10 394 l 396 | 10 395 l 397 | 10 396 l 398 | 10 397 l 399 | 10 398 l 400 | 10 399 l 401 | 10 400 l 402 | 10 401 l 403 | 10 402 l 404 | 10 403 l 405 | 10 404 l 406 | 10 405 l 407 | 10 406 l 408 | 10 407 l 409 | 10 408 l 410 | 10 409 l 411 | 10 410 l 412 | 10 411 l 413 | 10 412 l 414 | 10 413 l 415 | 10 414 l 416 | 10 415 l 417 | 10 416 l 418 | 10 417 l 419 | 10 418 l 420 | 10 419 l 421 | 10 420 l 422 | 10 421 l 423 | 10 422 l 424 | 10 423 l 425 | 10 424 l 426 | 10 425 l 427 | 10 426 l 428 | 10 427 l 429 | 10 428 l 430 | 10 429 l 431 | 10 430 l 432 | 10 431 l 433 | 10 432 l 434 | 10 433 l 435 | 10 434 l 436 | 10 435 l 437 | 10 436 l 438 | 10 437 l 439 | 10 438 l 440 | 10 439 l 441 | 10 440 l 442 | 10 441 l 443 | 10 442 l 444 | 10 443 l 445 | 10 444 l 446 | 10 445 l 447 | 10 446 l 448 | 10 447 l 449 | 10 448 l 450 | 10 449 l 451 | 10 450 l 452 | 10 451 l 453 | 10 452 l 454 | 10 453 l 455 | 10 454 l 456 | 10 455 l 457 | 10 456 l 458 | 10 457 l 459 | 10 458 l 460 | 10 459 l 461 | 10 460 l 462 | 10 461 l 463 | 10 462 l 464 | 10 463 l 465 | 10 464 l 466 | 10 465 l 467 | 10 466 l 468 | 10 467 l 469 | 10 468 l 470 | 10 469 l 471 | 10 470 l 472 | 10 471 l 473 | 10 472 l 474 | 10 473 l 475 | 10 474 l 476 | 10 475 l 477 | 10 476 l 478 | 10 477 l 479 | 10 478 l 480 | 10 479 l 481 | 10 480 l 482 | 10 481 l 483 | 10 482 l 484 | 10 483 l 485 | 10 484 l 486 | 10 485 l 487 | 10 486 l 488 | 10 487 l 489 | 10 488 l 490 | 10 489 l 491 | 10 490 l 492 | 10 491 l 493 | 10 492 l 494 | 10 493 l 495 | 10 494 l 496 | 10 495 l 497 | 10 496 l 498 | 10 497 l 499 | 10 498 l 500 | 10 499 l 501 | 10 500 l 502 | 10 501 l 503 | 10 502 l 504 | 10 503 l 505 | 10 504 l 506 | 10 505 l 507 | 10 506 l 508 | 10 507 l 509 | 10 508 l 510 | 10 509 l 511 | 10 510 l 512 | 10 511 l 513 | 10 512 l 514 | 10 513 l 515 | 10 514 l 516 | 10 515 l 517 | 10 516 l 518 | 10 517 l 519 | 10 518 l 520 | 10 519 l 521 | 10 520 l 522 | 10 521 l 523 | 10 522 l 524 | 10 523 l 525 | 10 524 l 526 | 10 525 l 527 | 10 526 l 528 | 10 527 l 529 | 10 528 l 530 | 10 529 l 531 | 10 530 l 532 | 10 531 l 533 | 10 532 l 534 | 10 533 l 535 | 10 534 l 536 | 10 535 l 537 | 10 536 l 538 | 10 537 l 539 | 10 538 l 540 | 10 539 l 541 | 10 540 l 542 | 10 541 l 543 | 10 542 l 544 | 10 543 l 545 | 10 544 l 546 | 10 545 l 547 | 10 546 l 548 | 10 547 l 549 | 10 548 l 550 | 10 549 l 551 | 10 550 l 552 | 10 551 l 553 | 10 552 l 554 | 10 553 l 555 | 10 554 l 556 | 10 555 l 557 | 10 556 l 558 | 10 557 l 559 | 10 558 l 560 | 10 559 l 561 | 10 560 l 562 | 10 561 l 563 | 10 562 l 564 | 10 563 l 565 | 10 564 l 566 | 10 565 l 567 | 10 566 l 568 | 10 567 l 569 | 10 568 l 570 | 10 569 l 571 | 10 570 l 572 | 10 571 l 573 | 10 572 l 574 | 10 573 l 575 | 10 574 l 576 | 10 575 l 577 | 10 576 l 578 | 10 577 l 579 | 10 578 l 580 | 10 579 l 581 | 10 580 l 582 | 10 581 l 583 | 10 582 l 584 | 10 583 l 585 | 10 584 l 586 | 10 585 l 587 | 10 586 l 588 | 10 587 l 589 | 10 588 l 590 | 10 589 l 591 | 10 590 l 592 | 10 591 l 593 | 10 592 l 594 | 10 593 l 595 | 10 594 l 596 | 10 595 l 597 | 10 596 l 598 | 10 597 l 599 | 10 598 l 600 | 10 599 l 601 | 10 600 l 602 | 10 601 l 603 | 10 602 l 604 | 10 603 l 605 | 10 604 l 606 | 10 605 l 
607 | 10 606 l 608 | 10 607 l 609 | 10 608 l 610 | 10 609 l 611 | 10 610 l 612 | 10 611 l 613 | 10 612 l 614 | 10 613 l 615 | 10 614 l 616 | 10 615 l 617 | 10 616 l 618 | 10 617 l 619 | 10 618 l 620 | 10 619 l 621 | 10 620 l 622 | 10 621 l 623 | 10 622 l 624 | 10 623 l 625 | 10 624 l 626 | 10 625 l 627 | 10 626 l 628 | 10 627 l 629 | 10 628 l 630 | 10 629 l 631 | 10 630 l 632 | 10 631 l 633 | 10 632 l 634 | 10 633 l 635 | 10 634 l 636 | 10 635 l 637 | 10 636 l 638 | 10 637 l 639 | 10 638 l 640 | 10 639 l 641 | 10 640 l 642 | 10 641 l 643 | 10 642 l 644 | 10 643 l 645 | 10 644 l 646 | 10 645 l 647 | 10 646 l 648 | 10 647 l 649 | 10 648 l 650 | 10 649 l 651 | 10 650 l 652 | 10 651 l 653 | 10 652 l 654 | 10 653 l 655 | 10 654 l 656 | 10 655 l 657 | 10 656 l 658 | 10 657 l 659 | 10 658 l 660 | 10 659 l 661 | 10 660 l 662 | 10 661 l 663 | 10 662 l 664 | 10 663 l 665 | 10 664 l 666 | 10 665 l 667 | 10 666 l 668 | 10 667 l 669 | 10 668 l 670 | 10 669 l 671 | 10 670 l 672 | 10 671 l 673 | 10 672 l 674 | 10 673 l 675 | 10 674 l 676 | 10 675 l 677 | 10 676 l 678 | 10 677 l 679 | 10 678 l 680 | 10 679 l 681 | 10 680 l 682 | 10 681 l 683 | 10 682 l 684 | 10 683 l 685 | 10 684 l 686 | 10 685 l 687 | 10 686 l 688 | 10 687 l 689 | 10 688 l 690 | 10 689 l 691 | 10 690 l 692 | 10 691 l 693 | 10 692 l 694 | 10 693 l 695 | 10 694 l 696 | 10 695 l 697 | 10 696 l 698 | 10 697 l 699 | 10 698 l 700 | 10 699 l 701 | 10 700 l 702 | 10 701 l 703 | 10 702 l 704 | 10 703 l 705 | 10 704 l 706 | 10 705 l 707 | 10 706 l 708 | 10 707 l 709 | 10 708 l 710 | 10 709 l 711 | 10 710 l 712 | 10 711 l 713 | 10 712 l 714 | 10 713 l 715 | 10 714 l 716 | 10 715 l 717 | 10 716 l 718 | 10 717 l 719 | 10 718 l 720 | 10 719 l 721 | 10 720 l 722 | 10 721 l 723 | 10 722 l 724 | 10 723 l 725 | 10 724 l 726 | 10 725 l 727 | 10 726 l 728 | 10 727 l 729 | 10 728 l 730 | 10 729 l 731 | 10 730 l 732 | 10 731 l 733 | 10 732 l 734 | 10 733 l 735 | 10 734 l 736 | 10 735 l 737 | 10 736 l 738 | 10 737 l 739 | 10 738 l 740 | 10 739 l 741 | 10 740 l 742 | 10 741 l 743 | 10 742 l 744 | 10 743 l 745 | 10 744 l 746 | 10 745 l 747 | 10 746 l 748 | 10 747 l 749 | 10 748 l 750 | 10 749 l 751 | 10 750 l 752 | 10 751 l 753 | 10 752 l 754 | 10 753 l 755 | 10 754 l 756 | 10 755 l 757 | 10 756 l 758 | 10 757 l 759 | 10 758 l 760 | 10 759 l 761 | 10 760 l 762 | 10 761 l 763 | 10 762 l 764 | 10 763 l 765 | 10 764 l 766 | 10 765 l 767 | 10 766 l 768 | 10 767 l 769 | 10 768 l 770 | 10 769 l 771 | 10 770 l 772 | 10 771 l 773 | 10 772 l 774 | 10 773 l 775 | 10 774 l 776 | 10 775 l 777 | 10 776 l 778 | 10 777 l 779 | 10 778 l 780 | 10 779 l 781 | 10 780 l 782 | 10 781 l 783 | 10 782 l 784 | 10 783 l 785 | 10 784 l 786 | 10 785 l 787 | 10 786 l 788 | 10 787 l 789 | 10 788 l 790 | 10 789 l 791 | 10 790 l 792 | 10 791 l 793 | 10 792 l 794 | 10 793 l 795 | 10 794 l 796 | 10 795 l 797 | 10 796 l 798 | 10 797 l 799 | 10 798 l 800 | 10 799 l 801 | 10 800 l 802 | 10 801 l 803 | 10 802 l 804 | 10 803 l 805 | 10 804 l 806 | 10 805 l 807 | 10 806 l 808 | 10 807 l 809 | 10 808 l 810 | 10 809 l 811 | 10 810 l 812 | 10 811 l 813 | 10 812 l 814 | 10 813 l 815 | 10 814 l 816 | 10 815 l 817 | 10 816 l 818 | 10 817 l 819 | 10 818 l 820 | 10 819 l 821 | 10 820 l 822 | 10 821 l 823 | 10 822 l 824 | 10 823 l 825 | 10 824 l 826 | 10 825 l 827 | 10 826 l 828 | 10 827 l 829 | 10 828 l 830 | 10 829 l 831 | 10 830 l 832 | 10 831 l 833 | 10 832 l 834 | 10 833 l 835 | 10 834 l 836 | 10 835 l 837 | 10 836 l 838 | 10 837 l 839 | 10 838 l 840 | 10 839 l 841 | 10 840 l 842 | 10 841 l 843 | 10 842 l 
844 | 10 843 l 845 | 10 844 l 846 | 10 845 l 847 | 10 846 l 848 | 10 847 l 849 | 10 848 l 850 | 10 849 l 851 | 10 850 l 852 | 10 851 l 853 | 10 852 l 854 | 10 853 l 855 | 10 854 l 856 | 10 855 l 857 | 10 856 l 858 | 10 857 l 859 | 10 858 l 860 | 10 859 l 861 | 10 860 l 862 | 10 861 l 863 | 10 862 l 864 | 10 863 l 865 | 10 864 l 866 | 10 865 l 867 | 10 866 l 868 | 10 867 l 869 | 10 868 l 870 | 10 869 l 871 | 10 870 l 872 | 10 871 l 873 | 10 872 l 874 | 10 873 l 875 | 10 874 l 876 | 10 875 l 877 | 10 876 l 878 | 10 877 l 879 | 10 878 l 880 | 10 879 l 881 | 10 880 l 882 | 10 881 l 883 | 10 882 l 884 | 10 883 l 885 | 10 884 l 886 | 10 885 l 887 | 10 886 l 888 | 10 887 l 889 | 10 888 l 890 | 10 889 l 891 | 10 890 l 892 | 10 891 l 893 | 10 892 l 894 | 10 893 l 895 | 10 894 l 896 | 10 895 l 897 | 10 896 l 898 | 10 897 l 899 | 10 898 l 900 | 10 899 l 901 | 10 900 l 902 | 10 901 l 903 | 10 902 l 904 | 10 903 l 905 | 10 904 l 906 | 10 905 l 907 | 10 906 l 908 | 10 907 l 909 | 10 908 l 910 | 10 909 l 911 | 10 910 l 912 | 10 911 l 913 | 10 912 l 914 | 10 913 l 915 | 10 914 l 916 | 10 915 l 917 | 10 916 l 918 | 10 917 l 919 | 10 918 l 920 | 10 919 l 921 | 10 920 l 922 | 10 921 l 923 | 10 922 l 924 | 10 923 l 925 | 10 924 l 926 | 10 925 l 927 | 10 926 l 928 | 10 927 l 929 | 10 928 l 930 | 10 929 l 931 | 10 930 l 932 | 10 931 l 933 | 10 932 l 934 | 10 933 l 935 | 10 934 l 936 | 10 935 l 937 | 10 936 l 938 | 10 937 l 939 | 10 938 l 940 | 10 939 l 941 | 10 940 l 942 | 10 941 l 943 | 10 942 l 944 | 10 943 l 945 | 10 944 l 946 | 10 945 l 947 | 10 946 l 948 | 10 947 l 949 | 10 948 l 950 | 10 949 l 951 | 10 950 l 952 | 10 951 l 953 | 10 952 l 954 | 10 953 l 955 | 10 954 l 956 | 10 955 l 957 | 10 956 l 958 | 10 957 l 959 | 10 958 l 960 | 10 959 l 961 | 10 960 l 962 | 10 961 l 963 | 10 962 l 964 | 10 963 l 965 | 10 964 l 966 | 10 965 l 967 | 10 966 l 968 | 10 967 l 969 | 10 968 l 970 | 10 969 l 971 | 10 970 l 972 | 10 971 l 973 | 10 972 l 974 | 10 973 l 975 | 10 974 l 976 | 10 975 l 977 | 10 976 l 978 | 10 977 l 979 | 10 978 l 980 | 10 979 l 981 | 10 980 l 982 | 10 981 l 983 | 10 982 l 984 | 10 983 l 985 | 10 984 l 986 | 10 985 l 987 | 10 986 l 988 | 10 987 l 989 | 10 988 l 990 | 10 989 l 991 | 10 990 l 992 | 10 991 l 993 | 10 992 l 994 | 10 993 l 995 | 10 994 l 996 | 10 995 l 997 | 10 996 l 998 | 10 997 l 999 | 10 998 l 1000 | 10 999 l 1001 | 10 1000 l 1002 | 10 1001 l 1003 | 10 1002 l 1004 | 10 1003 l 1005 | 10 1004 l 1006 | 10 1005 l 1007 | 10 1006 l 1008 | 10 1007 l 1009 | 10 1008 l 1010 | 10 1009 l 1011 | 10 1010 l 1012 | 10 1011 l 1013 | 10 1012 l 1014 | 10 1013 l 1015 | 10 1014 l 1016 | 10 1015 l 1017 | 10 1016 l 1018 | 10 1017 l 1019 | 10 1018 l 1020 | 10 1019 l 1021 | 10 1020 l 1022 | 10 1021 l 1023 | 10 1022 l 1024 | 10 1023 l 1025 | 10 1024 l 1026 | 10 1025 l 1027 | 10 1026 l 1028 | 10 1027 l 1029 | 10 1028 l 1030 | 10 1029 l 1031 | 10 1030 l 1032 | 10 1031 l 1033 | 10 1032 l 1034 | 10 1033 l 1035 | 10 1034 l 1036 | 10 1035 l 1037 | 10 1036 l 1038 | 10 1037 l 1039 | 10 1038 l 1040 | 10 1039 l 1041 | 10 1040 l 1042 | 10 1041 l 1043 | 10 1042 l 1044 | 10 1043 l 1045 | 10 1044 l 1046 | 10 1045 l 1047 | 10 1046 l 1048 | 10 1047 l 1049 | 10 1048 l 1050 | 10 1049 l 1051 | 10 1050 l 1052 | 10 1051 l 1053 | 10 1052 l 1054 | 10 1053 l 1055 | 10 1054 l 1056 | 10 1055 l 1057 | 10 1056 l 1058 | 10 1057 l 1059 | 10 1058 l 1060 | 10 1059 l 1061 | 10 1060 l 1062 | 10 1061 l 1063 | 10 1062 l 1064 | 10 1063 l 1065 | 10 1064 l 1066 | 10 1065 l 1067 | 10 1066 l 1068 | 10 1067 l 1069 | 10 1068 l 1070 | 10 1069 l 1071 | 
10 1070 l 1072 | 10 1071 l 1073 | 10 1072 l 1074 | 10 1073 l 1075 | 10 1074 l 1076 | 10 1075 l 1077 | 10 1076 l 1078 | 10 1077 l 1079 | 10 1078 l 1080 | 10 1079 l 1081 | 10 1080 l 1082 | 10 1081 l 1083 | 10 1082 l 1084 | 10 1083 l 1085 | 10 1084 l 1086 | 10 1085 l 1087 | 10 1086 l 1088 | 10 1087 l 1089 | 10 1088 l 1090 | 10 1089 l 1091 | 10 1090 l 1092 | 10 1091 l 1093 | 10 1092 l 1094 | 10 1093 l 1095 | 10 1094 l 1096 | 10 1095 l 1097 | 10 1096 l 1098 | 10 1097 l 1099 | 10 1098 l 1100 | 10 1099 l 1101 | 10 1100 l 1102 | 10 1101 l 1103 | 10 1102 l 1104 | 10 1103 l 1105 | 10 1104 l 1106 | 10 1105 l 1107 | 10 1106 l 1108 | 10 1107 l 1109 | 10 1108 l 1110 | 10 1109 l 1111 | 10 1110 l 1112 | 10 1111 l 1113 | 10 1112 l 1114 | 10 1113 l 1115 | 10 1114 l 1116 | 10 1115 l 1117 | 10 1116 l 1118 | 10 1117 l 1119 | 10 1118 l 1120 | 10 1119 l 1121 | 10 1120 l 1122 | 10 1121 l 1123 | 10 1122 l 1124 | 10 1123 l 1125 | 10 1124 l 1126 | 10 1125 l 1127 | 10 1126 l 1128 | 10 1127 l 1129 | 10 1128 l 1130 | 10 1129 l 1131 | 10 1130 l 1132 | 10 1131 l 1133 | 10 1132 l 1134 | 10 1133 l 1135 | 10 1134 l 1136 | 10 1135 l 1137 | 10 1136 l 1138 | 10 1137 l 1139 | 10 1138 l 1140 | 10 1139 l 1141 | 10 1140 l 1142 | 10 1141 l 1143 | 10 1142 l 1144 | 10 1143 l 1145 | 10 1144 l 1146 | 10 1145 l 1147 | 10 1146 l 1148 | 10 1147 l 1149 | 10 1148 l 1150 | 10 1149 l 1151 | 10 1150 l 1152 | 10 1151 l 1153 | 10 1152 l 1154 | 10 1153 l 1155 | 10 1154 l 1156 | 10 1155 l 1157 | 10 1156 l 1158 | 10 1157 l 1159 | 10 1158 l 1160 | 10 1159 l 1161 | 10 1160 l 1162 | 10 1161 l 1163 | 10 1162 l 1164 | 10 1163 l 1165 | 10 1164 l 1166 | 10 1165 l 1167 | 10 1166 l 1168 | 10 1167 l 1169 | 10 1168 l 1170 | 10 1169 l 1171 | 10 1170 l 1172 | 10 1171 l 1173 | 10 1172 l 1174 | 10 1173 l 1175 | 10 1174 l 1176 | 10 1175 l 1177 | 10 1176 l 1178 | 10 1177 l 1179 | 10 1178 l 1180 | 10 1179 l 1181 | 10 1180 l 1182 | 10 1181 l 1183 | 10 1182 l 1184 | 10 1183 l 1185 | 10 1184 l 1186 | 10 1185 l 1187 | 10 1186 l 1188 | 10 1187 l 1189 | 10 1188 l 1190 | 10 1189 l 1191 | 10 1190 l 1192 | 10 1191 l 1193 | 10 1192 l 1194 | 10 1193 l 1195 | 10 1194 l 1196 | 10 1195 l 1197 | 10 1196 l 1198 | 10 1197 l 1199 | 10 1198 l 1200 | 10 1199 l 1201 | -------------------------------------------------------------------------------- /start2test.sh: -------------------------------------------------------------------------------- 1 | num='0' 2 | CUDA_VISIBLE_DEVICES=0 python -u -m test --load_weights_folder logs/weights --cutmix False --use_freeze_epoch 20 --seed 1024 --scheduler_step_size 14 --batch 1 --model_name model$num --png 3 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | import os 3 | 4 | os.environ["OMP_NUM_THREADS"] = "1" # export OMP_NUM_THREADS=4 5 | os.environ["OPENBLAS_NUM_THREADS"] = "1" # export OPENBLAS_NUM_THREADS=4 6 | os.environ["MKL_NUM_THREADS"] = "1" # export MKL_NUM_THREADS=6 7 | os.environ["VECLIB_MAXIMUM_THREADS"] = "1" # export VECLIB_MAXIMUM_THREADS=4 8 | os.environ["NUMEXPR_NUM_THREADS"] = "1" # export NUMEXPR_NUM_THREADS=6 9 | from test_dev import TESTER 10 | from options import MonodepthOptions 11 | 12 | options = MonodepthOptions() 13 | opts = options.parse() 14 | import cv2 15 | import torch 16 | import random 17 | import numpy as np 18 | from PIL import Image 19 | seed = opts.seed 20 | # Set the random seed for Python's random module 21 | 
random.seed(seed) 22 | # Set the random seed for Numpy 23 | np.random.seed(seed) 24 | # Set the random seed for PyTorch 25 | torch.manual_seed(seed) 26 | torch.cuda.manual_seed(seed) 27 | torch.cuda.manual_seed_all(seed) 28 | torch.backends.cudnn.deterministic = True 29 | torch.backends.cudnn.benchmark = False 30 | 31 | if __name__ == "__main__": 32 | tester = TESTER(opts) 33 | tester.test() 34 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | import os 3 | import hashlib 4 | import zipfile 5 | from six.moves import urllib 6 | 7 | def readlines(filename): 8 | """Read all the lines in a text file and return as a list 9 | """ 10 | with open(filename, 'r') as f: 11 | lines = f.read().splitlines() 12 | return lines 13 | 14 | def normalize_image(x): 15 | """Rescale image pixels to span range [0, 1] 16 | """ 17 | ma = float(x.max().cpu().data) 18 | mi = float(x.min().cpu().data) 19 | d = ma - mi if ma != mi else 1e5 20 | return (x - mi) / d 21 | 22 | def sec_to_hm(t): 23 | """Convert time in seconds to time in hours, minutes and seconds 24 | e.g. 10239 -> (2, 50, 39) 25 | """ 26 | t = int(t) 27 | s = t % 60 28 | t //= 60 29 | m = t % 60 30 | t //= 60 31 | return t, m, s 32 | 33 | def sec_to_hm_str(t): 34 | """Convert time in seconds to a nice string 35 | e.g. 10239 -> '02h50m39s' 36 | """ 37 | h, m, s = sec_to_hm(t) 38 | return "{:02d}h{:02d}m{:02d}s".format(h, m, s) --------------------------------------------------------------------------------
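
The odometry split files shown above pair a sequence id with a frame index and a camera-side flag, and `readlines` in utils.py is the helper that loads such lists. The sketch below is illustrative only and not part of the repository: the reading of each entry as "<sequence> <frame_index> <side>" and the helper name `parse_split_line` are assumptions made for this example.

```python
# Hedged example: load splits/odom/test_files_10.txt with utils.readlines and
# unpack one entry. The (sequence, frame_index, side) interpretation and the
# parse_split_line helper are assumptions, not repository code.
from utils import readlines

def parse_split_line(line):
    """Split one entry such as '10 1069 l' into (sequence, frame_index, side)."""
    sequence, frame_index, side = line.split()
    return sequence, int(frame_index), side

if __name__ == "__main__":
    filenames = readlines("splits/odom/test_files_10.txt")
    print("number of entries:", len(filenames))            # 1200 entries in this split
    print("first entry:", parse_split_line(filenames[0]))  # ('10', 0, 'l')
```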
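The remaining helpers in utils.py are small and self-contained: `normalize_image` rescales a tensor to span [0, 1], and `sec_to_hm_str` formats a duration as in its docstring example. The snippet below is a standalone sanity check written for illustration, not project code.

```python
# Hedged sanity check for the utils.py helpers; assumes PyTorch is installed.
import torch
from utils import normalize_image, sec_to_hm_str

if __name__ == "__main__":
    x = 3.0 * torch.rand(1, 1, 4, 4) - 1.0   # arbitrary tensor outside [0, 1]
    y = normalize_image(x)
    print(float(y.min()), float(y.max()))    # expected: 0.0 1.0
    print(sec_to_hm_str(10239))              # expected: '02h50m39s'
```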