├── .gitignore ├── DeepCalib_architectures.png ├── README.md ├── Results.png ├── dataset ├── continuous_dataset_generation.py ├── discrete_dataset_generation.py ├── download_images.py ├── indoor │ ├── airplane_interior.txt │ ├── aquarium.txt │ ├── bathroom.txt │ ├── bedroom.txt │ ├── belfry.txt │ ├── car_interior.txt │ ├── cave.txt │ ├── childs_room_daycare.txt │ ├── church.txt │ ├── classroom.txt │ ├── closet.txt │ ├── conference_room.txt │ ├── corridor.txt │ ├── dining_room.txt │ ├── elevator.txt │ ├── expo_showroom.txt │ ├── greenhouse.txt │ ├── gym.txt │ ├── hangar.txt │ ├── hospital_room.txt │ ├── indoor_pool.txt │ ├── jail_cell.txt │ ├── kitchen.txt │ ├── laboratory.txt │ ├── legislative_chamber.txt │ ├── library.txt │ ├── living_room.txt │ ├── lobby_atrium.txt │ ├── massage_room.txt │ ├── mechanical_room.txt │ ├── museum.txt │ ├── observatory.txt │ ├── office.txt │ ├── old_building.txt │ ├── others.txt │ ├── parking_garage.txt │ ├── pilothouse.txt │ ├── restaurant.txt │ ├── sauna.txt │ ├── shop.txt │ ├── stable.txt │ ├── stadium.txt │ ├── staircase.txt │ ├── studio.txt │ ├── subway_station.txt │ ├── tent.txt │ ├── theater.txt │ ├── tomb.txt │ ├── train_interior.txt │ └── workshop.txt ├── my_interpol.py ├── outdoor │ ├── airport.txt │ ├── amphitheatre.txt │ ├── arena.txt │ ├── balcony.txt │ ├── beach.txt │ ├── boat_deck.txt │ ├── bridge.txt │ ├── cemetery.txt │ ├── coast.txt │ ├── construction_site.txt │ ├── desert.txt │ ├── field.txt │ ├── forest.txt │ ├── garden.txt │ ├── gulch.txt │ ├── highway.txt │ ├── jetty.txt │ ├── lawn.txt │ ├── mountain.txt │ ├── others.txt │ ├── park.txt │ ├── parking_lot.txt │ ├── patio.txt │ ├── plaza_courtyard.txt │ ├── ruin.txt │ ├── skatepark.txt │ ├── sports_field.txt │ ├── street.txt │ ├── swimming_pool.txt │ ├── train_station_or_track.txt │ ├── underwater.txt │ └── wharf.txt └── split_dataset.py ├── extract_images.py ├── network_training ├── Classification │ ├── Dual_Net │ │ ├── dist │ │ │ ├── train_classifier_dist.py │ │ │ └── utils.py │ │ └── focal │ │ │ ├── train_classifier_focal.py │ │ │ └── utils.py │ ├── Seq_Net │ │ ├── dist │ │ │ ├── train_classifier_dist_concat_focal.py │ │ │ └── utils_dist_concat_focal.py │ │ └── focal │ │ │ ├── train_classifier_focal_concat_dist.py │ │ │ └── utils_focal_concat_dist.py │ └── Single_Net │ │ ├── train_classifier_dist_focal.py │ │ └── utils_focal_distortion.py └── Regression │ ├── Dual_Net │ ├── dist │ │ ├── train_regressor_dist.py │ │ └── utils_regressor_dist.py │ └── focal │ │ ├── train_regressor_focal.py │ │ └── utils_regressor_focal.py │ ├── Seq_Net │ ├── dist │ │ ├── train_regressor_dist_concat_focal.py │ │ └── utils_regressor_dist_concat_focal.py │ └── focal │ │ ├── train_regressor_focal_concat_dist.py │ │ └── utils_regressor_focal_concat_dist.py │ └── Single_net │ ├── train_regressor_dist_focal.py │ └── utils_regressor_focal_dist.py ├── prediction ├── Classification │ ├── Dual_Net │ │ ├── dist │ │ │ └── predict_classifier_dist_to_textfile.py │ │ └── focal │ │ │ └── predict_classifier_focal_to_textfile.py │ ├── Seq_Net │ │ ├── dist │ │ │ └── predict_classifier_dist_concat_focal_to_textfile.py │ │ └── focal │ │ │ └── predict_classifier_focal_concat_dist_to_textfile.py │ └── Single_net │ │ └── predict_classifier_dist_focal.py └── Regression │ ├── Dual_Net │ ├── dist │ │ └── predict_regressor_dist_to_textfile.py │ └── focal │ │ └── predict_regressor_focal_to_textfile.py │ └── Single_net │ └── predict_regressor_dist_focal_to_textfile.py └── undistortion ├── undistSphIm.m └── undist_from_txt.m /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | -------------------------------------------------------------------------------- /DeepCalib_architectures.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexvbogdan/DeepCalib/b9b5fc3bac8d9845c9d8172b80833c6bc86492a2/DeepCalib_architectures.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepCalib 2 | The implementation of our [2018 CVMP DeepCalib](https://drive.google.com/file/d/1pZgR3wNS6Mvb87W0ixOHmEVV6tcI8d50/view) paper. 
The supplementary material can be found [here](https://drive.google.com/file/d/1baNhjaYxadjHbMn1huVjsrSJRGSmzFWF/view). 3 | 4 | ## Table of contents 5 | 6 | - [Short description](#short-description) 7 | - [Requirements](#requirements) 8 | - [Dataset generation](#dataset-generation) 9 | - [Training DeepCalib](#training-deepcalib) 10 | - [Camera calibration](#camera-calibration) 11 | - [Notes](#notes) 12 | - [Different architectures](#different-architectures) 13 | - [Weights](#weights) 14 | - [Undistortion](#undistortion) 15 | - [Citation](#citation) 16 | 17 | ## Short description 18 | We present a novel, fully automatic, deep learning-based approach that works with a single image of general scenes. Our approach builds upon the Inception-v3 architecture: our network **automatically estimates the intrinsic parameters of the camera** (focal length and distortion parameter) from a **single general input image**. 19 | 20 | ## Requirements 21 | - Python 2.7 22 | - Keras 2.1 23 | - TensorFlow 1.4 24 | - OpenCV 3.2.1 25 | 26 | ## Dataset generation 27 | We provide the code for the whole data generation pipeline. First, you have to download the sun360 dataset using this [download script](https://github.com/alexvbogdan/DeepCalib/blob/master/dataset/download_images.py). There is also a Google Drive [link](https://drive.google.com/drive/folders/1ooaYwvNuFd-iEEcmOQHpLunJEmo7b4NM) from which you can download it manually. Then, you have to choose whether your dataset is going to have continuous or discrete parameter values. We provide the dataset generation code for both in the dataset [folder](https://github.com/alexvbogdan/DeepCalib/blob/master/dataset/); for example, the continuous generation script reads the downloaded panoramas from `all_images_new/*.jpg`. If you are using this code, please do not forget to [cite](https://scholar.google.co.kr/scholar?hl=en&as_sdt=0%2C5&as_vis=1&q=recognizing+scene+viewpoint+using+panoramic+place+representation&btnG=#d=gs_cit&u=%2Fscholar%3Fq%3Dinfo%3ARJsOQOkTaMEJ%3Ascholar.google.com%2F%26output%3Dcite%26scirp%3D0%26hl%3Den) the paper describing the sun360 dataset. 28 | If you are unable to download the sun360 dataset, here is a [link](https://vcl.iti.gr/360-dataset/) to another dataset of 360° panoramic images. 29 | 30 | ## Training DeepCalib 31 | To train DeepCalib you need to choose which architecture you want to use (refer to `Section 3.3` of [our paper](https://drive.google.com/file/d/1pZgR3wNS6Mvb87W0ixOHmEVV6tcI8d50/view)). This repo contains the training scripts for both the classification and regression networks, for all three architectures mentioned in the paper. In both the regression and classification [Seq_Net](https://github.com/alexvbogdan/DeepCalib/tree/master/network_training/Classification/Seq_Net) and [Dual_Net](https://github.com/alexvbogdan/DeepCalib/tree/master/network_training/Classification/Dual_Net) folders, "dist" and "focal" refer to the networks used for the distortion parameter and the focal length, respectively. All the training code is available in this [folder](https://github.com/alexvbogdan/DeepCalib/tree/master/network_training); a minimal model-definition sketch is shown below. 32 | 
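As a rough illustration of what these scripts build, here is a minimal `SingleNet`-style model definition: an Inception-v3 backbone with two regression heads, one per intrinsic parameter. This is only a sketch under the Keras 2.1 / TensorFlow 1.4 setup listed above, not the exact training code from this repo; the optimizer and loss are placeholders.

```python
# Minimal SingleNet-style sketch (illustrative only; see network_training/
# for the actual training scripts). Optimizer and loss are placeholders.
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense
from keras.models import Model

# Inception-v3 backbone on 299x299 RGB inputs, global-average-pooled features
base = InceptionV3(weights='imagenet', include_top=False,
                   input_shape=(299, 299, 3), pooling='avg')

# One regression head for the focal length, one for the distortion parameter
focal_out = Dense(1, name='output_focal')(base.output)
dist_out = Dense(1, name='output_dist')(base.output)

model = Model(inputs=base.input, outputs=[focal_out, dist_out])
model.compile(optimizer='adam', loss='mse')
```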
33 | ## Camera Calibration 34 | To infer the distortion parameter and focal length of a given camera, we take a short video, extract the frames and run the prediction on all of them. After that, we take the mean or the median of the predicted values and use that as the final result. However, with a slight modification you can use the networks for single-image prediction as well. Below you can see some results of image rectification using parameters obtained from single-image calibration. ![Results](https://github.com/alexvbogdan/DeepCalib/blob/master/Results.png) 35 | In the [prediction folder](https://github.com/alexvbogdan/DeepCalib/tree/master/prediction) we provide the prediction code for all the networks except `SeqNet` regression, because the weights for this architecture are currently unavailable. We also uploaded a simple Python script for extracting frames from a video sequence. 36 | 37 | ## Notes 38 | 39 | #### Different architectures 40 | For detailed information refer to `Section 4.2` of [our paper](https://drive.google.com/file/d/1pZgR3wNS6Mvb87W0ixOHmEVV6tcI8d50/view). In short, `SingleNet` (a) is the most accurate network for predicting the focal length and distortion parameter. In addition, since it is a single network, unlike `DualNet` (b) and `SeqNet` (c), it is also computationally cheaper. ![DeepCalib architectures](https://github.com/alexvbogdan/DeepCalib/blob/master/DeepCalib_architectures.png) 41 | 42 | #### Weights 43 | The weights for our networks can be found [here](https://drive.google.com/file/d/1TYZn-f2z7O0hp_IZnNfZ06ExgU9ii70T/view). We recommend using `SingleNet`, since we experimentally confirmed that it outperforms the others. The regression weights for `SeqNet` are currently unavailable, although you can train your own. 44 | 45 | #### Undistortion 46 | One way to qualitatively assess the accuracy of the predicted parameters is to use them to undistort the images from which they were predicted. The [undistortion](https://github.com/alexvbogdan/DeepCalib/tree/master/undistortion) folder contains MATLAB code to undistort multiple images listed in a .txt file. The format of the .txt file is the following: the 1st column contains the `path to the image`, the 2nd column the `focal length`, and the 3rd column the `distortion parameter`. Each row corresponds to a single image; see the sketch below for an example. With a simple modification you can run it on a single image by passing the image path and the predicted parameters directly; for this you only need to change the `undist_from_txt.m` file, not `undistSphIm.m`.
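To make the per-frame aggregation and the .txt format concrete, here is a short, hedged Python sketch; `frame_preds`, the paths, and all numeric values are hypothetical placeholders standing in for the outputs of the prediction scripts.

```python
# Sketch: aggregate per-frame predictions and write the three-column .txt file
# consumed by undist_from_txt.m. All names and values here are hypothetical.
import numpy as np

# (focal length, distortion parameter) predicted for each extracted frame
frame_preds = [(263.0, 0.81), (270.5, 0.78), (259.9, 0.83)]

focal = np.median([p[0] for p in frame_preds])  # median is robust to outlier frames
dist = np.median([p[1] for p in frame_preds])

# one row per image: <path to the image> <focal length> <distortion parameter>
with open('predictions.txt', 'w') as f:
    f.write('frames/frame_0001.jpg %.4f %.4f\n' % (focal, dist))
```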
47 | 48 | ## Citation 49 | ``` 50 | @inproceedings{bogdan2018deepcalib, 51 | title={DeepCalib: a deep learning approach for automatic intrinsic calibration of wide field-of-view cameras}, 52 | author={Bogdan, Oleksandr and Eckstein, Viktor and Rameau, Francois and Bazin, Jean-Charles}, 53 | booktitle={Proceedings of the 15th ACM SIGGRAPH European Conference on Visual Media Production}, 54 | year={2018} 55 | } 56 | 57 | @inproceedings{xiao2012recognizing, 58 | title={Recognizing scene viewpoint using panoramic place representation}, 59 | author={Xiao, Jianxiong and Ehinger, Krista A and Oliva, Aude and Torralba, Antonio}, 60 | booktitle={2012 IEEE Conference on Computer Vision and Pattern Recognition}, 61 | year={2012}, 62 | } 63 | ``` 64 | -------------------------------------------------------------------------------- /Results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexvbogdan/DeepCalib/b9b5fc3bac8d9845c9d8172b80833c6bc86492a2/Results.png -------------------------------------------------------------------------------- /dataset/continuous_dataset_generation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import math as m 4 | import time, glob 5 | import my_interpol 6 | import random 7 | import pdb 8 | from numpy.lib.scimath import sqrt as csqrt 9 | random.seed(9001) 10 | np.random.seed(1) 11 | 12 | 13 | def deg2rad(deg): 14 | return deg*m.pi/180 15 | 16 | def getRotationMat(roll, pitch, yaw): 17 | 18 | rx = np.array([1., 0., 0., 0., np.cos(deg2rad(roll)), -np.sin(deg2rad(roll)), 0., np.sin(deg2rad(roll)), np.cos(deg2rad(roll))]).reshape((3, 3)) 19 | ry = np.array([np.cos(deg2rad(pitch)), 0., np.sin(deg2rad(pitch)), 0., 1., 0., -np.sin(deg2rad(pitch)), 0., np.cos(deg2rad(pitch))]).reshape((3, 3)) 20 | rz = np.array([np.cos(deg2rad(yaw)), -np.sin(deg2rad(yaw)), 0., np.sin(deg2rad(yaw)), np.cos(deg2rad(yaw)), 0., 0., 0., 1.]).reshape((3, 3)) 21 | 22 | return np.matmul(rz, np.matmul(ry, rx)) 23 | 24 | def minfocal( u0,v0,xi,xref=1,yref=1): 25 | 26 | fmin = np.sqrt(-(1-xi*xi)*((xref-u0)*(xref-u0) + (yref-v0)*(yref-v0))) 27 | 28 | return fmin * 1.0001 29 | 30 | def diskradius(xi, f): 31 | 32 | return np.sqrt(-(f*f)/(1-xi*xi)) 33 | 34 | 35 | 36 | #----------------constants-------------- 37 | path_to_360_images = 'all_images_new/*.jpg' 38 | list_360_image_paths = glob.glob(path_to_360_images) 39 | 40 | H=299 41 | W=299 42 | u0 = W / 2. 43 | v0 = H / 2. 44 | 45 | grid_x, grid_y = np.meshgrid(range(W), range(H)) 46 | 47 | starttime = time.clock() 48 | for image360_path in list_360_image_paths: #length of your filename list 49 | 50 | image360 = cv2.imread(image360_path) 51 | 52 | ImPano_W = np.shape(image360)[1] 53 | ImPano_H = np.shape(image360)[0] 54 | 55 | 56 | for i in range(30): 57 | x_ref = 1 58 | y_ref = 1 59 | f = random.randint(50,500) 60 | xi = random.uniform(0,1.2) 61 | 62 | fmin = minfocal(u0, v0, xi, x_ref, y_ref) 63 | 64 | # 1. Projection on the camera plane 65 | 66 | X_Cam = np.divide(grid_x- u0, f) 67 | Y_Cam = np.divide(grid_y- v0, f) 68 | 69 | # 2. 
Projection on the sphere 70 | 71 | AuxVal = np.multiply(X_Cam, X_Cam) + np.multiply(Y_Cam, Y_Cam) 72 | 73 | alpha_cam = np.real(xi + csqrt(1 + np.multiply((1 - xi * xi), AuxVal))) 74 | 75 | alpha_div = AuxVal + 1 76 | 77 | alpha_cam_div = np.divide(alpha_cam, alpha_div) 78 | 79 | X_Sph = np.multiply(X_Cam, alpha_cam_div) 80 | Y_Sph = np.multiply(Y_Cam, alpha_cam_div) 81 | Z_Sph = alpha_cam_div - xi 82 | 83 | # 3. Rotation of the sphere 84 | Rot = [] 85 | Rot.append(((np.random.ranf() - 0.5) * 2) * 10) # roll 86 | Rot.append(((np.random.ranf() - 0.5) * 2) * 15) # pitch 87 | Rot.append(((np.random.ranf() - 0.5) * 2) * 180) # yaw 88 | 89 | r = np.matmul(getRotationMat(Rot[0], Rot[1], Rot[2]), 90 | np.matmul(getRotationMat(0, -90, 45), getRotationMat(0, 90, 90))) 91 | 92 | idx1 = np.array([[0], [0], [0]]) 93 | idx2 = np.array([[1], [1], [1]]) 94 | idx3 = np.array([[2], [2], [2]]) 95 | elems1 = r[:, 0] 96 | elems2 = r[:, 1] 97 | elems3 = r[:, 2] 98 | 99 | x1 = elems1[0] * X_Sph + elems2[0] * Y_Sph + elems3[0] * Z_Sph 100 | y1 = elems1[1] * X_Sph + elems2[1] * Y_Sph + elems3[1] * Z_Sph 101 | z1 = elems1[2] * X_Sph + elems2[2] * Y_Sph + elems3[2] * Z_Sph 102 | 103 | X_Sph = x1 104 | Y_Sph = y1 105 | Z_Sph = z1 106 | 107 | # 4. cart 2 sph 108 | ntheta = np.arctan2(Y_Sph, X_Sph) 109 | nphi = np.arctan2(Z_Sph, np.sqrt(np.multiply(X_Sph, X_Sph) + np.multiply(Y_Sph, Y_Sph))) 110 | 111 | pi = m.pi 112 | 113 | # 5. Sphere to pano 114 | min_theta = -pi 115 | max_theta = pi 116 | min_phi = -pi / 2. 117 | max_phi = pi / 2. 118 | 119 | min_x = 0 120 | max_x = ImPano_W - 1.0 121 | min_y = 0 122 | max_y = ImPano_H - 1.0 123 | 124 | ## for x 125 | a = (max_theta - min_theta) / (max_x - min_x) 126 | b = max_theta - a * max_x # from y=ax+b %% -a; 127 | nx = (1. / a)* (ntheta - b) 128 | 129 | ## for y 130 | a = (min_phi - max_phi) / (max_y - min_y) 131 | b = max_phi - a * min_y # from y=ax+b %% -a; 132 | ny = (1. / a)* (nphi - b) 133 | 134 | # 6. Final step interpolation and mapping 135 | im = np.array(my_interpol.interp2linear(image360, nx, ny), dtype=np.uint8) 136 | 137 | if f < fmin: 138 | r = diskradius(xi, f) 139 | DIM = im.shape 140 | ci = (np.round(DIM[0]/2), np.round(DIM[1]/2)) 141 | xx, yy = np.meshgrid(range(DIM[0])-ci[0], range(DIM[1])-ci[1]) 142 | mask = np.double((np.multiply(xx,xx)+np.multiply(yy,yy)) < r*r) -------------------------------------------------------------------------------- /dataset/my_interpol.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def interp2linear(z, xi, yi, extrapval=np.nan): 5 | """ 6 | Linear interpolation, equivalent to interp2(z, xi, yi, 'linear') in MATLAB 7 | 8 | @param z: function defined on a regular grid (here an H x W x 3 image) 9 | @param xi: matrix of x coordinates where interpolation is required 10 | @param yi: matrix of y coordinates where interpolation is required 11 | @param extrapval: value for out-of-range positions, default is numpy.nan 12 | @return: interpolated values at the [xi, yi] points 13 | @raise Exception: if the input shapes are inconsistent 14 | """ 15 | 16 | x = xi.copy() 17 | y = yi.copy() 18 | nrows, ncols = z.shape[:2] 19 | 20 | if nrows < 2 or ncols < 2: 21 | raise Exception("z shape is too small") 22 | 23 | if not x.shape == y.shape: 24 | raise Exception("sizes of X indexes and Y-indexes must match") 25 | 26 | # NOTE: the original opening of this file was lost in this dump; lines 1-31 27 | # are a reconstruction inferred from the usage in the code that follows. 28 | 29 | 30 | # find x values out of range 31 | x_bad = ((x < 0) | (x > ncols - 1)) 32 | if x_bad.any(): 33 | x[x_bad] = 0 34 | 35 | # find y values out of range 36 | y_bad = ((y < 0) | (y > nrows - 1)) 37 | if y_bad.any(): 38 | y[y_bad] = 0 39 | 40 | # linear indexing.
z must be in 'C' order 41 | ndx = np.floor(y) * ncols + np.floor(x) 42 | ndx = ndx.astype('int32') 43 | 44 | # fix parameters on x border 45 | d = (x == ncols - 1) 46 | x = (x - np.floor(x)) 47 | if d.any(): 48 | x[d] += 1 49 | ndx[d] -= 1 50 | 51 | # fix parameters on y border 52 | d = (y == nrows - 1) 53 | y = (y - np.floor(y)) 54 | if d.any(): 55 | y[d] += 1 56 | ndx[d] -= ncols 57 | 58 | # interpolate 59 | one_minus_t = 1 - y 60 | z_ravel0 = z[:,:,0].ravel() 61 | z_ravel1 = z[:,:,1].ravel() 62 | z_ravel2 = z[:,:,2].ravel() 63 | f0 = (z_ravel0[ndx] * one_minus_t + z_ravel0[ndx + ncols] * y ) * (1 - x) + ( 64 | z_ravel0[ndx + 1] * one_minus_t + z_ravel0[ndx + ncols + 1] * y) * x 65 | f1 = (z_ravel1[ndx] * one_minus_t + z_ravel1[ndx + ncols] * y) * (1 - x) + ( 66 | z_ravel1[ndx + 1] * one_minus_t + z_ravel1[ndx + ncols + 1] * y) * x 67 | f2 = (z_ravel2[ndx] * one_minus_t + z_ravel2[ndx + ncols] * y) * (1 - x) + ( 68 | z_ravel2[ndx + 1] * one_minus_t + z_ravel2[ndx + ncols + 1] * y) * x 69 | f = np.stack([f0,f1,f2], axis=-1) 70 | # Set out of range positions to extrapval 71 | if x_bad.any(): 72 | f[x_bad] = extrapval 73 | if y_bad.any(): 74 | f[y_bad] = extrapval 75 | 76 | return f 77 | -------------------------------------------------------------------------------- /dataset/outdoor/airport.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaxpoynvqmurki.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abhnhfppzmtubv.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afanzeqklagtbf.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afbztoxekfujit.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afddloqfpagejm.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahauzbmansusqz.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_askvbepeztrtfo.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_atdvgzdberyvkn.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awouoctwfnhqsv.jpg 10 | -------------------------------------------------------------------------------- /dataset/outdoor/amphitheatre.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajbqpeheatapua.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqfxsyzpnknutr.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aquzomrdweidez.jpg 4 | -------------------------------------------------------------------------------- /dataset/outdoor/arena.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aanjuibsevjzdd.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aawdyunzlwrbju.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_actaychgdhfvuw.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggxkafxnraetz.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiogjwwbyactjf.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aipjwoxdpyajvs.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aksxqlxyhquceu.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_almudsksmnqbmt.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ammbpnwmpsjxui.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aogjzcsytpvpqy.jpg 11 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apfoauwrzsaooo.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_appqflyfmjnljo.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_asocfvkymfrmsc.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avmybpgrmxpvzr.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avogayguohgsyx.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awxfqeyniyxrcf.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axtcfwqypdvmtj.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azbumluiwzlexx.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azdsecfaieexmp.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzldupqqdulso.jpg 21 | -------------------------------------------------------------------------------- /dataset/outdoor/balcony.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaakdgaakkxbjg.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aamqcjgugrttdc.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaqequqxmbchiu.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aarmxkdkqpswos.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aazixhllntvtni.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aazxhjfzjfcfjb.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abboyuqxjwyvyf.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abbyrczhbudzqg.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abfjypckjitdut.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abjogedykgwird.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abjugunoxggklu.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abmkfdxdgcpwtb.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abmrfoulgeglsa.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_absqedyaeyigmp.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_absvmwxhhuedqk.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aclyunenrwuplu.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_actvrarfjkjajp.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adbusvaugvkszj.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aerordtizrduei.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afmxufdldrlbdh.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghjuoerywrecq.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahakcalwwdyddd.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahdkjtuolpexya.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahfeqepnmvxvey.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahfkfudhxnvthp.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahstvdctlcketr.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahsuiwekguqnwc.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahxwychwegcbeu.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aimjeljirgwgtn.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiontvhcqeurlh.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiqokesxanmujl.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_airrnbducisiaz.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiusjnrjkcutgk.jpg 34 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aizfisqcxinpvq.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajabhbuzyzosep.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajgsqazhlzungf.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajgwsiivqnscrq.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhbmensjhxasn.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhrycarxfdsfi.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhxdreiqezsjn.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajihmcxjslkuop.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtsrvyhrhsdff.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_akciltbxupziox.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_akhomxhxjkfpef.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aohkttucypubtb.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apjvfljjsicevf.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apqfoywzqklaiu.jpg 48 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqwsvmgioykqvl.jpg 49 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzlydufytzgul.jpg 50 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_arvwqdrzpzesqk.jpg 51 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aumnndigcrqshu.jpg 52 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auzizhwoetdlql.jpg 53 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbbiehunpxexd.jpg 54 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbfkvvhmkwibx.jpg 55 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrmbzpbkososx.jpg 56 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrzghjfsjzjel.jpg 57 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awwffyvszgwamw.jpg 58 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awxonvfcbjmmaa.jpg 59 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axabrpntfhpngl.jpg 60 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axaeemjudrovps.jpg 61 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axfbndlwfroiwj.jpg 62 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axgbfdlefgvnyu.jpg 63 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axvtazqppqqefh.jpg 64 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axzfifcarhxzhh.jpg 65 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayfbsqbnohcytf.jpg 66 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azaneesctxlqzh.jpg 67 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azwyycibccxihs.jpg 68 | -------------------------------------------------------------------------------- /dataset/outdoor/boat_deck.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aamvswbkqpmcbw.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aancnwqcgyxhgl.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aarxodzplszprv.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aayovuzbxjucqh.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aazurydfipfwny.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abaothcwkgnrrl.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abcvayjdqiawli.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abfkbizvottnwx.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abjzwmcferuhro.jpg 10 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abnwcaikuwhepk.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abosmdvbhccktk.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_acyhnekttwlwdz.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_affwvyviygjyxm.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgxlqkjpegkuu.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afhdnunsiilaub.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afjbnkqutspgpg.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afkedkgenkystk.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afkjdjlqhczsos.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghncrowkndkof.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghzrntbuqzxeu.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agkstkvrvgyany.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agkzwalsgptuxz.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahakbeejkcofud.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahalwoksidsvij.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahaoiwhwrhxfzk.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahccbmumpypiwr.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahcefgcaqfadpb.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahdfagoelhwemf.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aheomytcpwsdzk.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahfkikuicptxwr.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahismidfdoxfzs.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahivwkgesneads.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahjwulojktwwyn.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahmirdxrwybtha.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahntnuykjgfcxj.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahpwvgdecrzqbt.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahrwopmjdqxbce.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahtbyvcnpnudrj.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aisjbbpklwzcwt.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajpsflsazgpjuc.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqhvmfffuskxs.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajsawznjunqkqo.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajttejtqvzgedb.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtutmmfaahhwk.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aonllekfgbyzyp.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aoomytuyjoqmuf.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aotmeqhsuvezyx.jpg 48 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqbmgudvjbukyf.jpg 49 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqfrzsrnybsvdu.jpg 50 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqvnhfvormazyh.jpg 51 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_arzpfpvplymwxo.jpg 52 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aujmkxzqdifquo.jpg 53 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auksijxdepvbfr.jpg 54 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aunignjwrlohfl.jpg 55 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_autdgciwulhpgr.jpg 56 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_authfhfbjijesx.jpg 57 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auxtrrmmuoxcfk.jpg 58 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auymvbdzmdwaca.jpg 59 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auyxjzhvucaftq.jpg 60 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avgvcytmlwzdbe.jpg 61 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avmcujdmplfgbb.jpg 62 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avmotpstbivuch.jpg 63 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avokmrmljhbnal.jpg 64 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avqvtngovbltmg.jpg 65 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awmdwizdzznmxb.jpg 66 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awurkwefzlptrl.jpg 67 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axdogeyduicjwj.jpg 68 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxddqfogvzgub.jpg 69 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxdonerwjwrbr.jpg 70 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azbvyjmemcfzpn.jpg 71 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azluqnkwhiqoty.jpg 72 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvycwmasbhvnv.jpg 73 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzvagcukrfjou.jpg 74 | -------------------------------------------------------------------------------- /dataset/outdoor/bridge.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aazlyggddoqoyy.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abaznwsxbzczen.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abejyayzgrxfst.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ablowciifyzwsr.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abomwjenilbbda.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abptenmbocwrnd.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afduvjegxojbgc.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgpeejvtujmqo.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgwhmygcrdbdn.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afhwmjmtsznocx.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afislkucttozfb.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agjcnrolnlqykl.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agmawpiybakmst.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agnaxmvpajsugp.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agqgctenfauzfb.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agqssjkcwjuveu.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agrbyjlgeliwdl.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agzjflishkqsgz.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahrcjpdogosxok.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahtazeajwriwnk.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivaxeaanvktzk.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aixagjwipcrigz.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajgjzcbupwfwes.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajpoaxinkmbnkj.jpg 25 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqbqwbqrutpqc.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqdyyatwpizrf.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqpclwrtupuzr.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajrjqaxutvjqon.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajrmbxbetbwzne.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajsigscryljwkj.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajsptznzzlowvv.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajufexlqxioxyj.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqxtwyhdcnxapk.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqxumezbuvvjzb.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqydrubpbcakhr.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzlrdsrjkezlv.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auezywhbulheem.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auqttpukpghajd.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auyvfycfezkiat.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avacmmnnuulzsh.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avakyulurawpjf.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbzruvuczuami.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avcovsninhhriq.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avdohfwjzlihaa.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avgljyeumjjuss.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avgqiylxclnyvv.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avhhkcmgfwzkuf.jpg 48 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avhsjpjbszqdzy.jpg 49 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avimbciiirkvwo.jpg 50 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avksfdzjjnvonv.jpg 51 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avlbgkeqpigmty.jpg 52 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avnzedaoubhjrx.jpg 53 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avorwrczvhohmj.jpg 54 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avrpudyqkbgcqr.jpg 55 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awmseezbzfxiii.jpg 56 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awpsdsjxhpdanu.jpg 57 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrvbmdookdvrm.jpg 58 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrvdkwhdmijsq.jpg 59 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axaiqwpwmlqeni.jpg 60 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axavniwzijarqd.jpg 61 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axdhobymetnuwj.jpg 62 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axdvfhywnxqswk.jpg 63 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axfwcmaveeerfh.jpg 64 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzesiphbeyoun.jpg 65 | -------------------------------------------------------------------------------- /dataset/outdoor/cemetery.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abwmkzdhjzeppb.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aficttoivoyxjo.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghvgzpmapwtid.jpg 4 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agqbsuihljyucq.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agqtwzeopomtmw.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agtbkozkizcgyf.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aharllpbiusddc.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahexgjlptxywzm.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahiigmoqaiflxz.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahpajvomhtokmr.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiozwjjsiamplw.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajdqhtjuybanqh.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajecicmbfhzrjy.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajesezabuwotnp.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhtcndktxbidc.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajijcgwhudxlpd.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqutdvaktujtx.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajumgjnbndelwu.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajuotbrsonhajy.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_antmjizxjldnwm.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ardzxotvxefwpw.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_arpjdhuaksbrxe.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aupzshiaqwofzg.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avfslhopnrmwim.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awoemdbsiwxyxy.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrayzmwgzebhq.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awrfwmaxdwsxqz.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awyfizxsksdpws.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awzoihtedvnimo.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aywhzizlzamveg.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxzprteyewtjs.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzbtarxvgxfop.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azajnedsthsjct.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azfuosxxzrrxhn.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azgpwzhosuarmx.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aznnicdzmrdtzz.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azqpcgmnkvjnrh.jpg 38 | -------------------------------------------------------------------------------- /dataset/outdoor/coast.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaxzoqymmwzsis.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abcbompzakarym.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abdgfkyrlsucgm.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abewlyffcppuub.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abjmldnxlaeipa.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adbalwqzcccqgy.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgwzkdgmvrxnv.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afscqnhvouixrp.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfobtmwinaaxb.jpg 10 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggcwwglgjyarv.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghwpxamrjrlte.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agjydocjddvvpy.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aglqphqnonpbhv.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agnvgsqtstesfd.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agppnzksgjyqrk.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agrwwyvmgvwnlb.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahbcxguuitikhx.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahbhswiyabeaak.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aheufrdribnqbu.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahfbguxbotnkle.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aioesnkeeebroq.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiohjxelycznvx.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiwgonfthpcweb.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aixetllrwxjrpd.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajdywkypkfygmq.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajeqpukazedvsq.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhhhwjlnfifjw.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajrxxmmkjfmkcd.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajswcjomjywoeh.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajsxwmkrfawpds.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtnvzzpsjdzre.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajypvwvsibypho.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajypwjfywhmhvg.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apnhxpfgeotlvy.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apxxkwijarouzj.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqfnvybezbdpzp.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqgpjsimflqcdd.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqupiuvicrxpxu.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aquxfcvlsyhioo.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqvvlaihdgxlgz.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqxvxddcvxlhzr.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqxxwbymhltcoy.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzatswanknvdu.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzixxdftiuoki.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzmrchcsjnwlc.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_asrajlymfkcohe.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aucwmugpigefwo.jpg 48 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aujhljrrlroysu.jpg 49 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auyfxdzbbykftq.jpg 50 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avaygirhjhpmez.jpg 51 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbxfrykcsaxud.jpg 52 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbytddlkjqcmx.jpg 53 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avdiqamzajcanh.jpg 54 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avkblbthnmbbwd.jpg 55 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avlekfgqbuxxvf.jpg 56 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avoeojtzpbejge.jpg 57 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avpgsrhcgspucs.jpg 58 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awmxzcnmnsoipl.jpg 59 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awqmwbnibcdmas.jpg 60 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awroxcejhpltuf.jpg 61 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awxeypwuxbvwfg.jpg 62 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axdwtltwwpwbep.jpg 63 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axfqkzgkerelum.jpg 64 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayuwbpjttdwvnq.jpg 65 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzefvnzxsjdmp.jpg 66 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzpeqohhifgvu.jpg 67 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzyipdlbnukjt.jpg 68 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azbavapqoteais.jpg 69 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azbmymhjyikedx.jpg 70 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azcxypuqdwsonz.jpg 71 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azhimvddlotlpx.jpg 72 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azhjhgfuccyykg.jpg 73 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azqvtlzifpeyec.jpg 74 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azrnbbpwknlzzs.jpg 75 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvmesyliejztc.jpg 76 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvzcckqearuke.jpg 77 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzmijdeqltsks.jpg 78 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzyqtvsmaictb.jpg 79 | -------------------------------------------------------------------------------- /dataset/outdoor/construction_site.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agoqkgoopnphkw.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajswmbkousqplj.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtbxuifhjtedt.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awnhemhzlwsmgd.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awvtpsllxmcqab.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awxaizgvlzumpa.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azauinclfmoqgf.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azegdmhewjudtx.jpg 9 | -------------------------------------------------------------------------------- /dataset/outdoor/desert.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aannudzqvxtxgh.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abcjysozhvdlyn.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ablvmlpxrbolbv.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adbwoyspvwzawp.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adcmwkwvdfbfgb.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aesqwcqvyimtfg.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afdvjxqrlbaqsh.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggjiuoiaigliw.jpg 9 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghlbktluwxrwu.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agviohueajdakw.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agxihwwmzvyfxk.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agxylkzyyjuhni.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agybombccjqqsz.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agypjcmivsbvlg.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aheesjwmsmgucw.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahhiijrdoggjyx.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahjgdvytlnhpmt.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahmbcendhjewzh.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahseoahedjlmtq.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahsqmiagvfgxlf.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivorffnrpmqjb.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivsvpxgvafgad.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajfiosymwoqzze.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqlkbuqihkwih.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqowasfzckesn.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_amhohsocajcxef.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apiglwoufpxbtx.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_apquyxozzvbvum.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_armwehxplmjiog.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aunvikjyidocaa.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aunweomufxuoef.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auseslbmwwnmqi.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auyeqaxzkjqlsc.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avdkbvaiouyrvb.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avhnprwqzvosia.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avifuyidpgffsw.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avkgsfrakyajih.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avpgiewmsmlgtv.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awuywhppwvmtso.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awwoxnktzsxvbh.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awzzumhmlyaafm.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxrzexmyotpvr.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azdbbdtlovyekl.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azddcrmnkezuxy.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azgcctmuhqosox.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azptwlqlogbilm.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvctujqluxcno.jpg 48 | -------------------------------------------------------------------------------- /dataset/outdoor/garden.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajrjdmbozeblkz.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqznkfgwjebqzl.jpg 3 | -------------------------------------------------------------------------------- /dataset/outdoor/gulch.txt: 
-------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiyqxzzheozgsx.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzobkfrlxxazu.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avjnwcltflmlvv.jpg 4 | -------------------------------------------------------------------------------- /dataset/outdoor/highway.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaajunfoiykhrc.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abkyajgflgejni.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afktfptpkrapsh.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggsaekdpuxukz.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahijguhcvfbwil.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahlwkmojkwmopo.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiodcmtykumlxx.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiwuedcbusmoub.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aixzdjcmupsjvd.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiyastrgkxikjw.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhvyruvzmauch.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awtgoykrzdmbpf.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awvwqdbvoqebdo.jpg 14 | -------------------------------------------------------------------------------- /dataset/outdoor/jetty.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aabdukwriwqwsz.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abbapzevmsvayn.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afadytkqenunbl.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfnbkvyynajen.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggvjvinwrelgy.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agkmwjyzrzbfae.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agoxzmnsyutwuu.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agtiudredulnbc.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agtrwjdsfpdjge.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahcblewkhfurwm.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahcifqzpvqrogl.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahppqlwvuzwths.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiojonzuecpotq.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivbwugxhxpcse.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivxsfjtricrcb.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajryqmuadivuig.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtiaisxnkeoln.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aulbdxubriolob.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auzjdyyhmoxeyu.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avcmrzlrrxqrhq.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awsthvnnbcucyu.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awvaqxrsmaqmnd.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awzawdqngxujjy.jpg 24 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awzobmduyqickc.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axcrabfwdwecmz.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axevtmmejmjguf.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayudnoavsrkwmr.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aywasgbhcshbbz.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayyoieymcplwag.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azcuclhlduykkj.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azdpdscpaqcfhh.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azgdkmxfdsfyny.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azqiwcxocbbxhq.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvyqjmxaemobr.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azytlvjuffhtyn.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzjscqujvthzn.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azzzmugxmixjsq.jpg 38 | -------------------------------------------------------------------------------- /dataset/outdoor/parking_lot.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaaaqfuipapzbc.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaacvcisjngpmt.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aazafgcdymlylt.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abgpsiostgrhfv.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abiaqawxgjlqxo.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abldzftdjcwfjt.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abliosercjuecm.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abotablpwlukmj.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aevfdybxgmvigw.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afdwvfaloaniwf.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afenzhlqrzxcca.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgitjjtogdtey.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afgsmaccbgnykm.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afhmtxacxpoxeg.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aftxdyuohypqsl.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfahhoiduehhz.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfckmdybysjji.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfsenhmyfaxrn.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aghsspcdmexops.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agiyzytilprmoz.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agjwftmeplbxzh.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agkvmffbylqbxi.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agmzrvycragrrd.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agpdlafdvoudzi.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agvpjhthdlkndx.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agxvyuepuzmxwe.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agzuscfxohipqk.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahcmdahyjxazlp.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahdhmltgngobuu.jpg 30 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahfnbxxiryrjeu.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahqolscgwiyfhn.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahqzepkyeeivod.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivcldpcczdglb.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiybwfezfmwkwn.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aizbyxgghlpeau.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajbeixvbyzxnzz.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajdgksosmnfdbp.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajfhljhpzueffu.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajgjzibdngrsef.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajihbnzinwjnbl.jpg 41 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajuotrqqffjwkz.jpg 42 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajutyciamzamkv.jpg 43 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajvjnhbjdewrrf.jpg 44 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajyjqulrwaxtbf.jpg 45 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_alxqxhcbdzchri.jpg 46 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aunesjqkclvvxz.jpg 47 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auxhpuwqhnxyyo.jpg 48 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auzwachcptocwg.jpg 49 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avcukmjsexipen.jpg 50 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avnhgozaosxzcw.jpg 51 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avpovzbevarwjs.jpg 52 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awphvaiabdgxju.jpg 53 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awtdksztdaqscz.jpg 54 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awtybgchqtupvy.jpg 55 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awuxkduxuohksm.jpg 56 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awzevomanrabtc.jpg 57 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axelgrxwuzaquu.jpg 58 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aywoasynoalhwc.jpg 59 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxvpvlfmamlop.jpg 60 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azaodfigebsudc.jpg 61 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azbwcqkduwhbvq.jpg 62 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azcqwwgomvnbdj.jpg 63 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azohmurlqddawf.jpg 64 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azxkpfjeiozuzf.jpg 65 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azxturfsbittlr.jpg 66 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azyzqvggaqgbha.jpg 67 | -------------------------------------------------------------------------------- /dataset/outdoor/patio.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaabmmjtuzjbgl.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_abztawtzuemvtn.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afdinnytzxfkkg.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agjtyzgpamtknt.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahmbhxveqdzitg.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aixapjxymccuje.jpg 7 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_akdclekndxpqzj.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqujxqprswusvt.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqvwytuiyiarpf.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqwnjwnygvgmjs.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awprvfpdvfwyes.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awvftaftfznapq.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayuvgqzxzipugt.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aywutibulwzusj.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayxntmhypvmoig.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azwdhadgdyjhzv.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azztqnkwidzyxh.jpg 18 | -------------------------------------------------------------------------------- /dataset/outdoor/skatepark.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afeocuktwxxbmh.jpg 2 | -------------------------------------------------------------------------------- /dataset/outdoor/sports_field.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aabkcbtztufsci.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afeueozrptmxrc.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agfasnokkqxlhg.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agognafacgcddv.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahlegbfqidaitn.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aivecidleeejkf.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajckaburtnfukc.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqdhulnvxeawjl.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aredgotnmolewi.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awuqagkqhikstc.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayzriqhxhmmtls.jpg 12 | -------------------------------------------------------------------------------- /dataset/outdoor/swimming_pool.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aammxhozxsnrme.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aamwngxburzzpy.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aaqcmrkbniacku.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aerouhyfbouert.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afeeaxargitjsn.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afehoiqjypadqq.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afkjjfsccjruee.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agdzmdkzzxcfyu.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agnetuehucmrya.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agofmrwvvlzrqb.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agzyamrwsvrlyn.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahcjkyyemwctqb.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiortyykqpiguv.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajhhsxnxkxgpif.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajputfegwlhlps.jpg 16 | 
http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtspsrqkidaoa.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajuyxhbxvctltn.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqazwcbdzccyco.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqzmwzpnzjpgis.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_augxvfuejbyqxt.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aumtswgocptvho.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avbozbjqwdxugu.jpg 23 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avecehwmtwmekz.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avifvpsiawramp.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avoehccuvhvrpj.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avrziwvcbwiuri.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awmttzopmrdlgw.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axaoagenrilita.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axccnqqjzijidi.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aytxzrhsoarttg.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayuftwhumhitlx.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayybkyzujpknhe.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azcjklijupegry.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azecigyflfodeh.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azttjzkcjlfvpd.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azyzqgsmbfellv.jpg 37 | -------------------------------------------------------------------------------- /dataset/outdoor/train_station_or_track.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aacoparrocssgc.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aanbezyxfwhedy.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adbmbckbomzgrc.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_adxczlvjzyvruu.jpg 5 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aeuqragleyjzfa.jpg 6 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_afaixyphvevccb.jpg 7 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aggrtlmywelpno.jpg 8 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aglilidynufszl.jpg 9 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahajbfjqkatgvt.jpg 10 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahggqewqbstynw.jpg 11 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahxgfxexvolmpr.jpg 12 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aidvgjatnseyje.jpg 13 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ainnsmrancmmux.jpg 14 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aioedwhfgcfpdr.jpg 15 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aiygntgvnxjyyp.jpg 16 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajbqwmtetcheby.jpg 17 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajfozlybiogkph.jpg 18 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajfvpypouhmeok.jpg 19 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajqeirnelzcgaj.jpg 20 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtyvvwrcaumzi.jpg 21 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ajtzzaaemnrwpi.jpg 22 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_akihjzmvyrzkuj.jpg 23 
| http://sun360.csail.mit.edu/Images/pano9104x4552/pano_akmhcyfowdpwgn.jpg 24 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_amvfhxcbgtykus.jpg 25 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_amviiqvvblwhxt.jpg 26 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aqyucauonujbsm.jpg 27 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_areohelddwowci.jpg 28 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_asetanyzascuih.jpg 29 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_auzvriwdrgkprw.jpg 30 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_avqxqykrwmzzlo.jpg 31 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awojlefwccgoye.jpg 32 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_awvjburptatval.jpg 33 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_axbzfmllalrbma.jpg 34 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aycitwbuxjwszc.jpg 35 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayyxgeutxkjhkg.jpg 36 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ayyyrkjnpkwvca.jpg 37 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azakilqvbwducu.jpg 38 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_aznuabtonobiic.jpg 39 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azveuldjbxzsgr.jpg 40 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azxhgohinvuthf.jpg 41 | -------------------------------------------------------------------------------- /dataset/outdoor/underwater.txt: -------------------------------------------------------------------------------- 1 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_agplcuzbmjzsvk.jpg 2 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_ahgehlmmltdoxz.jpg 3 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_areitujkgzlaso.jpg 4 | http://sun360.csail.mit.edu/Images/pano9104x4552/pano_azvmqckzjbzjsd.jpg 5 | -------------------------------------------------------------------------------- /dataset/split_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | 4 | IMAGE_FILE_PATH_DISTORTED = "path to your dataset" 5 | 6 | output_log = IMAGE_FILE_PATH_DISTORTED + "train/" 7 | if not os.path.exists(output_log): 8 | os.makedirs(output_log) 9 | 10 | output_log = IMAGE_FILE_PATH_DISTORTED + "test/" 11 | if not os.path.exists(output_log): 12 | os.makedirs(output_log) 13 | 14 | output_log = IMAGE_FILE_PATH_DISTORTED + "valid/" 15 | if not os.path.exists(output_log): 16 | os.makedirs(output_log) 17 | 18 | paths = glob.glob(IMAGE_FILE_PATH_DISTORTED + "*.jpg") 19 | paths.sort() 20 | for i, path in enumerate(paths[:int(len(paths)*0.8)]): 21 | os.rename(path,IMAGE_FILE_PATH_DISTORTED +'train/'+os.path.basename(path)) 22 | if i%10000 == 0: 23 | print('%d | %d' % (i, int(len(paths)*0.8))) 24 | print('train done') 25 | for path in paths[int(len(paths)*0.8):int(len(paths)*0.9)]: 26 | os.rename(path,IMAGE_FILE_PATH_DISTORTED +'test/'+os.path.basename(path)) 27 | for path in paths[int(len(paths)*0.9):]: 28 | os.rename(path,IMAGE_FILE_PATH_DISTORTED +'valid/'+os.path.basename(path)) 29 | -------------------------------------------------------------------------------- /extract_images.py: -------------------------------------------------------------------------------- 1 | import cv2, sys 2 | import numpy as np 3 | import glob 4 | import pdb 5 | 6 | # path to where you want to save extracted frames 7 | SAVE_PATH = "" 8 | 9 | # Path to your video file 10 | filename = "" 11 | 12 | 13
| def video_to_frames(video_filename): 14 | source_video = cv2.VideoCapture(video_filename) 15 | n_frames = int(source_video.get(cv2.CAP_PROP_FRAME_COUNT)) 16 | i = 0 17 | while i < n_frames: 18 | ret, frame = source_video.read() 19 | if ret: # skip frames that fail to decode, but keep advancing the index 20 | cv2.imwrite(SAVE_PATH + "frame_" + str(i) + ".jpg", frame) 21 | i += 1 22 | source_video.release() 23 | 24 | video_to_frames(filename) 25 | -------------------------------------------------------------------------------- /network_training/Classification/Dual_Net/dist/train_classifier_dist.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input 9 | from utils import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.allow_growth = True 20 | config.allow_soft_placement = True 21 | set_session(tf.Session(config=config)) 22 | 23 | model_name = 'model_multi_class/' 24 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 25 | # Save 26 | output_folder = SAVE + model_name 27 | if not os.path.exists(output_folder): 28 | os.makedirs(output_folder) 29 | 30 | output_log = output_folder + "Log/" 31 | if not os.path.exists(output_log): 32 | os.makedirs(output_log) 33 | 34 | output_weight = output_folder + "Best/" 35 | if not os.path.exists(output_weight): 36 | os.makedirs(output_weight) 37 | 38 | # training parameters 39 | batch_size = 64 40 | nb_epoch = 10000 41 | 42 | IMAGE_FILE_PATH_DISTORTED = "" 43 | 44 | classes = list(np.arange(0, 61, 1) / 50.)
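# The 61 distortion classes cover 0.00 to 1.20 in steps of 0.02 (np.arange(0, 61) / 50.).
# Ground-truth parameters are recovered from the generated filenames, which encode them as
# ..._f_<focal>_d_<distortion>.jpg (see the split() calls in get_paths below).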
45 | 46 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 47 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED+'train/' + "*.jpg") 48 | paths_train.sort() 49 | parameters = [] 50 | labels_train = [] 51 | for path in paths_train: 52 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 53 | parameters.append(curr_parameter) 54 | curr_class = classes.index(curr_parameter) 55 | labels_train.append(curr_class) 56 | 57 | c = list(zip(paths_train, labels_train)) 58 | random.shuffle(c) 59 | paths_train, labels_train = zip(*c) 60 | paths_train, labels_train = list(paths_train), list(labels_train) 61 | 62 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 63 | paths_valid.sort() 64 | parameters = [] 65 | labels_valid = [] 66 | for path in paths_valid: 67 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 68 | parameters.append(curr_parameter) 69 | curr_class = classes.index(curr_parameter) 70 | labels_valid.append(curr_class) 71 | 72 | c = list(zip(paths_valid, labels_valid)) 73 | random.shuffle(c) 74 | paths_valid, labels_valid = zip(*c) 75 | paths_valid, labels_valid = list(paths_valid), list(labels_valid) 76 | 77 | 78 | return paths_train, labels_train, paths_valid, labels_valid 79 | 80 | 81 | paths_train, labels_train, paths_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 82 | 83 | print(len(paths_train), 'train samples') 84 | print(len(paths_valid), 'valid samples') 85 | 86 | with tf.device('/gpu:0'): 87 | input_shape = (299, 299, 3) 88 | main_input = Input(shape=input_shape, dtype='float32', name='main_input') 89 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape) 90 | phi_features = phi_model.output 91 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 92 | final_output_phi = Dense(len(classes), activation='softmax', name='fc181-phi')(phi_flattened) 93 | 94 | layer_index = 0 95 | for layer in phi_model.layers: 96 | layer.name = layer.name + "_phi" 97 | 98 | model = Model(input=main_input, output=final_output_phi) 99 | 100 | learning_rate = 10 ** -5 101 | adam = optimizers.Adam(lr=learning_rate) 102 | model.compile(loss='categorical_crossentropy', 103 | optimizer=adam, 104 | metrics=['accuracy'] 105 | ) 106 | model.summary() 107 | model_json = phi_model.to_json() 108 | 109 | with open(output_folder + "model.json", "w") as json_file: 110 | json_file.write(model_json) 111 | 112 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 113 | 114 | tensorboard = TensorBoard(log_dir=output_log) 115 | 116 | checkpointer = CustomModelCheckpoint( 117 | model_for_saving=model, 118 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 119 | save_best_only=True, 120 | monitor='val_loss', 121 | save_weights_only=True 122 | ) 123 | 124 | generator_training = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 125 | preprocess_func=preprocess_input, shuffle=True).generate(paths_train, 126 | labels_train, 127 | len(classes)) 128 | generator_valid = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 129 | preprocess_func=preprocess_input, shuffle=True).generate(paths_valid, 130 | labels_valid, 131 | len(classes)) 132 | 133 | # training loop 134 | model.fit_generator( 135 | generator=generator_training, 136 | steps_per_epoch=(len(paths_train) // batch_size), 137 | epochs=nb_epoch, 138 | validation_data=generator_valid, 139 | validation_steps=(len(paths_valid) // batch_size), 140 | 
callbacks=[tensorboard, checkpointer], 141 | use_multiprocessing=True, 142 | workers=2, 143 | #verbose=3 144 | ) 145 | -------------------------------------------------------------------------------- /network_training/Classification/Dual_Net/focal/train_classifier_focal.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input 9 | from utils import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.allow_growth = True 20 | config.allow_soft_placement = True 21 | set_session(tf.Session(config=config)) 22 | 23 | model_name = 'model_multi_class/' 24 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 25 | # Save 26 | output_folder = SAVE + model_name 27 | if not os.path.exists(output_folder): 28 | os.makedirs(output_folder) 29 | 30 | output_log = output_folder + "Log/" 31 | if not os.path.exists(output_log): 32 | os.makedirs(output_log) 33 | 34 | output_weight = output_folder + "Best/" 35 | if not os.path.exists(output_weight): 36 | os.makedirs(output_weight) 37 | 38 | # training parameters 39 | batch_size = 64 40 | nb_epoch = 10000 41 | 42 | IMAGE_FILE_PATH_DISTORTED = "" 43 | 44 | classes_focal = list(np.arange(40, 501, 10))# focal 45 | 46 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 47 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED+'train/' + "*.jpg") 48 | paths_train.sort() 49 | parameters = [] 50 | labels_train = [] 51 | 52 | for path in paths_train: 53 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 54 | parameters.append(curr_parameter) 55 | curr_class = classes_focal.index(curr_parameter) 56 | labels_train.append(curr_class) 57 | 58 | c = list(zip(paths_train, labels_train)) 59 | random.shuffle(c) 60 | paths_train, labels_train = zip(*c) 61 | paths_train, labels_train = list(paths_train), list(labels_train) 62 | 63 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 64 | paths_valid.sort() 65 | parameters = [] 66 | labels_valid = [] 67 | 68 | for path in paths_valid: 69 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 70 | parameters.append(curr_parameter) 71 | curr_class = classes_focal.index(curr_parameter) 72 | labels_valid.append(curr_class) 73 | 74 | c = list(zip(paths_valid, labels_valid)) 75 | random.shuffle(c) 76 | paths_valid, labels_valid = zip(*c) 77 | paths_valid, labels_valid = list(paths_valid), list(labels_valid) 78 | 79 | 80 | return paths_train, labels_train, paths_valid, labels_valid 81 | 82 | 83 | paths_train, labels_train, paths_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 84 | 85 | print(len(paths_train), 'train samples') 86 | print(len(paths_valid), 'valid samples') 87 | 88 | with tf.device('/gpu:1'): 89 | input_shape = (299, 299, 3) 90 | main_input = Input(shape=input_shape, dtype='float32', name='main_input') 91 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape,pooling='avg') 92 
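# Note: with pooling='avg' the InceptionV3 backbone ends in global average pooling and
# already outputs a 2048-d feature vector, so no Flatten layer is needed before the
# softmax head (hence the Flatten call left commented out below).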
| phi_features = phi_model.output 93 | # phi_flattened = Flatten(name='phi-flattened')(phi_features) 94 | final_output_phi = Dense(len(classes_focal), activation='softmax', name='fc181-phi')(phi_features) 95 | 96 | layer_index = 0 97 | for layer in phi_model.layers: 98 | layer.name = layer.name + "_phi" 99 | 100 | model = Model(input=main_input, output=final_output_phi) 101 | 102 | learning_rate = 10 ** -5 103 | adam = optimizers.Adam(lr=learning_rate) 104 | model.compile(loss='categorical_crossentropy', 105 | optimizer=adam, 106 | metrics=['accuracy'] 107 | ) 108 | model.summary() 109 | model_json = phi_model.to_json() 110 | 111 | with open(output_folder + "model.json", "w") as json_file: 112 | json_file.write(model_json) 113 | 114 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 115 | 116 | tensorboard = TensorBoard(log_dir=output_log) 117 | 118 | checkpointer = CustomModelCheckpoint( 119 | model_for_saving=model, 120 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 121 | save_best_only=True, 122 | monitor='val_loss', 123 | save_weights_only=True 124 | ) 125 | 126 | generator_training = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 127 | preprocess_func=preprocess_input, shuffle=True).generate(paths_train, 128 | labels_train, 129 | len(classes_focal)) 130 | generator_valid = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 131 | preprocess_func=preprocess_input, shuffle=True).generate(paths_valid, 132 | labels_valid, 133 | len(classes_focal)) 134 | 135 | # training loop 136 | model.fit_generator( 137 | generator=generator_training, 138 | steps_per_epoch=(len(paths_train) // batch_size), # 29977 139 | epochs=nb_epoch, 140 | validation_data=generator_valid, 141 | validation_steps=(len(paths_valid) // batch_size), 142 | callbacks=[tensorboard, checkpointer], 143 | use_multiprocessing=True, 144 | workers=2, 145 | # verbose=3 146 | ) 147 | -------------------------------------------------------------------------------- /network_training/Regression/Dual_Net/dist/train_regressor_dist.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard, LearningRateScheduler 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input 9 | from utils_regressor_dist import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob, math 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.allow_growth = True 20 | config.allow_soft_placement = True 21 | set_session(tf.Session(config=config)) 22 | 23 | model_name = 'model_multi_class/' 24 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 25 | # Save 26 | output_folder = SAVE + model_name 27 | if not os.path.exists(output_folder): 28 | os.makedirs(output_folder) 29 | 30 | output_log = output_folder + "Log/" 31 | if not os.path.exists(output_log): 32 | os.makedirs(output_log) 33 | 34 | output_weight = output_folder + "Best/" 35 | if not os.path.exists(output_weight): 36 | os.makedirs(output_weight) 37 | 38 | # 
training parameters 39 | batch_size = 64 40 | nb_epoch = 10000 41 | 42 | IMAGE_FILE_PATH_DISTORTED = "" 43 | 44 | focal_start = 40 45 | focal_end = 500 46 | dist_end = 1.2 47 | classes_focal = list(np.arange(focal_start, focal_end+1, 10)) 48 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 49 | 50 | 51 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 52 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'train/' + "*.jpg") 53 | paths_train.sort() 54 | parameters = [] 55 | labels_focal_train = [] 56 | for path in paths_train: 57 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 58 | labels_focal_train.append((curr_parameter - focal_start*1.) / (focal_end*1.+1. - focal_start*1.)) #normalize between 0 and 1 59 | labels_distortion_train = [] 60 | for path in paths_train: 61 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 62 | labels_distortion_train.append(curr_parameter/1.2) 63 | 64 | c = list(zip(paths_train, labels_focal_train,labels_distortion_train)) 65 | random.shuffle(c) 66 | paths_train, labels_focal_train,labels_distortion_train = zip(*c) 67 | paths_train, labels_focal_train, labels_distortion_train = list(paths_train), list(labels_focal_train), list(labels_distortion_train) 68 | labels_train = [list(a) for a in zip(labels_focal_train, labels_distortion_train)] 69 | 70 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 71 | paths_valid.sort() 72 | parameters = [] 73 | labels_focal_valid = [] 74 | for path in paths_valid: 75 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 76 | labels_focal_valid.append((curr_parameter-focal_start*1.)/(focal_end*1.+1.-focal_start*1.)) #normalize between 0 and 1 77 | labels_distortion_valid = [] 78 | for path in paths_valid: 79 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 80 | labels_distortion_valid.append(curr_parameter/1.2) 81 | 82 | c = list(zip(paths_valid, labels_focal_valid, labels_distortion_valid)) 83 | random.shuffle(c) 84 | paths_valid, labels_focal_valid, labels_distortion_valid = zip(*c) 85 | paths_valid, labels_focal_valid, labels_distortion_valid = list(paths_valid), list(labels_focal_valid), list(labels_distortion_valid) 86 | labels_valid = [list(a) for a in zip(labels_focal_valid, labels_distortion_valid)] 87 | 88 | return paths_train, labels_train, paths_valid, labels_valid 89 | 90 | 91 | paths_train, labels_train, paths_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 92 | 93 | print(len(paths_train), 'train samples') 94 | print(len(paths_valid), 'valid samples') 95 | 96 | with tf.device('/gpu:0'): 97 | input_shape = (299, 299, 3) 98 | main_input = Input(shape=input_shape, dtype='float32', name='main_input') 99 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape) 100 | phi_features = phi_model.output 101 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 102 | final_output_distortion = Dense(1, activation='sigmoid', name='output_distortion')(phi_flattened) 103 | 104 | layer_index = 0 105 | for layer in phi_model.layers: 106 | layer.name = layer.name + "_phi" 107 | 108 | model = Model(input=main_input, output=final_output_distortion) 109 | model.load_weights('logs/20180528-005731/model_multi_class/Best/weights_05_0.01.h5') 110 | 111 | learning_rate = 10 ** -6 112 | adam = optimizers.Adam(lr=learning_rate) 113 | model.compile(loss='logcosh', 114 | optimizer=adam, 115 | metrics=['logcosh'] 116 | ) 117 | model.summary() 118 | model_json = phi_model.to_json() 119
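# Note: to_json() serializes only the InceptionV3 backbone (phi_model); the Dense head is
# rebuilt in code at prediction time and the full weights (backbone + head) are restored
# from the .h5 checkpoints written by CustomModelCheckpoint below.
# To map a prediction back to physical units, invert the label scaling used in get_paths:
#   focal = pred_focal * (focal_end + 1. - focal_start) + focal_start
#   dist  = pred_dist * 1.2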
| 120 | with open(output_folder + "model.json", "w") as json_file: 121 | json_file.write(model_json) 122 | 123 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 124 | 125 | tensorboard = TensorBoard(log_dir=output_log) 126 | 127 | checkpointer = CustomModelCheckpoint( 128 | model_for_saving=model, 129 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 130 | save_best_only=True, 131 | monitor='val_loss', 132 | save_weights_only=True 133 | ) 134 | generator_training = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 135 | preprocess_func=preprocess_input, shuffle=True).generate(paths_train, 136 | labels_train) 137 | generator_valid = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 138 | preprocess_func=preprocess_input, shuffle=True).generate(paths_valid, 139 | labels_valid) 140 | 141 | 142 | model.fit_generator( 143 | generator=generator_training, 144 | steps_per_epoch=(len(paths_train) // batch_size), 145 | epochs=nb_epoch, 146 | validation_data=generator_valid, 147 | validation_steps=(len(paths_valid) // batch_size), 148 | callbacks=[tensorboard, checkpointer], 149 | use_multiprocessing=True, 150 | workers=2, 151 | #verbose=3 152 | ) 153 | -------------------------------------------------------------------------------- /network_training/Regression/Dual_Net/focal/train_regressor_focal.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard, LearningRateScheduler 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input 9 | from utils_regressor_focal import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob, math 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.allow_growth = True 20 | config.allow_soft_placement = True 21 | set_session(tf.Session(config=config)) 22 | 23 | model_name = 'model_multi_class/' 24 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 25 | # Save 26 | output_folder = SAVE + model_name 27 | if not os.path.exists(output_folder): 28 | os.makedirs(output_folder) 29 | 30 | output_log = output_folder + "Log/" 31 | if not os.path.exists(output_log): 32 | os.makedirs(output_log) 33 | 34 | output_weight = output_folder + "Best/" 35 | if not os.path.exists(output_weight): 36 | os.makedirs(output_weight) 37 | 38 | # training parameters 39 | batch_size = 64 40 | nb_epoch = 10000 41 | 42 | IMAGE_FILE_PATH_DISTORTED = "" 43 | 44 | focal_start = 40 45 | focal_end = 500 46 | dist_end = 1.2 47 | classes_focal = list(np.arange(focal_start, focal_end+1, 10)) 48 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 49 | 50 | 51 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 52 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'train/' + "*.jpg") 53 | paths_train.sort() 54 | parameters = [] 55 | labels_focal_train = [] 56 | for path in paths_train: 57 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 58 | labels_focal_train.append((curr_parameter - focal_start*1.) / (focal_end*1.+1. 
- focal_start*1.)) #normalize between 0 and 1 59 | labels_distortion_train = [] 60 | for path in paths_train: 61 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 62 | labels_distortion_train.append(curr_parameter/1.2) 63 | 64 | c = list(zip(paths_train, labels_focal_train,labels_distortion_train)) 65 | random.shuffle(c) 66 | paths_train, labels_focal_train,labels_distortion_train = zip(*c) 67 | paths_train, labels_focal_train, labels_distortion_train = list(paths_train), list(labels_focal_train), list(labels_distortion_train) 68 | labels_train = [list(a) for a in zip(labels_focal_train, labels_distortion_train)] 69 | 70 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 71 | paths_valid.sort() 72 | parameters = [] 73 | labels_focal_valid = [] 74 | for path in paths_valid: 75 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 76 | labels_focal_valid.append((curr_parameter-focal_start*1.)/(focal_end*1.+1.-focal_start*1.)) #normalize between 0 and 1 77 | labels_distortion_valid = [] 78 | for path in paths_valid: 79 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 80 | labels_distortion_valid.append(curr_parameter/1.2) 81 | 82 | c = list(zip(paths_valid, labels_focal_valid, labels_distortion_valid)) 83 | random.shuffle(c) 84 | paths_valid, labels_focal_valid, labels_distortion_valid = zip(*c) 85 | paths_valid, labels_focal_valid, labels_distortion_valid = list(paths_valid), list(labels_focal_valid), list(labels_distortion_valid) 86 | labels_valid = [list(a) for a in zip(labels_focal_valid, labels_distortion_valid)] 87 | 88 | return paths_train, labels_train, paths_valid, labels_valid 89 | 90 | 91 | paths_train, labels_train, paths_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 92 | 93 | print(len(paths_train), 'train samples') 94 | print(len(paths_valid), 'valid samples') 95 | 96 | with tf.device('/gpu:1'): 97 | input_shape = (299, 299, 3) 98 | main_input = Input(shape=input_shape, dtype='float32', name='main_input') 99 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape) 100 | phi_features = phi_model.output 101 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 102 | final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_flattened) 103 | 104 | layer_index = 0 105 | for layer in phi_model.layers: 106 | layer.name = layer.name + "_phi" 107 | 108 | model = Model(input=main_input, output=final_output_focal) 109 | model.load_weights('weights_05_0.01.h5') 110 | 111 | learning_rate = 10 ** -6 112 | 113 | adam = optimizers.Adam(lr=learning_rate) 114 | model.compile(loss='logcosh', 115 | optimizer=adam, 116 | metrics=['logcosh'] 117 | ) 118 | model.summary() 119 | model_json = phi_model.to_json() 120 | 121 | with open(output_folder + "model.json", "w") as json_file: 122 | json_file.write(model_json) 123 | 124 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 125 | 126 | tensorboard = TensorBoard(log_dir=output_log) 127 | 128 | checkpointer = CustomModelCheckpoint( 129 | model_for_saving=model, 130 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 131 | save_best_only=True, 132 | monitor='val_loss', 133 | save_weights_only=True 134 | ) 135 | generator_training = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 136 | preprocess_func=preprocess_input, shuffle=True).generate(paths_train, 137 | labels_train) 138 | generator_valid = 
RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 139 | preprocess_func=preprocess_input, shuffle=True).generate(paths_valid, 140 | labels_valid) 141 | 142 | # training loop 143 | model.fit_generator( 144 | generator=generator_training, 145 | steps_per_epoch=(len(paths_train) // batch_size), # 29977 146 | epochs=nb_epoch, 147 | validation_data=generator_valid, 148 | validation_steps=(len(paths_valid) // batch_size), 149 | callbacks=[tensorboard, checkpointer], 150 | use_multiprocessing=True, 151 | workers=2, 152 | #verbose=3 153 | ) 154 | 155 | -------------------------------------------------------------------------------- /network_training/Regression/Seq_Net/dist/train_regressor_dist_concat_focal.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input, Concatenate 9 | from utils_regressor_dist_concat_focal import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | model_name = 'model_multi_class/' 19 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 20 | # Save 21 | output_folder = SAVE + model_name 22 | if not os.path.exists(output_folder): 23 | os.makedirs(output_folder) 24 | 25 | output_log = output_folder + "Log/" 26 | if not os.path.exists(output_log): 27 | os.makedirs(output_log) 28 | 29 | output_weight = output_folder + "Best/" 30 | if not os.path.exists(output_weight): 31 | os.makedirs(output_weight) 32 | 33 | # training parameters 34 | batch_size = 64 35 | nb_epoch = 10000 36 | 37 | IMAGE_FILE_PATH_DISTORTED = "" 38 | 39 | focal_start = 40 40 | focal_end = 500 41 | classes_focal = list(np.arange(focal_start, focal_end+1, 10))# focal 42 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 43 | 44 | 45 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 46 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'train/' + "*.jpg") 47 | paths_train.sort() 48 | parameters = [] 49 | labels_focal_train = [] 50 | for path in paths_train: 51 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 52 | parameters.append(curr_parameter) 53 | curr_class = curr_parameter 54 | labels_focal_train.append((curr_class - focal_start*1.) / (focal_end+1.
- focal_start*1.)) #normalize between 0 and 1 55 | labels_distortion_train = [] 56 | for path in paths_train: 57 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 58 | parameters.append(curr_parameter) 59 | curr_class = curr_parameter 60 | labels_distortion_train.append(curr_class) 61 | 62 | c = list(zip(paths_train, labels_focal_train,labels_distortion_train)) 63 | random.shuffle(c) 64 | paths_train, labels_focal_train,labels_distortion_train = zip(*c) 65 | paths_train, labels_focal_train, labels_distortion_train = list(paths_train), list(labels_focal_train), list(labels_distortion_train) 66 | labels_train = labels_distortion_train 67 | input_train = [list(a) for a in zip(paths_train, labels_focal_train)] 68 | 69 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 70 | paths_valid.sort() 71 | parameters = [] 72 | labels_focal_valid = [] 73 | for path in paths_valid: 74 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 75 | parameters.append(curr_parameter) 76 | curr_class = curr_parameter 77 | labels_focal_valid.append((curr_class-focal_start*1.)/(focal_end+1.-focal_start*1.)) #normalize between 0 and 1 78 | labels_distortion_valid = [] 79 | for path in paths_valid: 80 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 81 | parameters.append(curr_parameter) 82 | curr_class = curr_parameter 83 | labels_distortion_valid.append(curr_class) 84 | 85 | c = list(zip(paths_valid, labels_focal_valid, labels_distortion_valid)) 86 | random.shuffle(c) 87 | paths_valid, labels_focal_valid, labels_distortion_valid = zip(*c) 88 | paths_valid, labels_focal_valid, labels_distortion_valid = list(paths_valid), list(labels_focal_valid), list(labels_distortion_valid) 89 | labels_valid = labels_distortion_valid 90 | input_valid = [list(a) for a in zip(paths_valid, labels_focal_valid)] 91 | 92 | return input_train, labels_train, input_valid, labels_valid 93 | 94 | input_train, labels_train, input_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 95 | 96 | print(len(input_train), 'train samples') 97 | print(len(input_valid), 'valid samples') 98 | 99 | with tf.device('/gpu:0'): 100 | image_shape = (299, 299, 3) 101 | image_input = Input(shape=image_shape, dtype='float32', name='main_input') 102 | input_shape_concat = (len(classes_focal),) 103 | concat_input = Input(shape=input_shape_concat, dtype='float32', name='concat_input') 104 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=image_input, input_shape=image_shape) 105 | phi_features = phi_model.output 106 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 107 | phi_concat = Concatenate(axis=-1)([phi_flattened, concat_input]) # append the focal-length input vector to the CNN features 108 | final_output_dist = Dense(1, activation='sigmoid', name='output_distortion')(phi_concat) 109 | 110 | model = Model(input=[image_input,concat_input], output=final_output_dist) 111 | 112 | learning_rate = 10 ** -5 113 | adam = optimizers.Adam(lr=learning_rate) 114 | model.compile(loss='logcosh', 115 | optimizer=adam, 116 | metrics=['logcosh']) 117 | model.summary() 118 | model_json = phi_model.to_json() 119 | 120 | with open(output_folder + "model.json", "w") as json_file: 121 | json_file.write(model_json) 122 | 123 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 124 | 125 | tensorboard = TensorBoard(log_dir=output_log) 126 | 127 | checkpointer = CustomModelCheckpoint( 128 | model_for_saving=model, 129 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 130 | save_best_only=True, 131 |
monitor='val_loss', 132 | save_weights_only=True 133 | ) 134 | 135 | generator_training = RotNetDataGenerator(input_shape=image_shape, batch_size=batch_size, one_hot=True, 136 | preprocess_func=preprocess_input, shuffle=True).generate(input_train, 137 | labels_train,len(classes_focal)) 138 | generator_valid = RotNetDataGenerator(input_shape=image_shape, batch_size=batch_size, one_hot=True, 139 | preprocess_func=preprocess_input, shuffle=True).generate(input_valid, 140 | labels_valid,len(classes_focal)) 141 | 142 | # training loop 143 | model.fit_generator( 144 | generator=generator_training, 145 | steps_per_epoch=(len(input_train) // batch_size), 146 | epochs=nb_epoch, 147 | validation_data=generator_valid, 148 | validation_steps=(len(input_valid) // batch_size), 149 | callbacks=[tensorboard, checkpointer], 150 | use_multiprocessing=True, 151 | workers=2 152 | #verbose=3 153 | ) 154 | -------------------------------------------------------------------------------- /network_training/Regression/Seq_Net/focal/train_regressor_focal_concat_dist.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input, Concatenate 9 | from utils_regressor_focal_concat_dist import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.allow_growth = True 20 | config.allow_soft_placement = True 21 | set_session(tf.Session(config=config)) 22 | 23 | model_name = 'model_multi_class/' 24 | SAVE = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 25 | # Save 26 | output_folder = SAVE + model_name 27 | if not os.path.exists(output_folder): 28 | os.makedirs(output_folder) 29 | 30 | output_log = output_folder + "Log/" 31 | if not os.path.exists(output_log): 32 | os.makedirs(output_log) 33 | 34 | output_weight = output_folder + "Best/" 35 | if not os.path.exists(output_weight): 36 | os.makedirs(output_weight) 37 | 38 | # training parameters 39 | batch_size = 64 40 | nb_epoch = 10000 41 | 42 | IMAGE_FILE_PATH_DISTORTED = "" 43 | 44 | focal_start = 40 45 | focal_end = 500 46 | classes_focal = list(np.arange(focal_start, focal_end+1, 10)) 47 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 48 | 49 | 50 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 51 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'train/' + "*.jpg") 52 | paths_train.sort() 53 | parameters = [] 54 | labels_focal_train = [] 55 | for path in paths_train: 56 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 57 | parameters.append(curr_parameter) 58 | curr_class = curr_parameter 59 | labels_focal_train.append((curr_class - focal_start*1.) / (focal_end+1. 
- focal_start*1.)) #normalize between 0 and 1 60 | labels_distortion_train = [] 61 | for path in paths_train: 62 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 63 | parameters.append(curr_parameter) 64 | curr_class = curr_parameter 65 | labels_distortion_train.append(curr_class) 66 | 67 | c = list(zip(paths_train, labels_focal_train,labels_distortion_train)) 68 | random.shuffle(c) 69 | paths_train, labels_focal_train,labels_distortion_train = zip(*c) 70 | paths_train, labels_focal_train, labels_distortion_train = list(paths_train), list(labels_focal_train), list(labels_distortion_train) 71 | labels_train = labels_focal_train 72 | input_train = [list(a) for a in zip(paths_train, labels_distortion_train)] 73 | 74 | paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg") 75 | paths_valid.sort() 76 | parameters = [] 77 | labels_focal_valid = [] 78 | for path in paths_valid: 79 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 80 | parameters.append(curr_parameter) 81 | curr_class = curr_parameter 82 | labels_focal_valid.append((curr_class-focal_start*1.)/(focal_end+1.-focal_start*1.)) #normalize between 0 and 1 83 | labels_distortion_valid = [] 84 | for path in paths_valid: 85 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 86 | parameters.append(curr_parameter) 87 | curr_class = curr_parameter 88 | labels_distortion_valid.append(curr_class) 89 | 90 | c = list(zip(paths_valid, labels_focal_valid, labels_distortion_valid)) 91 | random.shuffle(c) 92 | paths_valid, labels_focal_valid, labels_distortion_valid = zip(*c) 93 | paths_valid, labels_focal_valid, labels_distortion_valid = list(paths_valid), list(labels_focal_valid), list(labels_distortion_valid) 94 | labels_valid = labels_focal_valid 95 | input_valid = [list(a) for a in zip(paths_valid, labels_distortion_valid)] 96 | 97 | return input_train, labels_train, input_valid, labels_valid 98 | 99 | input_train, labels_train, input_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED) 100 | 101 | print(len(input_train), 'train samples') 102 | print(len(input_valid), 'valid samples') 103 | 104 | with tf.device('/gpu:1'): 105 | image_shape = (299, 299, 3) 106 | image_input = Input(shape=image_shape, dtype='float32', name='main_input') 107 | input_shape_concat = (len(classes_distortion),) 108 | concat_input = Input(shape=input_shape_concat, dtype='float32', name='concat_input') 109 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=image_input, input_shape=image_shape) 110 | phi_features = phi_model.output 111 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 112 | phi_concat = Concatenate(axis=-1)([phi_flattened, concat_input]) # append the distortion input vector to the CNN features 113 | final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_concat) 114 | 115 | model = Model(input=[image_input,concat_input], output=final_output_focal) 116 | 117 | learning_rate = 10 ** -5 118 | adam = optimizers.Adam(lr=learning_rate) 119 | model.compile(loss='logcosh', 120 | optimizer=adam, 121 | metrics=['logcosh']) 122 | model.summary() 123 | model_json = phi_model.to_json() 124 | 125 | with open(output_folder + "model.json", "w") as json_file: 126 | json_file.write(model_json) 127 | 128 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__)) 129 | 130 | tensorboard = TensorBoard(log_dir=output_log) 131 | 132 | checkpointer = CustomModelCheckpoint( 133 | model_for_saving=model, 134 | filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5", 135 |
save_best_only=True, 136 | monitor='val_loss', 137 | save_weights_only=True 138 | ) 139 | 140 | generator_training = RotNetDataGenerator(input_shape=image_shape, batch_size=batch_size, one_hot=True, 141 | preprocess_func=preprocess_input, shuffle=True).generate(input_train, 142 | labels_train,len(classes_distortion)) 143 | generator_valid = RotNetDataGenerator(input_shape=image_shape, batch_size=batch_size, one_hot=True, 144 | preprocess_func=preprocess_input, shuffle=True).generate(input_valid, 145 | labels_valid,len(classes_distortion)) 146 | 147 | # training loop 148 | model.fit_generator( 149 | generator=generator_training, 150 | steps_per_epoch=(len(input_train) // batch_size), 151 | epochs=nb_epoch, 152 | validation_data=generator_valid, 153 | validation_steps=(len(input_valid) // batch_size), 154 | callbacks=[tensorboard, checkpointer], 155 | use_multiprocessing=True, 156 | workers=2, 157 | #verbose=3 158 | ) 159 | -------------------------------------------------------------------------------- /network_training/Regression/Single_net/train_regressor_dist_focal.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | from keras.callbacks import TensorBoard, LearningRateScheduler 5 | from keras.applications.inception_v3 import InceptionV3 6 | from keras.applications.imagenet_utils import preprocess_input 7 | from keras.models import Model 8 | from keras.layers import Dense, Flatten, Input 9 | from utils_regressor_focal_dist import RotNetDataGenerator, angle_error, CustomModelCheckpoint 10 | from keras import optimizers 11 | import numpy as np 12 | import glob, math 13 | from shutil import copyfile 14 | import datetime, random 15 | import tensorflow as tf 16 | from keras.backend.tensorflow_backend import set_session 17 | 18 | config = tf.ConfigProto() 19 | config.gpu_options.visible_device_list= '0, 1' 20 | config.gpu_options.allow_growth = False 21 | config.allow_soft_placement = False 22 | config.log_device_placement = False 23 | set_session(tf.Session(config=config)) 24 | 25 | model_name = 'model_multi_class/' 26 | SAVE = "new_logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '/' 27 | # Save 28 | output_folder = SAVE + model_name 29 | if not os.path.exists(output_folder): 30 | os.makedirs(output_folder) 31 | 32 | output_log = output_folder + "Log/" 33 | if not os.path.exists(output_log): 34 | os.makedirs(output_log) 35 | 36 | output_weight = output_folder + "Best/" 37 | if not os.path.exists(output_weight): 38 | os.makedirs(output_weight) 39 | 40 | # training parameters 41 | batch_size = 50 42 | nb_epoch = 10000 43 | 44 | IMAGE_FILE_PATH_DISTORTED = "" 45 | 46 | focal_start = 40 47 | focal_end = 500 48 | dist_end = 1.2 49 | classes_focal = list(np.arange(focal_start, focal_end+1, 10)) 50 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 51 | 52 | 53 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 54 | paths_train = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'train/' + "*.jpg") 55 | paths_train.sort() 56 | parameters = [] 57 | labels_focal_train = [] 58 | for path in paths_train: 59 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 60 | labels_focal_train.append((curr_parameter - focal_start*1.) / (focal_end*1. 
61 |     labels_distortion_train = []
62 |     for path in paths_train:
63 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
64 |         labels_distortion_train.append(curr_parameter/1.2)
65 | 
66 |     c = list(zip(paths_train, labels_focal_train, labels_distortion_train))
67 |     random.shuffle(c)
68 |     paths_train, labels_focal_train, labels_distortion_train = zip(*c)
69 |     paths_train, labels_focal_train, labels_distortion_train = list(paths_train), list(labels_focal_train), list(labels_distortion_train)
70 |     labels_train = [list(a) for a in zip(labels_focal_train, labels_distortion_train)]
71 | 
72 |     paths_valid = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'valid/' + "*.jpg")
73 |     paths_valid.sort()
74 |     parameters = []
75 |     labels_focal_valid = []
76 |     for path in paths_valid:
77 |         curr_parameter = float((path.split('_f_'))[1].split('_d_')[0])
78 |         labels_focal_valid.append((curr_parameter-focal_start*1.)/(focal_end*1.-focal_start*1.)) #normalize between 0 and 1, same denominator as the training labels
79 |     labels_distortion_valid = []
80 |     for path in paths_valid:
81 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
82 |         labels_distortion_valid.append(curr_parameter/1.2)
83 | 
84 |     c = list(zip(paths_valid, labels_focal_valid, labels_distortion_valid))
85 |     random.shuffle(c)
86 |     paths_valid, labels_focal_valid, labels_distortion_valid = zip(*c)
87 |     paths_valid, labels_focal_valid, labels_distortion_valid = list(paths_valid), list(labels_focal_valid), list(labels_distortion_valid)
88 |     labels_valid = [list(a) for a in zip(labels_focal_valid, labels_distortion_valid)]
89 | 
90 | 
91 |     return paths_train, labels_train, paths_valid, labels_valid
92 | 
93 | 
94 | paths_train, labels_train, paths_valid, labels_valid = get_paths(IMAGE_FILE_PATH_DISTORTED)
95 | 
96 | print(len(paths_train), 'train samples')
97 | print(len(paths_valid), 'valid samples')
98 | 
99 | input_shape = (299, 299, 3)
100 | main_input = Input(shape=input_shape, dtype='float32', name='main_input')
101 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
102 | phi_features = phi_model.output
103 | phi_flattened = Flatten(name='phi-flattened')(phi_features)
104 | final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_flattened)
105 | final_output_distortion = Dense(1, activation='sigmoid', name='output_distortion')(phi_flattened)
106 | 
107 | layer_index = 0
108 | for layer in phi_model.layers:
109 |     layer.name = layer.name + "_phi"
110 | 
111 | model = Model(input=main_input, output=[final_output_focal, final_output_distortion])
112 | 
113 | learning_rate = 10 ** -6
114 | 
115 | adam = optimizers.Adam(lr=learning_rate)
116 | model.compile(loss={'output_focal':'logcosh', 'output_distortion':'logcosh'},
117 |               optimizer=adam,
118 |               metrics={'output_focal':'logcosh','output_distortion':'logcosh'}
119 |               )
120 | model.summary()
121 | model_json = model.to_json()  # serialise the full two-headed model, not just the backbone
122 | 
123 | with open(output_folder + "model.json", "w") as json_file:
124 |     json_file.write(model_json)
125 | 
126 | copyfile(os.path.basename(__file__), output_folder + os.path.basename(__file__))
127 | 
128 | tensorboard = TensorBoard(log_dir=output_log)
129 | 
130 | checkpointer = CustomModelCheckpoint(
131 |     model_for_saving=model,
132 |     filepath=output_weight + "weights_{epoch:02d}_{val_loss:.2f}.h5",
133 |     save_best_only=True,
134 |     monitor='val_loss',
135 |     save_weights_only=True
136 | )
137 | 
138 | generator_training = RotNetDataGenerator(input_shape=input_shape,
batch_size=batch_size, one_hot=True, 139 | preprocess_func=preprocess_input, shuffle=True).generate(paths_train, 140 | labels_train) 141 | generator_valid = RotNetDataGenerator(input_shape=input_shape, batch_size=batch_size, one_hot=True, 142 | preprocess_func=preprocess_input, shuffle=True).generate(paths_valid, 143 | labels_valid) 144 | 145 | # training loop 146 | model.fit_generator( 147 | generator=generator_training, 148 | steps_per_epoch=(len(paths_train) // batch_size), # 29977 149 | epochs=nb_epoch, 150 | validation_data=generator_valid, 151 | validation_steps=(len(paths_valid) // batch_size), 152 | callbacks=[tensorboard, checkpointer], 153 | use_multiprocessing=True, 154 | workers=2, 155 | #verbose=3 156 | ) 157 | -------------------------------------------------------------------------------- /prediction/Classification/Dual_Net/dist/predict_classifier_dist_to_textfile.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os, cv2, sys 4 | from keras.applications.inception_v3 import InceptionV3 5 | from keras.applications.imagenet_utils import preprocess_input 6 | from keras.models import Model 7 | from keras.layers import Dense, Flatten, Input 8 | from keras import optimizers 9 | import numpy as np 10 | import glob, os 11 | import tensorflow as tf 12 | from keras.backend.tensorflow_backend import set_session 13 | 14 | config = tf.ConfigProto() 15 | config.gpu_options.allow_growth = True 16 | config.allow_soft_placement = True 17 | set_session(tf.Session(config=config)) 18 | 19 | 20 | IMAGE_FILE_PATH_DISTORTED = "" 21 | path_to_weights = 'weights_06_3.17.h5' 22 | 23 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt' 24 | 25 | if os.path.exists(filename_results): 26 | sys.exit("file exists") 27 | 28 | classes_focal = list(np.arange(40, 501, 10)) 29 | classes_distortion = list(np.arange(0, 61, 1) / 50.) 30 | 31 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 32 | 33 | paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + "*.jpg") 34 | paths_test.sort() 35 | 36 | return paths_test 37 | 38 | paths_test = get_paths(IMAGE_FILE_PATH_DISTORTED) 39 | 40 | print(len(paths_test), 'test samples') 41 | 42 | with tf.device('/gpu:0'): 43 | input_shape = (299, 299, 3) 44 | main_input = Input(shape=input_shape, dtype='float32', name='main_input') 45 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape) 46 | phi_features = phi_model.output 47 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 48 | final_output_phi = Dense(len(classes_distortion), activation='softmax', name='fc181-phi')(phi_flattened) 49 | 50 | layer_index = 0 51 | for layer in phi_model.layers: 52 | layer.name = layer.name + "_phi" 53 | 54 | model = Model(input=main_input, output=final_output_phi) 55 | model.load_weights(path_to_weights) 56 | 57 | n_acc_focal = 0 58 | n_acc_dist = 0 59 | print(len(paths_test)) 60 | file = open(filename_results, 'a') 61 | for i, path in enumerate(paths_test): 62 | if i % 1000 == 0: 63 | print(i,' ',len(paths_test)) 64 | image = cv2.imread(path) 65 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 66 | image = image / 255. 67 | image = image - 0.5 68 | image = image * 2. 
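        # NOTE: the three scaling steps above already map the image to [-1, 1],
        # which is InceptionV3's native input range; imagenet_utils.preprocess_input
        # below then applies caffe-style mean subtraction on top. The combination is
        # kept as-is here because it presumably mirrors the training-time pipeline.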
69 |         image = np.expand_dims(image,0)
70 | 
71 |         image = preprocess_input(image)
72 | 
73 |         # predict the distortion class
74 |         prediction_dist = model.predict(image)
75 | 
76 |         # ground truth parsed from the filename; labels_test was never defined in this script
77 |         label_focal = float((path.split('_f_'))[1].split('_d_')[0])
78 |         label_dist = float((path.split('_d_'))[1].split('.jpg')[0])
79 |         n_acc_dist += classes_distortion[np.argmax(prediction_dist[0])]  # running sum of the predicted distortions
80 | 
81 |         file.write(path + '\tlabel_focal\t' + str(label_focal) + '\tlabel_dist\t' + str(label_dist) + '\tprediction_dist\t' + str(classes_distortion[np.argmax(prediction_dist[0])])+'\n')
82 | 
83 |     print('dist:')  # mean predicted distortion over the test set
84 |     print(n_acc_dist/len(paths_test))
85 |     file.close()
86 | 
--------------------------------------------------------------------------------
/prediction/Classification/Dual_Net/focal/predict_classifier_focal_to_textfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input
8 | from keras import optimizers
9 | import numpy as np
10 | import glob, os
11 | import tensorflow as tf
12 | from keras.backend.tensorflow_backend import set_session
13 | 
14 | config = tf.ConfigProto()
15 | config.gpu_options.allow_growth = True
16 | config.allow_soft_placement = True
17 | set_session(tf.Session(config=config))
18 | 
19 | 
20 | IMAGE_FILE_PATH_DISTORTED = ""
21 | path_to_weights = 'weights_06_3.17.h5'
22 | 
23 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt'
24 | 
25 | if os.path.exists(filename_results):
26 |     sys.exit("file exists")
27 | 
28 | classes_focal = list(np.arange(40, 501, 10))
29 | classes_distortion = list(np.arange(0, 61, 1) / 50.)
30 | 
31 | def get_paths(IMAGE_FILE_PATH_DISTORTED):
32 | 
33 |     paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + "*.jpg")
34 |     paths_test.sort()
35 | 
36 |     return paths_test
37 | 
38 | paths_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
39 | 
40 | print(len(paths_test), 'test samples')
41 | 
42 | with tf.device('/gpu:0'):
43 |     input_shape = (299, 299, 3)
44 |     main_input = Input(shape=input_shape, dtype='float32', name='main_input')
45 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
46 |     phi_features = phi_model.output
47 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
48 |     final_output_phi = Dense(len(classes_focal), activation='softmax', name='fc181-phi')(phi_flattened)
49 | 
50 |     layer_index = 0
51 |     for layer in phi_model.layers:
52 |         layer.name = layer.name + "_phi"
53 | 
54 |     model = Model(input=main_input, output=final_output_phi)
55 |     model.load_weights(path_to_weights)
56 | 
57 |     n_acc_focal = 0
58 |     n_acc_dist = 0
59 |     print(len(paths_test))
60 |     file = open(filename_results, 'a')
61 |     for i, path in enumerate(paths_test):
62 |         if i % 1000 == 0:
63 |             print(i,' ',len(paths_test))
64 |         image = cv2.imread(path)
65 |         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
66 |         image = image / 255.
67 |         image = image - 0.5
68 |         image = image * 2.
69 |         image = np.expand_dims(image,0)
70 | 
71 |         image = preprocess_input(image)
72 | 
73 |         # predict the focal-length class
74 |         prediction_focal = model.predict(image)
75 | 
76 |         # ground truth parsed from the filename; labels_test was never defined in this script
77 |         label_focal = float((path.split('_f_'))[1].split('_d_')[0])
78 |         label_dist = float((path.split('_d_'))[1].split('.jpg')[0])
79 |         n_acc_focal += classes_focal[np.argmax(prediction_focal[0])]  # running sum of the predicted focal lengths
80 | 
81 |         file.write(path + '\tprediction_focal\t' + str(classes_focal[np.argmax(prediction_focal[0])]) + '\tlabel_focal\t' + str(label_focal) + '\tlabel_dist\t' + str(label_dist) + '\n')
82 | 
83 |     print('focal:')  # mean predicted focal length over the test set
84 |     print(n_acc_focal/len(paths_test))
85 |     file.close()
86 | 
--------------------------------------------------------------------------------
/prediction/Classification/Seq_Net/dist/predict_classifier_dist_concat_focal_to_textfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input, Concatenate
8 | from keras.utils.np_utils import to_categorical
9 | from keras import optimizers
10 | import numpy as np
11 | import glob, os
12 | import tensorflow as tf
13 | from keras.backend.tensorflow_backend import set_session
14 | 
15 | config = tf.ConfigProto()
16 | config.gpu_options.allow_growth = True
17 | config.allow_soft_placement = True
18 | set_session(tf.Session(config=config))
19 | 
20 | 
21 | IMAGE_FILE_PATH_DISTORTED = ""
22 | path_to_weights = 'weights_05_3.17.h5'
23 | 
24 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt'
25 | 
26 | if os.path.exists(filename_results):
27 |     sys.exit("file exists")
28 | 
29 | classes_focal = list(np.arange(40, 501, 10))
30 | classes_distortion = list(np.arange(0, 61, 1) / 50.)
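# The dataset-generation scripts encode the ground-truth parameters in the file
# name, assumed throughout these predictors to follow the pattern
#   <name>_f_<focal>_d_<distortion>.jpg
# (a hypothetical img_0001_f_250_d_0.5.jpg would parse to focal=250.0, xi=0.5);
# the split('_f_') / split('_d_') calls in get_paths below rely on this.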
31 | 32 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 33 | 34 | paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'test/' + "*.jpg") 35 | 36 | paths_test.sort() 37 | parameters = [] 38 | labels_focal_test = [] 39 | for path in paths_test: 40 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 41 | parameters.append(curr_parameter) 42 | curr_class = classes_focal.index(curr_parameter) 43 | labels_focal_test.append(curr_class) 44 | labels_distortion_test = [] 45 | 46 | for path in paths_test: 47 | curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0]) 48 | parameters.append(curr_parameter) 49 | curr_class = classes_distortion.index(curr_parameter) 50 | labels_distortion_test.append(curr_class) 51 | 52 | c = list(zip(paths_test, labels_focal_test, labels_distortion_test)) 53 | 54 | paths_test, labels_focal_test, labels_distortion_test = zip(*c) 55 | paths_test, labels_focal_test, labels_distortion_test = list(paths_test), list(labels_focal_test), list( 56 | labels_distortion_test) 57 | labels_test = labels_distortion_test 58 | input_test = [list(a) for a in zip(paths_test, labels_focal_test)] 59 | 60 | return input_test, labels_test 61 | 62 | input_test, labels_test = get_paths(IMAGE_FILE_PATH_DISTORTED) 63 | 64 | print(len(input_test), 'test samples') 65 | 66 | with tf.device('/gpu:1'): 67 | image_shape = (299, 299, 3) 68 | image_input = Input(shape=image_shape, dtype='float32', name='main_input') 69 | input_shape_concat = (len(classes_focal),) 70 | concat_input = Input(shape=input_shape_concat, dtype='float32', name='concat_input') 71 | phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=image_input, input_shape=image_shape)#,pooling='avg') 72 | phi_features = phi_model.output 73 | phi_flattened = Flatten(name='phi-flattened')(phi_features) 74 | phi_concat = Concatenate(axis=-1)([phi_flattened,concat_input]) 75 | final_output_distortion = Dense(len(classes_distortion), activation='softmax', name='output_distortion')(phi_concat) 76 | 77 | layer_index = 0 78 | for layer in phi_model.layers: 79 | layer.name = layer.name + "_phi" 80 | 81 | model = Model(input=[image_input, concat_input], output=final_output_distortion) 82 | model.load_weights(path_to_weights) 83 | 84 | n_acc_focal = 0 85 | n_acc_dist = 0 86 | print(len(input_test)) 87 | file = open(filename_results, 'a') 88 | for i, curr_input in enumerate(input_test): 89 | if i % 1000 == 0: 90 | print(i,' ',len(input_test)) 91 | image = cv2.imread(curr_input[0]) 92 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 93 | image = image / 255. 94 | image = image - 0.5 95 | image = image * 2. 
96 |         image = np.expand_dims(image,0)
97 | 
98 |         image = preprocess_input(image)
99 | 
100 |         # predict the distortion class, conditioning on the focal one-hot
101 |         input_focal = to_categorical(int(curr_input[1]), len(classes_focal))
102 |         prediction_dist = model.predict({'main_input_phi':image, 'concat_input':input_focal.reshape((1, -1))})
103 | 
104 |         n_acc_dist += classes_distortion[np.argmax(prediction_dist[0])]  # running sum of the predicted distortions
105 | 
106 | 
107 |         file.write(curr_input[0] + '\tlabel_focal\t' + str(classes_focal[curr_input[1]]) + '\tlabel_dist\t' + str(classes_distortion[labels_test[i]]) + '\tprediction_dist\t' + str(classes_distortion[np.argmax(prediction_dist[0])])+'\n')
108 | 
109 |     print('dist:')  # mean predicted distortion over the test set
110 |     print(n_acc_dist/len(input_test))
111 |     file.close()
112 | 
--------------------------------------------------------------------------------
/prediction/Classification/Seq_Net/focal/predict_classifier_focal_concat_dist_to_textfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input, Concatenate
8 | from keras.utils.np_utils import to_categorical
9 | from keras import optimizers
10 | import numpy as np
11 | import glob, os
12 | import tensorflow as tf
13 | from keras.backend.tensorflow_backend import set_session
14 | 
15 | config = tf.ConfigProto()
16 | config.gpu_options.allow_growth = True
17 | config.allow_soft_placement = True
18 | set_session(tf.Session(config=config))
19 | 
20 | 
21 | IMAGE_FILE_PATH_DISTORTED = ""
22 | path_to_weights = 'weights_06_2.49.h5'
23 | 
24 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt'
25 | 
26 | if os.path.exists(filename_results):
27 |     sys.exit("file exists")
28 | 
29 | classes_focal = list(np.arange(40, 501, 10))
30 | classes_distortion = list(np.arange(0, 61, 1) / 50.)
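# Seq_Net evaluation: this network predicts one parameter while the other is
# supplied as a one-hot 'concat_input'. In this script the conditioning class
# is read from the ground-truth filename; in a fully sequential pipeline it
# would instead come from the other classifier's prediction.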
31 | 
32 | def get_paths(IMAGE_FILE_PATH_DISTORTED):
33 | 
34 |     paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'test/' + "*.jpg")
35 |     paths_test.sort()
36 |     parameters = []
37 |     labels_focal_test = []
38 |     for path in paths_test:
39 |         curr_parameter = float((path.split('_f_'))[1].split('_d_')[0])
40 |         parameters.append(curr_parameter)
41 |         curr_class = classes_focal.index(curr_parameter)
42 |         labels_focal_test.append(curr_class)
43 |     labels_distortion_test = []
44 | 
45 |     for path in paths_test:
46 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
47 |         parameters.append(curr_parameter)
48 |         curr_class = classes_distortion.index(curr_parameter)
49 |         labels_distortion_test.append(curr_class)
50 | 
51 |     c = list(zip(paths_test, labels_focal_test, labels_distortion_test))
52 | 
53 |     paths_test, labels_focal_test, labels_distortion_test = zip(*c)
54 |     paths_test, labels_focal_test, labels_distortion_test = list(paths_test), list(labels_focal_test), list(
55 |         labels_distortion_test)
56 |     labels_test = labels_focal_test  # this net predicts the focal class...
57 |     input_test = [list(a) for a in zip(paths_test, labels_distortion_test)]  # ...conditioned on the distortion class fed to concat_input
58 | 
59 |     return input_test, labels_test
60 | 
61 | input_test, labels_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
62 | 
63 | print(len(input_test), 'test samples')
64 | 
65 | with tf.device('/gpu:1'):
66 |     image_shape = (299, 299, 3)
67 |     image_input = Input(shape=image_shape, dtype='float32', name='main_input')
68 |     input_shape_concat = (len(classes_distortion),)
69 |     concat_input = Input(shape=input_shape_concat, dtype='float32', name='concat_input')
70 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=image_input, input_shape=image_shape)
71 |     phi_features = phi_model.output
72 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
73 |     phi_concat = Concatenate(axis=-1)([phi_flattened,concat_input])
74 |     final_output_focal = Dense(len(classes_focal), activation='softmax', name='output_focal')(phi_concat)
75 | 
76 |     layer_index = 0
77 |     for layer in phi_model.layers:
78 |         layer.name = layer.name + "_phi"
79 | 
80 |     model = Model(input=[image_input, concat_input], output=final_output_focal)
81 |     model.load_weights(path_to_weights)
82 | 
83 |     n_acc_focal = 0
84 |     n_acc_dist = 0
85 |     print(len(input_test))
86 |     file = open(filename_results, 'a')
87 |     for i, curr_input in enumerate(input_test):
88 |         if i % 1000 == 0:
89 |             print(i,' ',len(input_test))
90 |         image = cv2.imread(curr_input[0])
91 |         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
92 |         image = image / 255.
93 |         image = image - 0.5
94 |         image = image * 2.
95 |         image = np.expand_dims(image,0)
96 | 
97 |         image = preprocess_input(image)
98 | 
99 |         # predict the focal class, conditioning on the distortion one-hot
100 |         input_distortion = to_categorical(int(curr_input[1]), len(classes_distortion))
101 |         prediction_focal = model.predict({'main_input_phi':image, 'concat_input':input_distortion.reshape((1, -1))})
102 | 
103 |         n_acc_focal += classes_focal[np.argmax(prediction_focal[0])]  # running sum of the predicted focal lengths
104 | 
105 |         file.write(curr_input[0] + '\tlabel_distortion\t' + str(classes_distortion[curr_input[1]]) + '\tlabel_focal\t' + str(classes_focal[labels_test[i]]) + '\tprediction_focal\t' + str(classes_focal[np.argmax(prediction_focal[0])])+'\n')
106 | 
107 |     print('focal:')  # mean predicted focal length over the test set
108 |     print(n_acc_focal/len(input_test))
109 |     file.close()
110 | 
--------------------------------------------------------------------------------
/prediction/Classification/Single_net/predict_classifier_dist_focal.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input
8 | from keras import optimizers
9 | import numpy as np
10 | import glob, os
11 | import tensorflow as tf
12 | from keras.backend.tensorflow_backend import set_session
13 | 
14 | results_path = ""
15 | IMAGE_FILE_PATH_DISTORTED = ""
16 | 
17 | path_to_weights = 'weights_06_5.61.h5'
18 | 
19 | filename_results = results_path + 'airport.txt'
20 | 
21 | classes_focal = list(np.arange(40, 501, 10))
22 | classes_distortion = list(np.arange(0, 61, 1) / 50.)
23 | 
24 | 
25 | def get_paths(IMAGE_FILE_PATH_DISTORTED):
26 |     paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + "*.jpg")
27 |     paths_test.sort()
28 | 
29 |     return paths_test
30 | 
31 | 
32 | paths_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
33 | 
34 | print(len(paths_test), 'test samples')
35 | 
36 | with tf.device('/gpu:0'):
37 |     input_shape = (299, 299, 3)
38 |     main_input = Input(shape=input_shape, dtype='float32', name='main_input')
39 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
40 |     phi_features = phi_model.output
41 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
42 |     final_output_focal = Dense(len(classes_focal), activation='softmax', name='output_focal')(phi_flattened)
43 |     final_output_distortion = Dense(len(classes_distortion), activation='softmax', name='output_distortion')(
44 |         phi_flattened)
45 | 
46 |     layer_index = 0
47 |     for layer in phi_model.layers:
48 |         layer.name = layer.name + "_phi"
49 | 
50 |     model = Model(input=main_input, output=[final_output_focal, final_output_distortion])
51 |     model.load_weights(path_to_weights)
52 | 
53 |     n_acc_focal = 0
54 |     n_acc_dist = 0
55 |     print(len(paths_test))
56 | 
57 |     file = open(filename_results, 'a')
58 | 
59 |     #input image
60 |     image = cv2.imread('')  # placeholder -- put the path to your input image here
61 | 
62 |     image = cv2.resize(image, (299, 299))
63 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
64 |     image = image / 255.
65 | 
66 |     image = image - 0.5
67 | 
68 |     image = image * 2.
69 |     image = np.expand_dims(image, 0)
70 | 
71 |     image = preprocess_input(image)
72 | 
73 |     # a single forward pass returns both heads
74 |     prediction_focal, prediction_dist = model.predict(image)
75 | 
76 |     n_acc_focal += classes_focal[np.argmax(prediction_focal[0])]
77 |     n_acc_dist += classes_distortion[np.argmax(prediction_dist[0])]
78 | 
79 |     #file.write(path + '\tprediction_focal\t' + str(
80 |     #classes_focal[np.argmax(prediction_focal[0])]) + '\tprediction_dist\t' + str(
81 |     # classes_distortion[np.argmax(prediction_dist[0])]) + '\n')
82 |     # print(' ')
83 |     print('focal:')
84 |     print(classes_focal[np.argmax(prediction_focal[0])])
85 | 
86 |     print('dist:')
87 |     print(classes_distortion[np.argmax(prediction_dist[0])])
88 | 
--------------------------------------------------------------------------------
/prediction/Regression/Dual_Net/dist/predict_regressor_dist_to_textfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input
8 | from keras import optimizers
9 | import numpy as np
10 | import glob, os
11 | import tensorflow as tf
12 | from keras.backend.tensorflow_backend import set_session
13 | 
14 | config = tf.ConfigProto()
15 | config.gpu_options.allow_growth = True
16 | config.allow_soft_placement = True
17 | set_session(tf.Session(config=config))
18 | 
19 | 
20 | 
21 | IMAGE_FILE_PATH_DISTORTED = ""
22 | path_to_weights = 'weights_06_0.02.h5'
23 | 
24 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt'
25 | 
26 | if os.path.exists(filename_results):
27 |     sys.exit("file exists")
28 | 
29 | focal_start = 80
30 | focal_end = 300
31 | classes_focal = list(np.arange(focal_start, focal_end+1, 10))
32 | classes_distortion = list(np.arange(0, 41, 1) / 40.)
33 | 
34 | def get_paths(IMAGE_FILE_PATH_DISTORTED):
35 | 
36 |     paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'test/' + "*.jpg")
37 |     paths_test.sort()
38 |     parameters = []
39 |     labels_focal_test = []
40 |     for path in paths_test:
41 |         curr_parameter = float((path.split('_f_'))[1].split('_d_')[0])
42 |         labels_focal_test.append((curr_parameter - focal_start*1.) / (focal_end+1. - focal_start*1.)) #normalize between 0 and 1
43 |     labels_distortion_test = []
44 |     for path in paths_test:
45 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
46 |         labels_distortion_test.append(curr_parameter/1.2)  # normalize to [0, 1] as in training
47 | 
48 |     c = list(zip(paths_test, labels_focal_test, labels_distortion_test))
49 |     paths_test, labels_focal_test, labels_distortion_test = zip(*c)
50 |     paths_test, labels_focal_test, labels_distortion_test = list(paths_test), list(labels_focal_test), list(
51 |         labels_distortion_test)
52 |     labels_test = [list(a) for a in zip(labels_focal_test, labels_distortion_test)]
53 | 
54 |     return paths_test, labels_test
55 | 
56 | paths_test, labels_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
57 | 
58 | print(len(paths_test), 'test samples')
59 | 
60 | with tf.device('/gpu:0'):
61 |     input_shape = (299, 299, 3)
62 |     main_input = Input(shape=input_shape, dtype='float32', name='main_input')
63 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
64 |     phi_features = phi_model.output
65 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
66 |     final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_flattened)
67 |     final_output_distortion = Dense(1, activation='sigmoid', name='output_distortion')(phi_flattened)
68 | 
69 |     layer_index = 0
70 |     for layer in phi_model.layers:
71 |         layer.name = layer.name + "_phi"
72 | 
73 |     model = Model(input=main_input, output=[final_output_focal, final_output_distortion])
74 |     model.load_weights(path_to_weights)
75 | 
76 |     n_acc_focal = 0
77 |     n_acc_dist = 0
78 |     print(len(paths_test))
79 |     file = open(filename_results, 'a')
80 |     for i, path in enumerate(paths_test):
81 |         if i % 1000 == 0:
82 |             print(i,' ',len(paths_test))
83 |         image = cv2.imread(path)
84 |         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
85 |         image = image / 255.
86 |         image = image - 0.5
87 |         image = image * 2.
88 |         image = np.expand_dims(image,0)
89 | 
90 |         image = preprocess_input(image)
91 | 
92 |         # one forward pass returns both heads
93 |         prediction_focal, prediction_dist = model.predict(image)
94 | 
95 |         # NOTE: argmax of a 1-element regression output is always 0 -- these counters are residue from the classifier scripts
96 |         if np.argmax(prediction_focal[0]) == labels_test[i][0]:
97 |             n_acc_focal = n_acc_focal + 1
98 |         if np.argmax(prediction_dist[0]) == labels_test[i][1]:
99 |             n_acc_dist = n_acc_dist + 1
100 | 
101 |         curr_focal_label = labels_test[i][0] * (focal_end+1. - focal_start*1.) + focal_start*1.
102 |         curr_focal_pred = prediction_focal[0][0] * (focal_end+1. - focal_start*1.) + focal_start*1.
103 | curr_dist_label = labels_test[i][1]*1.2 104 | curr_dist_pred = prediction_dist[0][0]*1.2 105 | file.write(path + '\tlabel_focal\t' + str(curr_focal_label) + '\tprediction_focal\t' + str(curr_focal_pred) + '\tlabel_dist\t' + str(curr_dist_label) + '\tprediction_dist\t' + str(curr_dist_pred)+'\n') 106 | print('focal:') 107 | print(n_acc_focal) 108 | print(len(paths_test)) 109 | print(n_acc_focal*1.0/(len(paths_test)*1.0)) 110 | 111 | print('dist:') 112 | print(n_acc_dist) 113 | print(len(paths_test)) 114 | print(n_acc_dist * 1.0 / (len(paths_test) * 1.0)) 115 | file.close() 116 | -------------------------------------------------------------------------------- /prediction/Regression/Dual_Net/focal/predict_regressor_focal_to_textfile.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os, cv2, sys 4 | from keras.applications.inception_v3 import InceptionV3 5 | from keras.applications.imagenet_utils import preprocess_input 6 | from keras.models import Model 7 | from keras.layers import Dense, Flatten, Input 8 | from keras import optimizers 9 | import numpy as np 10 | import glob, os 11 | import tensorflow as tf 12 | from keras.backend.tensorflow_backend import set_session 13 | 14 | config = tf.ConfigProto() 15 | config.gpu_options.allow_growth = True 16 | config.allow_soft_placement = True 17 | set_session(tf.Session(config=config)) 18 | 19 | IMAGE_FILE_PATH_DISTORTED = "" 20 | path_to_weights = 'weights_06_0.02.h5' 21 | 22 | filename_results = os.path.split(path_to_weights)[0]+'/results.txt' 23 | 24 | if os.path.exists(filename_results): 25 | sys.exit("file exists") 26 | 27 | focal_start = 80 28 | focal_end = 300 29 | classes_focal = list(np.arange(focal_start, focal_end+1, 10)) 30 | classes_distortion = list(np.arange(0, 41, 1) / 40.) 31 | 32 | def get_paths(IMAGE_FILE_PATH_DISTORTED): 33 | 34 | paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + 'test/' + "*.jpg") 35 | paths_test.sort() 36 | parameters = [] 37 | labels_focal_test = [] 38 | for path in paths_test: 39 | curr_parameter = float((path.split('_f_'))[1].split('_d_')[0]) 40 | labels_focal_test.append((curr_parameter - focal_start*1.) / (focal_end+1. 
- focal_start*1.)) #normalize between 0 and 1
41 |     labels_distortion_test = []
42 |     for path in paths_test:
43 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
44 |         labels_distortion_test.append(curr_parameter/1.2)  # normalize to [0, 1] as in training
45 | 
46 |     c = list(zip(paths_test, labels_focal_test, labels_distortion_test))
47 |     paths_test, labels_focal_test, labels_distortion_test = zip(*c)
48 |     paths_test, labels_focal_test, labels_distortion_test = list(paths_test), list(labels_focal_test), list(
49 |         labels_distortion_test)
50 |     labels_test = [list(a) for a in zip(labels_focal_test, labels_distortion_test)]
51 | 
52 |     return paths_test, labels_test
53 | 
54 | paths_test, labels_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
55 | 
56 | print(len(paths_test), 'test samples')
57 | 
58 | with tf.device('/gpu:0'):
59 |     input_shape = (299, 299, 3)
60 |     main_input = Input(shape=input_shape, dtype='float32', name='main_input')
61 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
62 |     phi_features = phi_model.output
63 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
64 |     final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_flattened)
65 | 
66 |     layer_index = 0
67 |     for layer in phi_model.layers:
68 |         layer.name = layer.name + "_phi"
69 | 
70 |     model = Model(input=main_input, output=final_output_focal)
71 |     model.load_weights(path_to_weights)
72 | 
73 |     n_acc_focal = 0
74 |     print(len(paths_test))
75 |     file = open(filename_results, 'a')
76 |     for i, path in enumerate(paths_test):
77 |         if i % 1000 == 0:
78 |             print(i,' ',len(paths_test))
79 |         image = cv2.imread(path)
80 |         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
81 |         image = image / 255.
82 |         image = image - 0.5
83 |         image = image * 2.
84 |         image = np.expand_dims(image,0)
85 | 
86 |         image = preprocess_input(image)
87 | 
88 |         # single focal head: predict() returns one array of shape (1, 1)
89 |         prediction_focal = model.predict(image)
90 | 
91 |         # NOTE: argmax of a 1-element regression output is always 0 -- this counter is residue from the classifier scripts
92 |         if np.argmax(prediction_focal[0]) == labels_test[i][0]:
93 |             n_acc_focal = n_acc_focal + 1
94 | 
95 |         curr_focal_label = labels_test[i][0] * (focal_end+1. - focal_start*1.) + focal_start*1.
96 |         curr_focal_pred = prediction_focal[0][0] * (focal_end+1. - focal_start*1.) + focal_start*1.
97 |         curr_dist_label = labels_test[i][1]*1.2
98 |         file.write(path + '\tlabel_focal\t' + str(curr_focal_label) + '\tprediction_focal\t' + str(curr_focal_pred) + '\tlabel_dist\t' + str(curr_dist_label) + '\n')
99 | 
100 |     print('focal:')
101 |     print(n_acc_focal)
102 |     print(len(paths_test))
103 |     print(n_acc_focal*1.0/(len(paths_test)*1.0))
104 |     # (this model has no distortion head, so no distortion metrics are reported)
105 |     file.close()
106 | 
--------------------------------------------------------------------------------
/prediction/Regression/Single_net/predict_regressor_dist_focal_to_textfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import os, cv2, sys
4 | from keras.applications.inception_v3 import InceptionV3
5 | from keras.applications.imagenet_utils import preprocess_input
6 | from keras.models import Model
7 | from keras.layers import Dense, Flatten, Input
8 | from keras import optimizers
9 | import numpy as np
10 | import glob
11 | import tensorflow as tf
12 | from keras.backend.tensorflow_backend import set_session
13 | 
14 | os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
15 | os.environ["CUDA_VISIBLE_DEVICES"]="0"
16 | 
17 | config = tf.ConfigProto()
18 | config.gpu_options.allow_growth = True
19 | config.allow_soft_placement = True
20 | set_session(tf.Session(config=config))
21 | 
22 | IMAGE_FILE_PATH_DISTORTED = ""
23 | path_to_weights = 'weights_10_0.02.h5'
24 | IMAGE_SIZE = 299
25 | INPUT_SIZE = 299
26 | 
27 | filename_results = 'results.txt'
28 | 
29 | if os.path.exists(filename_results):
30 |     sys.exit("file exists")
31 | 
32 | focal_start = 40
33 | focal_end = 500
34 | classes_focal = list(np.arange(focal_start, focal_end+1, 10))
35 | classes_distortion = list(np.arange(0, 61, 1) / 50.)
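# Note: the regressors emit sigmoid outputs in [0, 1]; the loop below maps them
# back to physical units with the inverse of the training normalisation
# (focal length via the [focal_start, focal_end] range, distortion via the factor 1.2).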
36 | 
37 | def get_paths(IMAGE_FILE_PATH_DISTORTED):
38 | 
39 |     paths_test = glob.glob(IMAGE_FILE_PATH_DISTORTED + "*.jpg")
40 |     paths_test.sort()
41 |     parameters = []
42 |     labels_focal_test = []
43 |     for path in paths_test:
44 |         curr_parameter = float((path.split('_f_'))[1].split('_d_')[0])
45 |         labels_focal_test.append(curr_parameter)
46 |     labels_distortion_test = []
47 |     for path in paths_test:
48 |         curr_parameter = float((path.split('_d_'))[1].split('.jpg')[0])
49 |         labels_distortion_test.append(curr_parameter)
50 | 
51 |     c = list(zip(paths_test, labels_focal_test, labels_distortion_test))
52 |     paths_test, labels_focal_test, labels_distortion_test = zip(*c)
53 |     paths_test, labels_focal_test, labels_distortion_test = list(paths_test), list(labels_focal_test), list(
54 |         labels_distortion_test)
55 |     labels_test = [list(a) for a in zip(labels_focal_test, labels_distortion_test)]
56 | 
57 |     return paths_test, labels_test
58 | 
59 | paths_test, labels_test = get_paths(IMAGE_FILE_PATH_DISTORTED)
60 | 
61 | print(len(paths_test), 'test samples')
62 | 
63 | with tf.device('/gpu:0'):
64 |     input_shape = (299, 299, 3)
65 |     main_input = Input(shape=input_shape, dtype='float32', name='main_input')
66 |     phi_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=main_input, input_shape=input_shape)
67 |     phi_features = phi_model.output
68 |     phi_flattened = Flatten(name='phi-flattened')(phi_features)
69 |     final_output_focal = Dense(1, activation='sigmoid', name='output_focal')(phi_flattened)
70 |     final_output_distortion = Dense(1, activation='sigmoid', name='output_distortion')(phi_flattened)
71 | 
72 |     layer_index = 0
73 |     for layer in phi_model.layers:
74 |         layer.name = layer.name + "_phi"
75 | 
76 |     model = Model(input=main_input, output=[final_output_focal, final_output_distortion])
77 |     model.load_weights(path_to_weights)
78 | 
79 |     n_acc_focal = 0
80 |     n_acc_dist = 0
81 |     print(len(paths_test))
82 |     file = open(filename_results, 'a')
83 |     for i, path in enumerate(paths_test):
84 |         if i % 1000 == 0:
85 |             print(i,' ',len(paths_test))
86 |         image = cv2.imread(path)
87 |         image = cv2.resize(image,(INPUT_SIZE,INPUT_SIZE))
88 |         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
89 |         image = image / 255.
90 |         image = image - 0.5
91 |         image = image * 2.
92 |         image = np.expand_dims(image,0)
93 | 
94 |         image = preprocess_input(image)
95 | 
96 |         # one forward pass returns both heads
97 |         prediction_focal, prediction_dist = model.predict(image)
98 |         # NOTE: argmax of a 1-element regression output is always 0 -- these counters are residue from the classifier scripts
99 | 
100 |         if np.argmax(prediction_focal[0]) == labels_test[i][0]:
101 |             n_acc_focal = n_acc_focal + 1
102 |         if np.argmax(prediction_dist[0]) == labels_test[i][1]:
103 |             n_acc_dist = n_acc_dist + 1
104 | 
105 |         curr_focal_label = labels_test[i][0]
106 |         curr_focal_pred = (prediction_focal[0][0] * (focal_end+1. - focal_start*1.) + focal_start*1.
) * (IMAGE_SIZE*1.0) / (INPUT_SIZE*1.0) 107 | curr_dist_label = labels_test[i][1] 108 | curr_dist_pred = prediction_dist[0][0]*1.2 109 | file.write(path + '\tlabel_focal\t' + str(curr_focal_label) + '\tprediction_focal\t' + str(curr_focal_pred) + '\tlabel_dist\t' + str(curr_dist_label) + '\tprediction_dist\t' + str(curr_dist_pred)+'\n') 110 | 111 | print('focal:') 112 | print(n_acc_focal) 113 | print(len(paths_test)) 114 | print(n_acc_focal*1.0/(len(paths_test)*1.0)) 115 | 116 | print('dist:') 117 | print(n_acc_dist) 118 | print(len(paths_test)) 119 | print(n_acc_dist * 1.0 / (len(paths_test) * 1.0)) 120 | file.close() 121 | -------------------------------------------------------------------------------- /undistortion/undistSphIm.m: -------------------------------------------------------------------------------- 1 | function Image_und = undistSphIm(Idis, Paramsd, Paramsund) 2 | 3 | %Paramsund=Paramsd; 4 | Paramsund.W=Paramsd.W*3; % size of output (undist) 5 | Paramsund.H=Paramsd.H*3; 6 | %Paramsund.f=Paramsd.f; 7 | 8 | 9 | %Parameters of the camera to generate 10 | f_dist = Paramsd.f; 11 | u0_dist = Paramsd.W/2; 12 | v0_dist = Paramsd.H/2; 13 | % 14 | f_undist = Paramsund.f; 15 | u0_undist = Paramsund.W/2; 16 | v0_undist = Paramsund.H/2; 17 | xi = Paramsd.xi; % distortion parameters (spherical model) 18 | [Imd.H, Imd.W, ~] = size(Idis); 19 | 20 | tic 21 | % 1. Projection on the image 22 | [grid_x, grid_y] = meshgrid(1:Paramsund.W,1:Paramsund.H); 23 | X_Cam = grid_x./f_undist - u0_undist/f_undist; 24 | Y_Cam = grid_y./f_undist - v0_undist/f_undist; 25 | Z_Cam = ones(Paramsund.H,Paramsund.W); 26 | 27 | %2. Image to sphere cart 28 | xi1 = 0; 29 | alpha_cam = ( xi1.*Z_Cam + sqrt( Z_Cam.^2 + ... 30 | ( (1-xi1^2).*(X_Cam.^2 + Y_Cam.^2) ) ) ) ... 31 | ./ (X_Cam.^2 + Y_Cam.^2 + Z_Cam.^2); 32 | 33 | X_Sph = X_Cam.*alpha_cam; 34 | Y_Sph = Y_Cam.*alpha_cam; 35 | Z_Sph = (Z_Cam.*alpha_cam) - xi1; 36 | 37 | %3. reprojection on distorted 38 | den = xi*(sqrt(X_Sph.^2 + Y_Sph.^2 + Z_Sph.^2)) + Z_Sph; 39 | X_d = ((X_Sph*f_dist)./den)+u0_dist; 40 | Y_d = ((Y_Sph*f_dist)./den)+v0_dist; 41 | 42 | %4. 
Final step: interpolation and mapping
43 | Image_und=zeros(Paramsund.H,Paramsund.W,3);
44 | for c=1:3
45 |     Image_und(:,:,c) = interp2(im2double(Idis(:,:,c)), X_d, Y_d, 'cubic');
46 | end
47 | toc;
48 | [Im_und.H, Im_und.W, ~] = size(Image_und);
49 | %ROI (debug: extents of the sampling grid)
50 | % min(X_d(:)), max(X_d(:));
51 | % min(Y_d(:)), max(Y_d(:));
52 | % size(Idis);
--------------------------------------------------------------------------------
/undistortion/undist_from_txt.m:
--------------------------------------------------------------------------------
1 | clear
2 | close all
3 | clc
4 | %% each line of the txt file is assumed to hold: <image path> <focal> <distortion>
5 | dist_folder = 'folder_to_save_undistorted_images';
6 | if ~exist(dist_folder, 'dir')
7 |     mkdir(dist_folder);
8 | end
9 | 
10 | fileID = fopen('path_to_txt_file','r');
11 | file_content = textscan(fileID,'%s %f %f');  % one cell per column, so {2} and {3} below are numeric
12 | fclose(fileID);
13 | 
14 | paths = file_content{1};
15 | focal = file_content{2};
16 | distortion = file_content{3};
17 | 
18 | hfig=figure;
19 | f = 0;
20 | dist = 0;
21 | 
22 | for i=1:length(paths)
23 | 
24 |     Idis = imread(paths{i});
25 | 
26 |     % xi = 1.08;
27 |     xi = distortion(i); % distortion
28 |     dist = dist + xi;
29 |     [ImH,ImW,~] = size(Idis);
30 |     % f_dist = 320 * (ImW/ImH) * (ImH/299);
31 |     f_dist = focal(i) * (ImW/ImH) * (ImH/299); % focal length
32 |     f = f + f_dist;
33 |     u0_dist = ImW/2;
34 |     v0_dist = ImH/2;
35 |     Paramsd.f = f_dist;
36 |     Paramsd.W = u0_dist*2;
37 |     Paramsd.H = v0_dist*2;
38 |     Paramsd.xi = xi;
39 | 
40 |     Paramsund.f = f_dist;
41 |     Paramsund.W = u0_dist*2;
42 |     Paramsund.H = v0_dist*2;
43 | 
44 |     tic
45 |     Image_und = undistSphIm(Idis, Paramsd, Paramsund);
46 |     toc
47 | 
48 |     %if (size(Image_und,1)~=0)
49 |     paths_list = strsplit(paths{i}, '/');
50 |     res1 = strsplit(paths_list{end}, '_');  % last path component = the file name
51 |     res2 = strsplit(res1{2}, '.');
52 | 
53 |     out = str2double(res2{1});
54 | 
55 |     filename = [sprintf('%04d', out),'.jpg'];
56 |     fullname = fullfile(dist_folder,filename);
57 |     imwrite(Image_und,fullname);
58 |     %end
59 | end
--------------------------------------------------------------------------------
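For readers without MATLAB, the mapping implemented by undistSphIm.m can be sketched in Python/OpenCV as below. This is a minimal sketch under the same assumptions as the .m file (unified spherical model, xi1 = 0 on the target perspective camera, the same focal length for both cameras, 3x output canvas); the function name, argument names, and the example values in the usage comment are illustrative, not part of this repository.

import cv2
import numpy as np

def undist_sph_im(img_dist, f, xi, scale=3):
    """Undistort img_dist given focal length f (pixels) and distortion xi."""
    h, w = img_dist.shape[:2]
    H, W = scale * h, scale * w              # enlarged output canvas, as in the .m file
    u0, v0 = w / 2.0, h / 2.0                # distorted principal point
    U0, V0 = W / 2.0, H / 2.0                # undistorted principal point

    # 1. Back-project every output pixel to a normalized perspective ray
    gx, gy = np.meshgrid(np.arange(W, dtype=np.float64),
                         np.arange(H, dtype=np.float64))
    X = (gx - U0) / f
    Y = (gy - V0) / f
    Z = np.ones_like(X)

    # 2. Lift the rays onto the unit sphere (xi1 = 0 reduces alpha to 1/norm)
    norm = np.sqrt(X**2 + Y**2 + Z**2)
    Xs, Ys, Zs = X / norm, Y / norm, Z / norm

    # 3. Reproject through the distorted spherical camera
    den = xi * np.sqrt(Xs**2 + Ys**2 + Zs**2) + Zs   # equals xi + Zs on the unit sphere
    map_x = (Xs * f / den + u0).astype(np.float32)
    map_y = (Ys * f / den + v0).astype(np.float32)

    # 4. Sample the distorted image at the computed coordinates
    return cv2.remap(img_dist, map_x, map_y, interpolation=cv2.INTER_CUBIC)

# Hypothetical usage with a predicted focal length and distortion:
#   und = undist_sph_im(cv2.imread('photo.jpg'), f=250.0, xi=0.5)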