├── dataset
│   ├── images
│   │   ├── 0_0.jpg
│   │   ├── 0_1.jpg
│   │   ├── 0_2.jpg
│   │   ├── 0_3.jpg
│   │   ├── 0_4.jpg
│   │   ├── 0_5.jpg
│   │   ├── 0_6.jpg
│   │   ├── 0_7.jpg
│   │   ├── 0_8.jpg
│   │   ├── 0_9.jpg
│   │   ├── 1_0.jpg
│   │   ├── 1_1.jpg
│   │   ├── 1_2.jpg
│   │   ├── 1_3.jpg
│   │   ├── 1_4.jpg
│   │   ├── 1_5.jpg
│   │   ├── 1_6.jpg
│   │   ├── 1_7.jpg
│   │   ├── 1_8.jpg
│   │   ├── 1_9.jpg
│   │   ├── 2_0.jpg
│   │   ├── 2_1.jpg
│   │   ├── 2_2.jpg
│   │   ├── 2_3.jpg
│   │   ├── 2_4.jpg
│   │   ├── 2_5.jpg
│   │   ├── 2_6.jpg
│   │   ├── 2_7.jpg
│   │   ├── 2_8.jpg
│   │   ├── 2_9.jpg
│   │   ├── 3_0.jpg
│   │   ├── 3_1.jpg
│   │   ├── 3_2.jpg
│   │   ├── 3_3.jpg
│   │   ├── 3_4.jpg
│   │   ├── 3_5.jpg
│   │   ├── 3_6.jpg
│   │   ├── 3_7.jpg
│   │   ├── 3_8.jpg
│   │   ├── 3_9.jpg
│   │   ├── 4_0.jpg
│   │   ├── 4_1.jpg
│   │   ├── 4_2.jpg
│   │   ├── 4_3.jpg
│   │   ├── 4_4.jpg
│   │   ├── 4_5.jpg
│   │   ├── 4_6.jpg
│   │   ├── 4_7.jpg
│   │   ├── 4_8.jpg
│   │   └── 4_9.jpg
│   └── config.yaml
├── sample_output
│   ├── 1_all.gif
│   ├── screenshot-1.png
│   ├── screenshot-2.png
│   └── stdout
├── viewer
│   ├── images
│   │   └── preloader.gif
│   └── js
│       ├── TrackballControls.js
│       ├── OrbitControls.js
│       └── journey.js
├── clean.sh
├── LICENSE.md
├── README.md
└── run_all.py
--------------------------------------------------------------------------------
/dataset/images/0_0.jpg ... /dataset/images/4_9.jpg, /sample_output/1_all.gif,
/sample_output/screenshot-1.png, /sample_output/screenshot-2.png,
/viewer/images/preloader.gif:
--------------------------------------------------------------------------------
Binary image files; each is available at
https://raw.githubusercontent.com/cberzan/highway-sfm/HEAD/<path-within-repo>
--------------------------------------------------------------------------------
/dataset/config.yaml:
--------------------------------------------------------------------------------
1 | # OpenSfM will use the default parameters from opensfm/config.py
2 | # Set here any parameter that you want to override for this dataset
3 | 
4 | triangulation_min_ray_angle: 0.3
--------------------------------------------------------------------------------
/clean.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Clean dataset dir.
4 | # (Script copied from the opensfm repo and modified.)
5 | # Usage: `./clean.sh opensfm-dataset-dir` 6 | 7 | trash=$1/trash/`date -u +"%Y-%m-%dT%H:%M:%SZ"` 8 | mkdir -p $trash 9 | 10 | mv -vf $1/reconstruction*.json $trash 11 | mv -vf $1/exif $trash 12 | mv -vf $1/matches $trash 13 | mv -vf $1/sift $trash 14 | mv -vf $1/surf $trash 15 | mv -vf $1/akaze* $trash 16 | mv -vf $1/root* $trash 17 | mv -vf $1/hahog $trash 18 | mv -vf $1/camera_models.json $trash 19 | mv -vf $1/reference_lla.json $trash 20 | mv -vf $1/profile.log $trash 21 | mv -vf $1/navigation_graph.json $trash 22 | mv -vf $1/plot_inliers $trash 23 | mv -vf $1/depthmaps $trash 24 | mv -vf $1/tracks.csv $trash 25 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The code in `run_all.py` is released under the MIT license: 2 | 3 | > Copyright (c) 2016 Constantin Berzan 4 | > 5 | > Permission is hereby granted, free of charge, to any person obtaining a copy 6 | > of this software and associated documentation files (the "Software"), to deal 7 | > in the Software without restriction, including without limitation the rights 8 | > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | > copies of the Software, and to permit persons to whom the Software is 10 | > furnished to do so, subject to the following conditions: 11 | > 12 | > The above copyright notice and this permission notice shall be included in all 13 | > copies or substantial portions of the Software. 14 | > 15 | > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | > SOFTWARE. 22 | 23 | The images in `dataset/images/` are publicly available 24 | [here](http://geohot.com/projective_challenge.tar.gz) and belong to their 25 | respective authors. 26 | 27 | The visualization code in `viewer/` is a modified version of the viewer from 28 | OpenSFM, and inherits their BSD license: 29 | 30 | > Copyright (c) 2014, mapillary 31 | > All rights reserved. 32 | > 33 | > Redistribution and use in source and binary forms, with or without 34 | > modification, are permitted provided that the following conditions are met: 35 | > 36 | > * Redistributions of source code must retain the above copyright notice, this 37 | > list of conditions and the following disclaimer. 38 | > 39 | > * Redistributions in binary form must reproduce the above copyright notice, 40 | > this list of conditions and the following disclaimer in the documentation 41 | > and/or other materials provided with the distribution. 42 | > 43 | > THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 44 | > AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 45 | > IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 46 | > DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
47 | > FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 | > DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 | > SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
50 | > CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
51 | > OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 | > OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Structure from motion (SFM) for highway driving scenes
2 | 
3 | We are given five videos of driving the same stretch of highway. For example,
4 | one of the videos looks like this:
5 | 
6 | ![video 1](sample_output/1_all.gif)
7 | 
8 | The goal is to estimate the relative 3D pose of the camera in each frame of
9 | each video (up to scale). This repo shows a way to solve this problem using a
10 | custom SFM pipeline built on top of
11 | [OpenSFM](https://github.com/mapillary/OpenSfM).
12 | 
13 | The screenshots below show what the final map looks like. The cameras are
14 | color-coded to identify each video: red for `0_*.jpg`, green for `1_*.jpg`,
15 | blue for `2_*.jpg`, yellow for `3_*.jpg`, and magenta for `4_*.jpg`. It can be
16 | seen clearly that the videos were taken from two different lanes.
17 | 
18 | ![screenshot-1](sample_output/screenshot-1.png)
19 | 
20 | ![screenshot-2](sample_output/screenshot-2.png)
21 | 
22 | The following files are provided:
23 | 
24 |     dataset/       - contains the images from each video
25 |     sample_output/ - example output reconstruction
26 |     viewer/        - the viewer from opensfm, tweaked to color-code cameras
27 |     README.md      - you're reading it
28 |     clean.sh       - cleans a dataset
29 |     run_all.py     - runs the entire reconstruction pipeline on a dataset
30 | 
31 | The code uses building blocks from OpenSFM, and has the [same
32 | dependencies](https://github.com/mapillary/OpenSfM#dependencies).
33 | 
34 | To build the reconstruction, run:
35 | 
36 |     ./clean.sh dataset
37 |     PYTHONPATH=$OPENSFM_INSTALL_DIR ./run_all.py dataset
38 | 
39 | where `$OPENSFM_INSTALL_DIR` is wherever you installed OpenSFM. Note that
40 | because of RANSAC, you may get slightly different results each time you run the
41 | above. Two or three runs should be enough to get a reconstruction as good as
42 | the one provided in `sample_output/reconstruction.json` and shown in the
43 | screenshots above.
44 | 
45 | To visualize the reconstruction, run:
46 | 
47 |     python -m SimpleHTTPServer
48 | 
49 | then point your browser to
50 | [http://localhost:8000/viewer/reconstruction.html#file=/dataset/reconstruction.json](http://localhost:8000/viewer/reconstruction.html#file=/dataset/reconstruction.json).
51 | Hit refresh to read the latest reconstruction. If you just want to visualize
52 | the reconstruction provided in `sample_output`, go to
53 | [http://localhost:8000/viewer/reconstruction.html#file=/sample_output/reconstruction.json](http://localhost:8000/viewer/reconstruction.html#file=/sample_output/reconstruction.json)
54 | instead.
55 | 
56 | 
57 | 
58 | ## How it works
59 | 
60 | First, I wanted to see how far I could get by just using OpenSFM out of the
61 | box. It turned out that out-of-the-box OpenSFM produced a pretty bad
62 | reconstruction, so I dug in to figure out how to improve it.
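A quick way to compare runs while iterating is to count the shots and points
in the output file. Here is a minimal sketch (assuming OpenSfM's usual
`reconstruction.json` layout -- a JSON list of reconstructions, each with
`shots` and `points` dicts; exact field names may vary across versions):

    import json

    # Print a cheap quality signal for each partial reconstruction:
    # more shots registered and more points triangulated is better.
    with open("dataset/reconstruction.json") as f:
        reconstructions = json.load(f)

    for i, rec in enumerate(reconstructions):
        print("reconstruction %d: %d shots, %d points"
              % (i, len(rec["shots"]), len(rec["points"])))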
63 | 
64 | 
65 | #### A sketch of the OpenSFM reconstruction pipeline
66 | 
67 | Here is a sketch of OpenSFM's reconstruction pipeline, based on a quick
68 | reading of their code. I will discuss below how my pipeline differs from
69 | theirs.
70 | 
71 | 1. Extract camera params and GPS coordinates from EXIF data -- not relevant
72 |    since our images lack EXIF data.
73 | 
74 | 1. Extract features from each image.
75 | 
76 | 1. For each image, find candidate images to match with, based on GPS data -- in
77 |    our case, since there is no GPS data, all image pairs are considered.
78 | 
79 |     1. Optionally use a cheap preemptive matching step to quickly discard the
80 |        pair of images if they don't match very well.
81 |     1. Compute symmetric matches. These are feature pairs `(f1, f2)` in
82 |        `image1` and `image2` respectively, where `f2` is the best match for
83 |        `f1`, and vice versa, and the second-best match in each image is much
84 |        worse.
85 |     1. Compute robust matches by finding a fundamental matrix that maps points
86 |        from one image to the other, computing reprojection error w.r.t. this
87 |        fundamental matrix, and discarding outliers.
88 | 
89 | 1. Create tracks graph. A track is a feature that has been matched in 2 or more
90 |    images. Each track has a unique id. (In the reconstruction, every track will
91 |    become a point in 3D, triangulated from the images in which it appears.)
92 | 
93 |    The tracks graph is a bipartite graph with images on the left side and track
94 |    ids on the right side. An edge between `image_i` and `track_j` indicates
95 |    that the given track occurs in the given image. The edge stores the image
96 |    coordinates and feature id of that correspondence.
97 | 
98 |    Note that symmetric feature matching is crucial for getting valid tracks. If
99 |    `f1` in `image1` is matched to `f2` in `image2`, but `f2` in `image2` is
100 |    matched to `f3 != f1` in `image1`, we would have an invalid track that goes
101 |    through two different features in `image1`.
102 | 
103 | 1. Bootstrap the 3D reconstruction.
104 | 
105 |     1. Pick a pair of "shots" (a shot is an image taken from a given pose)
106 |        `(image1, image2)` to bootstrap the reconstruction with. They do this by
107 |        computing a homography between every pair of candidate images and
108 |        choosing the pair with minimal reprojection error.
109 |     1. Set the pose of the first camera to `(R1=I, t1=0)`, and compute the
110 |        relative pose of the second camera `(R2, t2)`. Note that `t` is
111 |        ambiguous up to scale, but that will be taken care of in the bundle
112 |        adjustment step below.
113 |     1. Triangulate all common tracks between `image1` and `image2`, adding them
114 |        as 3D points to the reconstruction and removing any outliers.
115 |     1. Bundle-adjust the pose of the second camera to minimize reprojection
116 |        error.
117 | 
118 | 1. Iteratively "grow" the 3D reconstruction.
119 | 
120 |     1. Pick the next shot `image3` to add to the reconstruction, preferring
121 |        images that have a lot of tracks that are already present in the
122 |        reconstruction built so far.
123 |     1. Solve for the absolute pose of the new shot, by using its
124 |        correspondences with 3D points that are already present in the
125 |        reconstruction. This is called resectioning.
126 |     1. Add the new shot to the scene and bundle-adjust it by itself. (This
127 |        optimizes the pose of the new shot, holding everything else constant.)
128 |     1. Triangulate any new points that occur in `image3` and some image already
129 |        in the reconstruction.
130 |     1. Bundle-adjust the whole reconstruction. (This optimizes the pose of all
131 |        shots and the 3D location of all points, jointly.)
132 |     1. Pick the next `image4` and repeat.
133 | 
134 | 1. If the above process did not use all images (e.g. because there were not
135 |    enough common tracks), bootstrap a second reconstruction, and so on.
136 | 
137 | 
138 | #### Tweaks for our specific problem
139 | 
140 | There are a few things I noticed right away that made the built-in OpenSFM
141 | pipeline less than ideal for our problem:
142 | 
143 | - Without EXIF tags, they assume every shot was taken with the same camera,
144 |   with unknown focal length and distortion parameters. In our case, each video
145 |   was taken with a potentially different camera.
146 | 
147 | - Without GPS, they compute feature matches between all pairs of images. This
148 |   is slow, and can lead to incorrect tracks if e.g. a feature in `0_1.jpg` is
149 |   matched to a feature in `0_9.jpg`, since the two frames are too far apart
150 |   to have any reliable features in common.
151 | 
152 | - They prune features very aggressively (symmetric matches based on the
153 |   second-best rule, then robust matches based on computing a fundamental
154 |   matrix). This works fine when you have lots of images of the same scene, but
155 |   in our case it leads to extreme feature sparsity.
156 | 
157 | - The homography-based method to pick the two images to start the
158 |   reconstruction with might work OK if the images are all of the same
159 |   approximately planar scene, like the face of a building. But our images are
160 |   not even of the same "scene", since the camera moves forward with each
161 |   frame, and anyway, the scene is far from being planar.
162 | 
163 | - It's tough to triangulate points when the second camera sits directly in
164 |   front of the first camera, since the rays from the camera centers to the
165 |   3D point are nearly parallel. Lowering the `triangulation_min_ray_angle`
166 |   parameter helps avoid discarding too many of these points.
167 | 
168 | - OpenSFM assumes a static scene, but in our case there are other cars moving
169 |   on the freeway. A crude workaround is to simply discard any features
170 |   extracted from the road part of each image. This can be achieved using masks
171 |   (regions of the image from which to discard any extracted features).
172 | 
173 | Setting up masks and tuning `triangulation_min_ray_angle` was enough to
174 | reconstruct the `0_*.jpg` video using the OpenSFM pipeline. But adding more
175 | images from the other videos still produced a bad reconstruction. So I decided
176 | to write my own pipeline using building blocks from OpenSFM.
177 | 
178 | 
179 | #### Custom reconstruction pipeline
180 | 
181 | Here is how my pipeline differs from the OpenSFM pipeline:
182 | 
183 | - Feature extraction:
184 | 
185 |     - Use masks to avoid bad image areas (road with moving cars, windshield
186 |       dead zones).
187 | 
188 | - Feature matching:
189 | 
190 |     - Don't match every image pair. Instead, match each image (e.g. `2_5.jpg`)
191 |       to its previous and next frame in the same video (`2_4.jpg` and
192 |       `2_6.jpg`), and to the corresponding frame in all other videos
193 |       (`0_5.jpg`, `1_5.jpg`, etc). This works well because the videos are more
194 |       or less aligned (i.e. the car is moving at roughly the same speed, and
195 |       the sampling frequency is roughly the same). For a more general approach,
196 |       it would be better to match images and add them to the tracks graph
197 |       dynamically, instead of in a preprocessing step.
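      As a concrete illustration, the matching schedule above can be generated
      like this (an illustrative sketch only -- the helper below is
      hypothetical, and the real pipeline lives in `run_all.py`):

          def candidate_pairs(num_videos=5, frames_per_video=10):
              """Pairs to match: temporal neighbors within a video, plus the
              corresponding frame in every other video (not all pairs)."""
              name = lambda v, i: "%d_%d.jpg" % (v, i)
              pairs = set()
              for v in range(num_videos):
                  for i in range(frames_per_video):
                      # Previous/next frame in the same video.
                      if i + 1 < frames_per_video:
                          pairs.add((name(v, i), name(v, i + 1)))
                      # Same frame index in all other videos.
                      for w in range(v + 1, num_videos):
                          pairs.add((name(v, i), name(w, i)))
              return sorted(pairs)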
198 |     - Use the symmetric matches directly; don't prune them any further with
199 |       their "robust matching" technique.
200 | 
201 |       Note: Even symmetric matches can be too sparse; e.g. `1_8.jpg` and
202 |       `1_9.jpg` have only 18 symmetric matches. And only a subset of those
203 |       might appear in the reconstruction. I tried several techniques to get
204 |       more matches:
205 | 
206 |       - Increase `lowes_ratio` to make the second-best pruning rule less
207 |         aggressive.
208 |       - Write my custom matcher `match_custom` to avoid the second-best
209 |         pruning altogether. I left this in the code so you can see how it
210 |         works, but it did not end up improving the results.
211 | 
212 |       In the end, what worked best was using the original symmetric matches,
213 |       but making sure that several videos were reconstructed together. For
214 |       example, when reconstructing video `1_*.jpg` by itself, `1_8.jpg` and
215 |       `1_9.jpg` have 18 symmetric matches, and only 4 of them occur in the
216 |       reconstruction before the `1_9.jpg` shot is added, so getting a good pose
217 |       for `1_9.jpg` is really hard. When reconstructing all videos together,
218 |       `1_8.jpg` and `1_9.jpg` still have only 18 symmetric matches, but 13 of
219 |       them occur in the reconstruction, making the pose estimation much easier.
220 | 
221 | - Reconstruction:
222 | 
223 |     - Give each video a separate set of camera parameters, but make all frames
224 |       in the same video share the same parameters. This allows the bundle
225 |       adjustment step to find different focal lengths and distortion parameters
226 |       for the different cameras.
227 | 
228 |     - Use a custom image pair to start with, and a custom order in which to
229 |       "grow" (add images to) the reconstruction. I tried several approaches
230 |       here (see `get_reconstruction_order`), and settled on a predetermined
231 |       heuristic order. For example, the `0_*.jpg` video is pretty easy to
232 |       reconstruct, so it helps to reconstruct it entirely before adding shots
233 |       from other videos.
234 | 
235 |     - Avoid the complicated resectioning that OpenSFM does. In the bootstrap
236 |       step, OpenSFM solves a [relative pose
237 |       problem](https://github.com/mapillary/OpenSfM/blob/0343d7950bb584b678f0d8fd8404fa38947879e8/opensfm/reconstruction.py#L249),
238 |       but in the grow step it solves an [absolute pose
239 |       problem](https://github.com/mapillary/OpenSfM/blob/0343d7950bb584b678f0d8fd8404fa38947879e8/opensfm/reconstruction.py#L406).
240 |       I wrote my pipeline to solve a relative pose problem in both cases, thus
241 |       making it simpler, reducing the number of building blocks from OpenGV
242 |       that I need to use, and making the `hint_forward` trick (described below)
243 |       possible. The downside is that each new shot being added to the
244 |       reconstruction needs a reference shot (already in the reconstruction)
245 |       from which to compute its relative pose. This is easy in our case, since
246 |       the frames in each video have a natural ordering.
247 | 
248 |     - Take advantage of the fact that the camera moves forward from frame `i`
249 |       to frame `i+1`, with very little rotation, and a translation that is
250 |       mostly along the z axis. When computing the pose of successive frames
251 |       from the same video, I pass `hint_forward=True` into my custom
252 |       reconstruction routine. This causes it to retry finding the relative pose
253 |       between the two views (using RANSAC) until it finds a transformation that
254 |       agrees with the z-axis motion described above.
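      In code, the `hint_forward` retry loop can be sketched roughly as
      follows (illustrative only: `solve_relative_pose_ransac` stands in for
      the OpenGV-based solver actually used, and the thresholds are made-up
      values, not the ones in the code):

          import numpy as np

          def looks_forward(R, t, max_rot_deg=10.0, min_z_frac=0.9):
              # Rotation angle from the trace: cos(theta) = (tr(R) - 1) / 2.
              cos_theta = (np.trace(R) - 1.0) / 2.0
              rot_deg = np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0)))
              t_unit = t / np.linalg.norm(t)
              # Accept only small rotations with translation dominated by +z.
              return rot_deg < max_rot_deg and t_unit[2] > min_z_frac

          def relative_pose_with_hint(solve_relative_pose_ransac,
                                      hint_forward=True, max_tries=20):
              for _ in range(max_tries):
                  R, t = solve_relative_pose_ransac()
                  if not hint_forward or looks_forward(R, t):
                      return R, t
              raise RuntimeError("no pose consistent with forward motion found")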
255 | 256 | Note: This is a rather crude approach to get the job done quickly, and 257 | there are more elegant ways of handling this: 258 | 259 | - Assume an identity rotation between the two views, and solve for 260 | translation only. OpenGV [supports 261 | this](https://github.com/laurentkneip/opengv/blob/be7a676f1ffb26072110a5858bf746d1c8eb5d28/include/opengv/sac_problems/relative_pose/TranslationOnlySacProblem.hpp#L62-L68), 262 | but unfortunately it [does not make this functionality available 263 | through its Python 264 | API](https://github.com/laurentkneip/opengv/blob/be7a676f1ffb26072110a5858bf746d1c8eb5d28/python/pyopengv.cpp#L504-L507). Of course, we could always just do it ourselves. 265 | 266 | - Add the appropriate [rotation and translation 267 | priors](https://github.com/mapillary/OpenSfM/blob/0343d7950bb584b678f0d8fd8404fa38947879e8/opensfm/src/bundle.h#L611-L639) 268 | to the bundle adjustment problem, so that it appropriately penalizes 269 | transformations that disagree with `hint_forward`. 270 | 271 | This is enough to solve the given problem, but there are lots of ways to make 272 | the pipeline more robust. I described some of them above, and left some as 273 | FIXMEs in the code. 274 | -------------------------------------------------------------------------------- /viewer/js/TrackballControls.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Eberhard Graether / http://egraether.com/ 3 | * @author Mark Lundin / http://mark-lundin.com 4 | */ 5 | 6 | THREE.TrackballControls = function ( object, domElement ) { 7 | 8 | var _this = this; 9 | var STATE = { NONE: -1, ROTATE: 0, ZOOM: 1, PAN: 2, TOUCH_ROTATE: 3, TOUCH_ZOOM: 4, TOUCH_PAN: 5 }; 10 | 11 | this.object = object; 12 | this.domElement = ( domElement !== undefined ) ? 
domElement : document; 13 | 14 | // API 15 | 16 | this.enabled = true; 17 | 18 | this.screen = { left: 0, top: 0, width: 0, height: 0 }; 19 | 20 | this.rotateSpeed = 1.0; 21 | this.zoomSpeed = 1.2; 22 | this.panSpeed = 0.3; 23 | 24 | this.noRotate = false; 25 | this.noZoom = false; 26 | this.noPan = false; 27 | this.noRoll = false; 28 | 29 | this.staticMoving = false; 30 | this.dynamicDampingFactor = 0.2; 31 | 32 | this.minDistance = 0; 33 | this.maxDistance = Infinity; 34 | 35 | this.keys = [ 65 /*A*/, 83 /*S*/, 68 /*D*/ ]; 36 | 37 | // internals 38 | 39 | this.target = new THREE.Vector3(); 40 | 41 | var lastPosition = new THREE.Vector3(); 42 | 43 | var _state = STATE.NONE, 44 | _prevState = STATE.NONE, 45 | 46 | _eye = new THREE.Vector3(), 47 | 48 | _rotateStart = new THREE.Vector3(), 49 | _rotateEnd = new THREE.Vector3(), 50 | 51 | _zoomStart = new THREE.Vector2(), 52 | _zoomEnd = new THREE.Vector2(), 53 | 54 | _touchZoomDistanceStart = 0, 55 | _touchZoomDistanceEnd = 0, 56 | 57 | _panStart = new THREE.Vector2(), 58 | _panEnd = new THREE.Vector2(); 59 | 60 | // for reset 61 | 62 | this.target0 = this.target.clone(); 63 | this.position0 = this.object.position.clone(); 64 | this.up0 = this.object.up.clone(); 65 | 66 | // events 67 | 68 | var changeEvent = { type: 'change' }; 69 | var startEvent = { type: 'start'}; 70 | var endEvent = { type: 'end'}; 71 | 72 | 73 | // methods 74 | 75 | this.handleResize = function () { 76 | 77 | if ( this.domElement === document ) { 78 | 79 | this.screen.left = 0; 80 | this.screen.top = 0; 81 | this.screen.width = window.innerWidth; 82 | this.screen.height = window.innerHeight; 83 | 84 | } else { 85 | 86 | this.screen = this.domElement.getBoundingClientRect(); 87 | // adjustments come from similar code in the jquery offset() function 88 | var d = this.domElement.ownerDocument.documentElement 89 | this.screen.left += window.pageXOffset - d.clientLeft 90 | this.screen.top += window.pageYOffset - d.clientTop 91 | 92 | } 93 | 94 | }; 95 | 96 | this.handleEvent = function ( event ) { 97 | 98 | if ( typeof this[ event.type ] == 'function' ) { 99 | 100 | this[ event.type ]( event ); 101 | 102 | } 103 | 104 | }; 105 | 106 | this.getMouseOnScreen = function ( pageX, pageY, optionalTarget ) { 107 | 108 | return ( optionalTarget || new THREE.Vector2() ).set( 109 | ( pageX - _this.screen.left ) / _this.screen.width, 110 | ( pageY - _this.screen.top ) / _this.screen.height 111 | ); 112 | 113 | }; 114 | 115 | this.getMouseProjectionOnBall = (function(){ 116 | 117 | var objectUp = new THREE.Vector3(); 118 | 119 | 120 | return function ( pageX, pageY, projection ) { 121 | 122 | var mouseOnBall = new THREE.Vector3( 123 | ( pageX - _this.screen.width * 0.5 - _this.screen.left ) / (_this.screen.width*.5), 124 | ( _this.screen.height * 0.5 + _this.screen.top - pageY ) / (_this.screen.height*.5), 125 | 0.0 126 | ); 127 | 128 | var length = mouseOnBall.length(); 129 | 130 | if ( _this.noRoll ) { 131 | 132 | if ( length < Math.SQRT1_2 ) { 133 | 134 | mouseOnBall.z = Math.sqrt( 1.0 - length*length ); 135 | 136 | } else { 137 | 138 | mouseOnBall.z = .5 / length; 139 | 140 | } 141 | 142 | } else if ( length > 1.0 ) { 143 | 144 | mouseOnBall.normalize(); 145 | 146 | } else { 147 | 148 | mouseOnBall.z = Math.sqrt( 1.0 - length * length ); 149 | 150 | } 151 | 152 | _eye.copy( _this.object.position ).sub( _this.target ); 153 | 154 | projection.copy( _this.object.up ).setLength( mouseOnBall.y ) 155 | projection.add( objectUp.copy( _this.object.up ).cross( _eye ).setLength( mouseOnBall.x 
) ); 156 | projection.add( _eye.setLength( mouseOnBall.z ) ); 157 | 158 | return projection; 159 | } 160 | 161 | }()); 162 | 163 | this.rotateCamera = (function(){ 164 | 165 | var axis = new THREE.Vector3(), 166 | quaternion = new THREE.Quaternion(); 167 | 168 | 169 | return function () { 170 | 171 | var angle = Math.acos( _rotateStart.dot( _rotateEnd ) / _rotateStart.length() / _rotateEnd.length() ); 172 | 173 | if ( angle ) { 174 | 175 | axis.crossVectors( _rotateStart, _rotateEnd ).normalize(); 176 | 177 | angle *= _this.rotateSpeed; 178 | 179 | quaternion.setFromAxisAngle( axis, -angle ); 180 | 181 | _eye.applyQuaternion( quaternion ); 182 | _this.object.up.applyQuaternion( quaternion ); 183 | 184 | _rotateEnd.applyQuaternion( quaternion ); 185 | 186 | if ( _this.staticMoving ) { 187 | 188 | _rotateStart.copy( _rotateEnd ); 189 | 190 | } else { 191 | 192 | quaternion.setFromAxisAngle( axis, angle * ( _this.dynamicDampingFactor - 1.0 ) ); 193 | _rotateStart.applyQuaternion( quaternion ); 194 | 195 | } 196 | 197 | } 198 | } 199 | 200 | }()); 201 | 202 | this.zoomCamera = function () { 203 | 204 | if ( _state === STATE.TOUCH_ZOOM ) { 205 | 206 | var factor = _touchZoomDistanceStart / _touchZoomDistanceEnd; 207 | _touchZoomDistanceStart = _touchZoomDistanceEnd; 208 | _eye.multiplyScalar( factor ); 209 | 210 | } else { 211 | 212 | var factor = 1.0 + ( _zoomEnd.y - _zoomStart.y ) * _this.zoomSpeed; 213 | 214 | if ( factor !== 1.0 && factor > 0.0 ) { 215 | 216 | _eye.multiplyScalar( factor ); 217 | 218 | if ( _this.staticMoving ) { 219 | 220 | _zoomStart.copy( _zoomEnd ); 221 | 222 | } else { 223 | 224 | _zoomStart.y += ( _zoomEnd.y - _zoomStart.y ) * this.dynamicDampingFactor; 225 | 226 | } 227 | 228 | } 229 | 230 | } 231 | 232 | }; 233 | 234 | this.panCamera = (function(){ 235 | 236 | var mouseChange = new THREE.Vector2(), 237 | objectUp = new THREE.Vector3(), 238 | pan = new THREE.Vector3(); 239 | 240 | return function () { 241 | 242 | mouseChange.copy( _panEnd ).sub( _panStart ); 243 | 244 | if ( mouseChange.lengthSq() ) { 245 | 246 | mouseChange.multiplyScalar( _eye.length() * _this.panSpeed ); 247 | 248 | pan.copy( _eye ).cross( _this.object.up ).setLength( mouseChange.x ); 249 | pan.add( objectUp.copy( _this.object.up ).setLength( mouseChange.y ) ); 250 | 251 | _this.object.position.add( pan ); 252 | _this.target.add( pan ); 253 | 254 | if ( _this.staticMoving ) { 255 | 256 | _panStart.copy( _panEnd ); 257 | 258 | } else { 259 | 260 | _panStart.add( mouseChange.subVectors( _panEnd, _panStart ).multiplyScalar( _this.dynamicDampingFactor ) ); 261 | 262 | } 263 | 264 | } 265 | } 266 | 267 | }()); 268 | 269 | this.checkDistances = function () { 270 | 271 | if ( !_this.noZoom || !_this.noPan ) { 272 | 273 | if ( _eye.lengthSq() > _this.maxDistance * _this.maxDistance ) { 274 | 275 | _this.object.position.addVectors( _this.target, _eye.setLength( _this.maxDistance ) ); 276 | 277 | } 278 | 279 | if ( _eye.lengthSq() < _this.minDistance * _this.minDistance ) { 280 | 281 | _this.object.position.addVectors( _this.target, _eye.setLength( _this.minDistance ) ); 282 | 283 | } 284 | 285 | } 286 | 287 | }; 288 | 289 | this.update = function () { 290 | 291 | _eye.subVectors( _this.object.position, _this.target ); 292 | 293 | if ( !_this.noRotate ) { 294 | 295 | _this.rotateCamera(); 296 | 297 | } 298 | 299 | if ( !_this.noZoom ) { 300 | 301 | _this.zoomCamera(); 302 | 303 | } 304 | 305 | if ( !_this.noPan ) { 306 | 307 | _this.panCamera(); 308 | 309 | } 310 | 311 | _this.object.position.addVectors( 
_this.target, _eye ); 312 | 313 | _this.checkDistances(); 314 | 315 | _this.object.lookAt( _this.target ); 316 | 317 | if ( lastPosition.distanceToSquared( _this.object.position ) > 0 ) { 318 | 319 | _this.dispatchEvent( changeEvent ); 320 | 321 | lastPosition.copy( _this.object.position ); 322 | 323 | } 324 | 325 | }; 326 | 327 | this.reset = function () { 328 | 329 | _state = STATE.NONE; 330 | _prevState = STATE.NONE; 331 | 332 | _this.target.copy( _this.target0 ); 333 | _this.object.position.copy( _this.position0 ); 334 | _this.object.up.copy( _this.up0 ); 335 | 336 | _eye.subVectors( _this.object.position, _this.target ); 337 | 338 | _this.object.lookAt( _this.target ); 339 | 340 | _this.dispatchEvent( changeEvent ); 341 | 342 | lastPosition.copy( _this.object.position ); 343 | 344 | }; 345 | 346 | // listeners 347 | 348 | function keydown( event ) { 349 | 350 | if ( _this.enabled === false ) return; 351 | 352 | window.removeEventListener( 'keydown', keydown ); 353 | 354 | _prevState = _state; 355 | 356 | if ( _state !== STATE.NONE ) { 357 | 358 | return; 359 | 360 | } else if ( event.keyCode === _this.keys[ STATE.ROTATE ] && !_this.noRotate ) { 361 | 362 | _state = STATE.ROTATE; 363 | 364 | } else if ( event.keyCode === _this.keys[ STATE.ZOOM ] && !_this.noZoom ) { 365 | 366 | _state = STATE.ZOOM; 367 | 368 | } else if ( event.keyCode === _this.keys[ STATE.PAN ] && !_this.noPan ) { 369 | 370 | _state = STATE.PAN; 371 | 372 | } 373 | 374 | } 375 | 376 | function keyup( event ) { 377 | 378 | if ( _this.enabled === false ) return; 379 | 380 | _state = _prevState; 381 | 382 | window.addEventListener( 'keydown', keydown, false ); 383 | 384 | } 385 | 386 | function mousedown( event ) { 387 | 388 | if ( _this.enabled === false ) return; 389 | 390 | event.preventDefault(); 391 | event.stopPropagation(); 392 | 393 | if ( _state === STATE.NONE ) { 394 | 395 | _state = event.button; 396 | 397 | } 398 | 399 | if ( _state === STATE.ROTATE && !_this.noRotate ) { 400 | 401 | _rotateStart = _this.getMouseProjectionOnBall( event.pageX, event.pageY, _rotateStart ); 402 | _rotateEnd.copy(_rotateStart) 403 | 404 | } else if ( _state === STATE.ZOOM && !_this.noZoom ) { 405 | 406 | _zoomStart = _this.getMouseOnScreen( event.pageX, event.pageY, _zoomStart ); 407 | _zoomEnd.copy(_zoomStart); 408 | 409 | } else if ( _state === STATE.PAN && !_this.noPan ) { 410 | 411 | _panStart = _this.getMouseOnScreen( event.pageX, event.pageY, _panStart); 412 | _panEnd.copy(_panStart) 413 | 414 | } 415 | 416 | document.addEventListener( 'mousemove', mousemove, false ); 417 | document.addEventListener( 'mouseup', mouseup, false ); 418 | _this.dispatchEvent( startEvent ); 419 | 420 | 421 | } 422 | 423 | function mousemove( event ) { 424 | 425 | if ( _this.enabled === false ) return; 426 | 427 | event.preventDefault(); 428 | event.stopPropagation(); 429 | 430 | if ( _state === STATE.ROTATE && !_this.noRotate ) { 431 | 432 | _rotateEnd = _this.getMouseProjectionOnBall( event.pageX, event.pageY, _rotateEnd ); 433 | 434 | } else if ( _state === STATE.ZOOM && !_this.noZoom ) { 435 | 436 | _zoomEnd = _this.getMouseOnScreen( event.pageX, event.pageY, _zoomEnd ); 437 | 438 | } else if ( _state === STATE.PAN && !_this.noPan ) { 439 | 440 | _panEnd = _this.getMouseOnScreen( event.pageX, event.pageY, _panEnd ); 441 | 442 | } 443 | 444 | } 445 | 446 | function mouseup( event ) { 447 | 448 | if ( _this.enabled === false ) return; 449 | 450 | event.preventDefault(); 451 | event.stopPropagation(); 452 | 453 | _state = STATE.NONE; 454 | 455 
| document.removeEventListener( 'mousemove', mousemove ); 456 | document.removeEventListener( 'mouseup', mouseup ); 457 | _this.dispatchEvent( endEvent ); 458 | 459 | } 460 | 461 | function mousewheel( event ) { 462 | 463 | if ( _this.enabled === false ) return; 464 | 465 | event.preventDefault(); 466 | event.stopPropagation(); 467 | 468 | var delta = 0; 469 | 470 | if ( event.wheelDelta ) { // WebKit / Opera / Explorer 9 471 | 472 | delta = event.wheelDelta / 40; 473 | 474 | } else if ( event.detail ) { // Firefox 475 | 476 | delta = - event.detail / 3; 477 | 478 | } 479 | 480 | _zoomStart.y += delta * 0.01; 481 | _this.dispatchEvent( startEvent ); 482 | _this.dispatchEvent( endEvent ); 483 | 484 | } 485 | 486 | function touchstart( event ) { 487 | 488 | if ( _this.enabled === false ) return; 489 | 490 | switch ( event.touches.length ) { 491 | 492 | case 1: 493 | _state = STATE.TOUCH_ROTATE; 494 | _rotateEnd.copy( _this.getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _rotateStart )); 495 | break; 496 | 497 | case 2: 498 | _state = STATE.TOUCH_ZOOM; 499 | var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; 500 | var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; 501 | _touchZoomDistanceEnd = _touchZoomDistanceStart = Math.sqrt( dx * dx + dy * dy ); 502 | break; 503 | 504 | case 3: 505 | _state = STATE.TOUCH_PAN; 506 | _panEnd.copy( _this.getMouseOnScreen( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _panStart )); 507 | break; 508 | 509 | default: 510 | _state = STATE.NONE; 511 | 512 | } 513 | _this.dispatchEvent( startEvent ); 514 | 515 | 516 | } 517 | 518 | function touchmove( event ) { 519 | 520 | if ( _this.enabled === false ) return; 521 | 522 | event.preventDefault(); 523 | event.stopPropagation(); 524 | 525 | switch ( event.touches.length ) { 526 | 527 | case 1: 528 | _rotateEnd = _this.getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _rotateEnd ); 529 | break; 530 | 531 | case 2: 532 | var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; 533 | var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; 534 | _touchZoomDistanceEnd = Math.sqrt( dx * dx + dy * dy ) 535 | break; 536 | 537 | case 3: 538 | _panEnd = _this.getMouseOnScreen( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _panEnd ); 539 | break; 540 | 541 | default: 542 | _state = STATE.NONE; 543 | 544 | } 545 | 546 | } 547 | 548 | function touchend( event ) { 549 | 550 | if ( _this.enabled === false ) return; 551 | 552 | switch ( event.touches.length ) { 553 | 554 | case 1: 555 | _rotateStart.copy( _this.getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _rotateEnd )); 556 | break; 557 | 558 | case 2: 559 | _touchZoomDistanceStart = _touchZoomDistanceEnd = 0; 560 | break; 561 | 562 | case 3: 563 | _panStart.copy( _this.getMouseOnScreen( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY, _panEnd )); 564 | break; 565 | 566 | } 567 | 568 | _state = STATE.NONE; 569 | _this.dispatchEvent( endEvent ); 570 | 571 | } 572 | 573 | this.domElement.addEventListener( 'contextmenu', function ( event ) { event.preventDefault(); }, false ); 574 | 575 | this.domElement.addEventListener( 'mousedown', mousedown, false ); 576 | 577 | this.domElement.addEventListener( 'mousewheel', mousewheel, false ); 578 | this.domElement.addEventListener( 'DOMMouseScroll', mousewheel, false ); // firefox 579 | 580 | this.domElement.addEventListener( 'touchstart', touchstart, false ); 581 | this.domElement.addEventListener( 
'touchend', touchend, false ); 582 | this.domElement.addEventListener( 'touchmove', touchmove, false ); 583 | 584 | window.addEventListener( 'keydown', keydown, false ); 585 | window.addEventListener( 'keyup', keyup, false ); 586 | 587 | this.handleResize(); 588 | 589 | }; 590 | 591 | THREE.TrackballControls.prototype = Object.create( THREE.EventDispatcher.prototype ); 592 | -------------------------------------------------------------------------------- /viewer/js/OrbitControls.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @author qiao / https://github.com/qiao 3 | * @author mrdoob / http://mrdoob.com 4 | * @author alteredq / http://alteredqualia.com/ 5 | * @author WestLangley / http://github.com/WestLangley 6 | * @author erich666 / http://erichaines.com 7 | */ 8 | /*global THREE, console */ 9 | 10 | // This set of controls performs orbiting, dollying (zooming), and panning. It maintains 11 | // the "up" direction as +Y, unlike the TrackballControls. Touch on tablet and phones is 12 | // supported. 13 | // 14 | // Orbit - left mouse / touch: one finger move 15 | // Zoom - middle mouse, or mousewheel / touch: two finger spread or squish 16 | // Pan - right mouse, or arrow keys / touch: three finter swipe 17 | // 18 | // This is a drop-in replacement for (most) TrackballControls used in examples. 19 | // That is, include this js file and wherever you see: 20 | // controls = new THREE.TrackballControls( camera ); 21 | // controls.target.z = 150; 22 | // Simple substitute "OrbitControls" and the control should work as-is. 23 | 24 | THREE.OrbitControls = function ( object, domElement ) { 25 | 26 | this.object = object; 27 | this.domElement = ( domElement !== undefined ) ? domElement : document; 28 | 29 | // API 30 | 31 | // Set to false to disable this control 32 | this.enabled = true; 33 | 34 | // "target" sets the location of focus, where the control orbits around 35 | // and where it pans with respect to. 
36 | this.target = new THREE.Vector3(); 37 | 38 | this.animationTarget = new THREE.Vector3(); 39 | this.animationPosition = new THREE.Vector3(); 40 | this.animationPosition.copy(object.position); 41 | this.animationSpeed = 0.1; 42 | 43 | // This option actually enables dollying in and out; left as "zoom" for 44 | // backwards compatibility 45 | this.noZoom = false; 46 | this.zoomSpeed = 1.0; 47 | 48 | // Limits to how far you can dolly in and out 49 | this.minDistance = 0; 50 | this.maxDistance = Infinity; 51 | 52 | // Set to true to disable this control 53 | this.noRotate = false; 54 | this.rotateSpeed = 1.0; 55 | this.noLookAround = false; 56 | this.lookAroundSpeed = 0.5; 57 | 58 | // Set to true to disable this control 59 | this.noPan = false; 60 | this.keyPanSpeed = 7.0; // pixels moved per arrow key push 61 | 62 | // Set to true to disable use of the keys 63 | this.noKeys = false; 64 | 65 | // The four arrow keys 66 | this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 }; 67 | 68 | // Mouse buttons 69 | this.mouseButtons = { ORBIT: THREE.MOUSE.LEFT, ZOOM: THREE.MOUSE.MIDDLE, PAN: THREE.MOUSE.RIGHT }; 70 | 71 | //////////// 72 | // internals 73 | 74 | var scope = this; 75 | 76 | var EPS = 0.000001; 77 | 78 | var rotateStart = new THREE.Vector2(); 79 | var rotateEnd = new THREE.Vector2(); 80 | var rotateDelta = new THREE.Vector2(); 81 | 82 | var lookAroundStart = new THREE.Vector2(); 83 | var lookAroundEnd = new THREE.Vector2(); 84 | var lookAroundDelta = new THREE.Vector2(); 85 | 86 | var panStart = new THREE.Vector2(); 87 | var panEnd = new THREE.Vector2(); 88 | var panDelta = new THREE.Vector2(); 89 | var panOffset = new THREE.Vector3(); 90 | 91 | var offset = new THREE.Vector3(); 92 | 93 | var dollyStart = new THREE.Vector2(); 94 | var dollyEnd = new THREE.Vector2(); 95 | var dollyDelta = new THREE.Vector2(); 96 | 97 | var phiDelta = 0; 98 | var thetaDelta = 0; 99 | var laPhiDelta = 0; 100 | var laThetaDelta = 0; 101 | var scale = 1; 102 | var pan = new THREE.Vector3(); 103 | 104 | var lastPosition = new THREE.Vector3(); 105 | var lastQuaternion = new THREE.Quaternion(); 106 | 107 | var STATE = { NONE : -1, ROTATE : 0, DOLLY : 1, PAN : 2, TOUCH_ROTATE : 3, TOUCH_DOLLY : 4, TOUCH_PAN : 5, LOOK_AROUND: 6 }; 108 | 109 | var state = STATE.NONE; 110 | 111 | // for reset 112 | 113 | this.target0 = this.target.clone(); 114 | this.position0 = this.object.position.clone(); 115 | 116 | // events 117 | 118 | var changeEvent = { type: 'change' }; 119 | var startEvent = { type: 'start'}; 120 | var endEvent = { type: 'end'}; 121 | 122 | this.rotateLeft = function ( angle ) { 123 | thetaDelta -= angle; 124 | }; 125 | 126 | this.rotateUp = function ( angle ) { 127 | phiDelta -= angle; 128 | }; 129 | 130 | this.lookAroundLeft = function ( angle ) { 131 | laThetaDelta += angle; 132 | }; 133 | 134 | this.lookAroundUp = function ( angle ) { 135 | laPhiDelta -= angle; 136 | }; 137 | 138 | // pass in distance in world space to move left 139 | this.panLeft = function ( distance ) { 140 | // get X column of matrix 141 | var te = this.object.matrix.elements; 142 | panOffset.set( te[ 0 ], te[ 1 ], te[ 2 ] ); 143 | panOffset.multiplyScalar( - distance ); 144 | pan.add( panOffset ); 145 | }; 146 | 147 | // pass in distance in world space to move up 148 | this.panUp = function ( distance ) { 149 | // get Y column of matrix 150 | var te = this.object.matrix.elements; 151 | panOffset.set( te[ 4 ], te[ 5 ], te[ 6 ] ); 152 | panOffset.multiplyScalar( distance ); 153 | pan.add( panOffset ); 154 | }; 155 | 156 | // 
pass in x,y of change desired in pixel space, 157 | // right and down are positive 158 | this.pan = function ( deltaX, deltaY ) { 159 | var element = scope.domElement === document ? scope.domElement.body : scope.domElement; 160 | 161 | if ( scope.object.fov !== undefined ) { 162 | // perspective 163 | var position = scope.object.position; 164 | var offset = position.clone().sub( scope.target ); 165 | 166 | // half of the fov is center to top of screen 167 | var targetDistance = offset.length() * Math.tan( ( scope.object.fov / 2 ) * Math.PI / 180.0 ); 168 | 169 | // we actually don't use screenWidth, since perspective camera is fixed to screen height 170 | scope.panLeft( 2 * deltaX * targetDistance / element.clientHeight ); 171 | scope.panUp( 2 * deltaY * targetDistance / element.clientHeight ); 172 | } else if ( scope.object.top !== undefined ) { 173 | // orthographic 174 | scope.panLeft( deltaX * (scope.object.right - scope.object.left) / element.clientWidth ); 175 | scope.panUp( deltaY * (scope.object.top - scope.object.bottom) / element.clientHeight ); 176 | } else { 177 | // camera neither orthographic or perspective 178 | console.warn( 'WARNING: OrbitControls.js encountered an unknown camera type - pan disabled.' ); 179 | } 180 | 181 | }; 182 | 183 | this.dollyIn = function (dollyScale) { 184 | scale /= dollyScale; 185 | }; 186 | 187 | this.dollyOut = function ( dollyScale ) { 188 | scale *= dollyScale; 189 | }; 190 | 191 | this.updateAnimationTargetsMouse = function () { 192 | // Handle look around. 193 | offset.copy(this.animationTarget).sub(this.animationPosition); 194 | var laTheta = Math.atan2(offset.y, offset.x); 195 | var laPhi = Math.atan2(Math.sqrt(offset.x * offset.x + offset.y * offset.y), offset.z); 196 | laTheta += laThetaDelta; 197 | laPhi += laPhiDelta; 198 | laPhi = Math.max(EPS, Math.min(Math.PI - EPS, laPhi)); 199 | 200 | // Compute new target position 201 | var radius = offset.length(); 202 | offset.x = radius * Math.sin(laPhi) * Math.cos(laTheta); 203 | offset.y = radius * Math.sin(laPhi) * Math.sin(laTheta); 204 | offset.z = radius * Math.cos(laPhi); 205 | this.animationTarget.copy(this.animationPosition).add(offset); 206 | 207 | 208 | offset.copy(this.animationPosition).sub(this.animationTarget); 209 | 210 | // Rotate 211 | var theta = Math.atan2(offset.y, offset.x); 212 | var phi = Math.atan2(Math.sqrt(offset.x * offset.x + offset.y * offset.y), offset.z); 213 | theta += thetaDelta; 214 | phi += phiDelta; 215 | phi = Math.max(EPS, Math.min(Math.PI - EPS, phi)); 216 | 217 | // Dolly 218 | radius = offset.length() * scale; 219 | radius = Math.max(this.minDistance, Math.min(this.maxDistance, radius)); 220 | 221 | this.animationTarget.add(pan); 222 | 223 | // Compute new camera position 224 | offset.x = radius * Math.sin(phi) * Math.cos(theta); 225 | offset.y = radius * Math.sin(phi) * Math.sin(theta); 226 | offset.z = radius * Math.cos(phi); 227 | this.animationPosition.copy(this.animationTarget).add(offset); 228 | 229 | // Reset deltas 230 | thetaDelta = 0; 231 | phiDelta = 0; 232 | laThetaDelta = 0; 233 | laPhiDelta = 0; 234 | scale = 1; 235 | pan.set(0, 0, 0); 236 | }; 237 | 238 | this.update = function () { 239 | this.updateAnimationTargetsMouse(); 240 | 241 | this.target.lerp(this.animationTarget, this.animationSpeed); 242 | this.object.position.lerp(this.animationPosition, this.animationSpeed); 243 | this.object.lookAt(this.target); 244 | 245 | // update condition is: 246 | // min(camera displacement, camera rotation in radians)^2 > EPS 247 | // using 
small-angle approximation cos(x/2) = 1 - x^2 / 8 248 | if ( lastPosition.distanceToSquared( this.object.position ) > EPS 249 | || 8 * (1 - lastQuaternion.dot(this.object.quaternion)) > EPS ) { 250 | 251 | this.dispatchEvent( changeEvent ); 252 | 253 | lastPosition.copy( this.object.position ); 254 | lastQuaternion.copy (this.object.quaternion ); 255 | } 256 | }; 257 | 258 | this.goto_shot = function(cam, shot) { 259 | this.animationPosition.copy(opticalCenter(shot)); 260 | var offset = pixelToVertex(cam, shot, 0, 0, 20); 261 | this.animationTarget.copy(offset); 262 | }; 263 | 264 | this.goto = function(position, target) { 265 | this.animationPosition.copy(position); 266 | this.animationTarget.copy(target); 267 | }; 268 | 269 | this.reset = function () { 270 | state = STATE.NONE; 271 | this.target.copy( this.target0 ); 272 | this.object.position.copy( this.position0 ); 273 | this.update(); 274 | }; 275 | 276 | function getZoomScale() { 277 | return Math.pow( 0.95, scope.zoomSpeed ); 278 | } 279 | 280 | function onMouseDown( event ) { 281 | if ( scope.enabled === false ) return; 282 | event.preventDefault(); 283 | 284 | if ( event.button === scope.mouseButtons.ORBIT ) { 285 | if (event.shiftKey) { 286 | if (scope.noLookAround === true) return; 287 | state = STATE.LOOK_AROUND; 288 | lookAroundStart.set(event.clientX, event.clientY); 289 | } else { 290 | if (scope.noRotate === true) return; 291 | state = STATE.ROTATE; 292 | rotateStart.set(event.clientX, event.clientY); 293 | } 294 | } else if ( event.button === scope.mouseButtons.ZOOM ) { 295 | if ( scope.noZoom === true ) return; 296 | state = STATE.DOLLY; 297 | dollyStart.set( event.clientX, event.clientY ); 298 | 299 | } else if ( event.button === scope.mouseButtons.PAN ) { 300 | if ( scope.noPan === true ) return; 301 | state = STATE.PAN; 302 | panStart.set( event.clientX, event.clientY ); 303 | } 304 | 305 | document.addEventListener( 'mousemove', onMouseMove, false ); 306 | document.addEventListener( 'mouseup', onMouseUp, false ); 307 | document.addEventListener( 'keydown', onKeyDown, false ); 308 | scope.dispatchEvent( startEvent ); 309 | } 310 | 311 | function onMouseMove( event ) { 312 | 313 | if ( scope.enabled === false ) return; 314 | 315 | event.preventDefault(); 316 | 317 | var element = scope.domElement === document ? 
scope.domElement.body : scope.domElement; 318 | 319 | if ( state === STATE.ROTATE ) { 320 | if ( scope.noRotate === true ) return; 321 | 322 | rotateEnd.set( event.clientX, event.clientY ); 323 | rotateDelta.subVectors( rotateEnd, rotateStart ); 324 | 325 | // rotating across whole screen goes 360 degrees around 326 | scope.rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed ); 327 | 328 | // rotating up and down along whole screen attempts to go 360, but limited to 180 329 | scope.rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed ); 330 | 331 | rotateStart.copy( rotateEnd ); 332 | 333 | } else if ( state === STATE.LOOK_AROUND ) { 334 | if (scope.noLookAround === true) return; 335 | 336 | lookAroundEnd.set(event.clientX, event.clientY); 337 | lookAroundDelta.subVectors(lookAroundEnd, lookAroundStart); 338 | 339 | // rotating across whole screen goes 360 degrees around 340 | scope.lookAroundLeft(2 * Math.PI * lookAroundDelta.x / element.clientWidth * scope.lookAroundSpeed); 341 | 342 | // rotating up and down along whole screen attempts to go 360, but limited to 180 343 | scope.lookAroundUp(2 * Math.PI * lookAroundDelta.y / element.clientHeight * scope.lookAroundSpeed); 344 | 345 | lookAroundStart.copy(lookAroundEnd); 346 | 347 | } else if ( state === STATE.DOLLY ) { 348 | 349 | if ( scope.noZoom === true ) return; 350 | 351 | dollyEnd.set( event.clientX, event.clientY ); 352 | dollyDelta.subVectors( dollyEnd, dollyStart ); 353 | 354 | if ( dollyDelta.y > 0 ) { 355 | 356 | scope.dollyIn(getZoomScale()); 357 | 358 | } else { 359 | 360 | scope.dollyOut(getZoomScale()); 361 | 362 | } 363 | 364 | dollyStart.copy( dollyEnd ); 365 | 366 | } else if ( state === STATE.PAN ) { 367 | 368 | if ( scope.noPan === true ) return; 369 | 370 | panEnd.set( event.clientX, event.clientY ); 371 | panDelta.subVectors( panEnd, panStart ); 372 | 373 | scope.pan( panDelta.x, panDelta.y ); 374 | 375 | panStart.copy( panEnd ); 376 | 377 | } 378 | 379 | scope.update(); 380 | 381 | } 382 | 383 | function onMouseUp( /* event */ ) { 384 | 385 | if ( scope.enabled === false ) return; 386 | 387 | document.removeEventListener( 'mousemove', onMouseMove, false ); 388 | document.removeEventListener( 'mouseup', onMouseUp, false ); 389 | scope.dispatchEvent( endEvent ); 390 | state = STATE.NONE; 391 | 392 | } 393 | 394 | function onMouseWheel( event ) { 395 | 396 | if ( scope.enabled === false || scope.noZoom === true ) return; 397 | 398 | event.preventDefault(); 399 | event.stopPropagation(); 400 | 401 | var delta = 0; 402 | 403 | if ( event.wheelDelta !== undefined ) { // WebKit / Opera / Explorer 9 404 | 405 | delta = event.wheelDelta; 406 | 407 | } else if ( event.detail !== undefined ) { // Firefox 408 | 409 | delta = - event.detail; 410 | 411 | } 412 | 413 | if ( delta > 0 ) { 414 | 415 | scope.dollyOut(getZoomScale()); 416 | 417 | } else { 418 | 419 | scope.dollyIn(getZoomScale()); 420 | 421 | } 422 | 423 | scope.update(); 424 | scope.dispatchEvent( startEvent ); 425 | scope.dispatchEvent( endEvent ); 426 | 427 | } 428 | 429 | function onKeyDown( event ) { 430 | 431 | if ( scope.enabled === false || scope.noKeys === true || scope.noPan === true ) return; 432 | 433 | var validKey = true; 434 | switch ( event.keyCode ) { 435 | // case scope.keys.UP: 436 | // scope.pan( 0, scope.keyPanSpeed ); 437 | // scope.update(); 438 | // break; 439 | 440 | // case scope.keys.BOTTOM: 441 | // scope.pan( 0, - scope.keyPanSpeed ); 442 | // scope.update(); 443 | // break; 444 | 445 | 
case scope.keys.LEFT: 446 | scope.pan( scope.keyPanSpeed, 0 ); 447 | scope.update(); 448 | break; 449 | 450 | case scope.keys.RIGHT: 451 | scope.pan( - scope.keyPanSpeed, 0 ); 452 | scope.update(); 453 | break; 454 | default: 455 | validKey = false; 456 | break; 457 | } 458 | if (validKey) event.preventDefault(); 459 | } 460 | 461 | function touchstart( event ) { 462 | 463 | if ( scope.enabled === false ) return; 464 | 465 | switch ( event.touches.length ) { 466 | 467 | case 1: // one-fingered touch: rotate 468 | 469 | if ( scope.noRotate === true ) return; 470 | 471 | state = STATE.TOUCH_ROTATE; 472 | 473 | rotateStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); 474 | break; 475 | 476 | case 2: // two-fingered touch: dolly 477 | 478 | if ( scope.noZoom === true ) return; 479 | 480 | state = STATE.TOUCH_DOLLY; 481 | 482 | var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; 483 | var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; 484 | var distance = Math.sqrt( dx * dx + dy * dy ); 485 | dollyStart.set( 0, distance ); 486 | break; 487 | 488 | case 3: // three-fingered touch: pan 489 | 490 | if ( scope.noPan === true ) return; 491 | 492 | state = STATE.TOUCH_PAN; 493 | 494 | panStart.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); 495 | break; 496 | 497 | default: 498 | 499 | state = STATE.NONE; 500 | 501 | } 502 | 503 | scope.dispatchEvent( startEvent ); 504 | 505 | } 506 | 507 | function touchmove( event ) { 508 | 509 | if ( scope.enabled === false ) return; 510 | 511 | event.preventDefault(); 512 | event.stopPropagation(); 513 | 514 | var element = scope.domElement === document ? scope.domElement.body : scope.domElement; 515 | 516 | switch ( event.touches.length ) { 517 | 518 | case 1: // one-fingered touch: rotate 519 | 520 | if ( scope.noRotate === true ) return; 521 | if ( state !== STATE.TOUCH_ROTATE ) return; 522 | 523 | rotateEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); 524 | rotateDelta.subVectors( rotateEnd, rotateStart ); 525 | 526 | // rotating across whole screen goes 360 degrees around 527 | scope.rotateLeft( 2 * Math.PI * rotateDelta.x / element.clientWidth * scope.rotateSpeed ); 528 | // rotating up and down along whole screen attempts to go 360, but limited to 180 529 | scope.rotateUp( 2 * Math.PI * rotateDelta.y / element.clientHeight * scope.rotateSpeed ); 530 | 531 | rotateStart.copy( rotateEnd ); 532 | 533 | scope.update(); 534 | break; 535 | 536 | case 2: // two-fingered touch: dolly 537 | 538 | if ( scope.noZoom === true ) return; 539 | if ( state !== STATE.TOUCH_DOLLY ) return; 540 | 541 | var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX; 542 | var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY; 543 | var distance = Math.sqrt( dx * dx + dy * dy ); 544 | 545 | dollyEnd.set( 0, distance ); 546 | dollyDelta.subVectors( dollyEnd, dollyStart ); 547 | 548 | if ( dollyDelta.y > 0 ) { 549 | 550 | scope.dollyOut(getZoomScale()); 551 | 552 | } else { 553 | 554 | scope.dollyIn(getZoomScale()); 555 | 556 | } 557 | 558 | dollyStart.copy( dollyEnd ); 559 | 560 | scope.update(); 561 | break; 562 | 563 | case 3: // three-fingered touch: pan 564 | 565 | if ( scope.noPan === true ) return; 566 | if ( state !== STATE.TOUCH_PAN ) return; 567 | 568 | panEnd.set( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ); 569 | panDelta.subVectors( panEnd, panStart ); 570 | 571 | scope.pan( panDelta.x, panDelta.y ); 572 | 573 | panStart.copy( panEnd ); 574 | 575 | scope.update(); 576 | break; 577 | 578 | 
default: 579 | 580 | state = STATE.NONE; 581 | 582 | } 583 | 584 | } 585 | 586 | function touchend( /* event */ ) { 587 | 588 | if ( scope.enabled === false ) return; 589 | 590 | scope.dispatchEvent( endEvent ); 591 | state = STATE.NONE; 592 | 593 | } 594 | 595 | this.domElement.addEventListener( 'contextmenu', function ( event ) { event.preventDefault(); }, false ); 596 | this.domElement.addEventListener( 'mousedown', onMouseDown, false ); 597 | this.domElement.addEventListener( 'mousewheel', onMouseWheel, false ); 598 | this.domElement.addEventListener( 'DOMMouseScroll', onMouseWheel, false ); // firefox 599 | 600 | this.domElement.addEventListener( 'touchstart', touchstart, false ); 601 | this.domElement.addEventListener( 'touchend', touchend, false ); 602 | this.domElement.addEventListener( 'touchmove', touchmove, false ); 603 | 604 | // force an update at start 605 | this.update(); 606 | 607 | }; 608 | 609 | THREE.OrbitControls.prototype = Object.create( THREE.EventDispatcher.prototype ); 610 | -------------------------------------------------------------------------------- /run_all.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Run the whole reconstruction pipeline. 5 | 6 | Usage: `./run_all.py opensfm-dataset-dir` 7 | """ 8 | 9 | from collections import defaultdict 10 | from copy import deepcopy 11 | from functools import partial 12 | from time import sleep 13 | import numpy as np 14 | import sys 15 | 16 | from opensfm import align 17 | from opensfm import dataset 18 | from opensfm import features 19 | from opensfm import matching 20 | from opensfm import reconstruction 21 | from opensfm import types 22 | 23 | # Required to see debugging output from opensfm / ceres. 24 | import logging 25 | logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG) 26 | 27 | 28 | class Problem(object): 29 | """ 30 | Encodes domain knowledge about the problem. 31 | """ 32 | 33 | # Masks are image regions blacklisted for feature extraction. 34 | video2masks = { 35 | 0: [ 36 | dict(top=0.66, bottom=1.0, left=0.0, right=1.0), 37 | dict(top=0.0, bottom=0.2, left=0.0, right=0.25), 38 | dict(top=0.0, bottom=1.0, left=0.9, right=1.0), 39 | ], 40 | 1: [ 41 | dict(top=0.54, bottom=1.0, left=0.0, right=1.0), 42 | dict(top=0.0, bottom=1.0, left=0.9, right=1.0), 43 | ], 44 | 2: [ 45 | dict(top=0.68, bottom=1.0, left=0.0, right=1.0), 46 | dict(top=0.0, bottom=0.22, left=0.0, right=0.27), 47 | dict(top=0.0, bottom=1.0, left=0.88, right=1.0), 48 | ], 49 | 3: [ 50 | dict(top=0.61, bottom=1.0, left=0.0, right=1.0), 51 | dict(top=0.0, bottom=0.3, left=0.0, right=0.03), 52 | dict(top=0.0, bottom=1.0, left=0.9, right=1.0), 53 | ], 54 | 4: [ 55 | dict(top=0.66, bottom=1.0, left=0.0, right=1.0), 56 | dict(top=0.0, bottom=0.4, left=0.0, right=0.05), 57 | dict(top=0.0, bottom=1.0, left=0.88, right=1.0), 58 | ], 59 | } 60 | 61 | def __init__(self): 62 | # We have 5 videos of 10 frames each. 63 | self.videos = [0, 2, 3, 1, 4] 64 | self.frames = {v: range(10) for v in self.videos} 65 | 66 | self.image2masks = {} 67 | for v in self.videos: 68 | for f in self.frames[v]: 69 | self.image2masks[self.filename(v, f)] = self.video2masks[v] 70 | 71 | # Each video is shot with a potentially different camera. 72 | # This allows the optimization problem to choose different calibration 73 | # parameters (focal length, distortion params) for each camera. 
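        # For example, once the mapping below is built, image2camera["0_3.jpg"]
        # and image2camera["0_7.jpg"] are the same PerspectiveCamera instance
        # (id "camera_0"), while image2camera["1_0.jpg"] is a separate instance
        # (id "camera_1") whose calibration can be optimized independently.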
74 | self.image2camera = {} 75 | for v in self.videos: 76 | camera = self.make_default_camera() 77 | camera.id = "camera_{}".format(v) 78 | for f in self.frames[v]: 79 | self.image2camera[self.filename(v, f)] = camera 80 | 81 | @staticmethod 82 | def make_default_camera(): 83 | camera = types.PerspectiveCamera() 84 | camera.width, camera.height = 960, 720 # FIXME don't hardcode 85 | camera.focal_prior = camera.focal = 1.0 86 | camera.k1_prior = camera.k1 = 0.0 87 | camera.k2_prior = camera.k2 = 0.0 88 | return camera 89 | 90 | @staticmethod 91 | def filename(video_id, frame_id): 92 | return '{}_{}.jpg'.format(video_id, frame_id) 93 | 94 | @staticmethod 95 | def pair(video_id_1, frame_id_1, video_id_2, frame_id_2): 96 | return ( 97 | Problem.filename(video_id_1, frame_id_1), 98 | Problem.filename(video_id_2, frame_id_2)) 99 | 100 | @staticmethod 101 | def reco_triple( 102 | video_id_1, frame_id_1, video_id_2, frame_id_2, hint_forward): 103 | return ( 104 | Problem.filename(video_id_1, frame_id_1), 105 | Problem.filename(video_id_2, frame_id_2), 106 | hint_forward) 107 | 108 | def get_pairs_to_match(self): 109 | """ 110 | Return pairs of images in which to match features. 111 | """ 112 | pairs = [] 113 | # Adjacent frames in each video: 114 | for v in self.videos: 115 | for f1, f2 in zip(self.frames[v], self.frames[v][1:]): 116 | pairs.append(self.pair(v, f1, v, f2)) 117 | # Corresponding frames in different videos: 118 | for i, v1 in enumerate(self.videos): 119 | for v2 in self.videos[i + 1:]: 120 | for f1, f2 in zip(self.frames[v1], self.frames[v2]): 121 | pairs.append(self.pair(v1, f1, v2, f2)) 122 | return pairs 123 | 124 | def get_reconstruction_order(self): 125 | """ 126 | Return order in which to build reconstruction. 127 | 128 | Each element of the returned list is a `(image1, image2, hint_forward)` 129 | triple, where `image1` and `image2` are filenames, and hint_forward is 130 | a bool that indicates `image2` was shot from directly in front of 131 | `image1`. (We use this hint to make the reconstruction problem easier.) 132 | """ 133 | order = [] 134 | 135 | # Order 1: All frames from video 0, then all frames from video 1, etc. 136 | for i, v in enumerate(self.videos): 137 | if i > 0: 138 | prev_v = self.videos[i - 1] 139 | order.append(self.reco_triple(prev_v, 0, v, 0, False)) 140 | for j, f in enumerate(self.frames[v]): 141 | if j > 0: 142 | prev_f = self.frames[v][j - 1] 143 | order.append(self.reco_triple(v, prev_f, v, f, True)) 144 | 145 | # # Order 2: Frame 0 from all vids, then frame 1 from all vids, etc. 146 | # # This seems a little less robust than Order 1. 147 | # v1 = self.videos[0] 148 | # for v2 in self.videos[1:]: 149 | # order.append(self.reco_triple(v1, 0, v2, 0, False)) 150 | # for f1 in xrange(9): 151 | # for v in self.videos: 152 | # order.append(self.reco_triple(v, f1, v, f1 + 1, True)) 153 | 154 | return order 155 | 156 | 157 | def extract_features(problem, data): 158 | """ 159 | Extract features from all images, and save to DataSet. 
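    Masks are applied per image by temporarily injecting
    `problem.image2masks[image]` into `data.config` around the
    `features.extract_features` call below; each mask dict gives a
    blacklisted region in fractional image coordinates (0.0 to 1.0), e.g.

        dict(top=0.66, bottom=1.0, left=0.0, right=1.0)  # bottom third

    (values taken from `Problem.video2masks` above).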
160 | """ 161 | assert 'masks' not in data.config 162 | for image in data.images(): 163 | if data.feature_index_exists(image): 164 | print "{} - extracting features (cached)".format(image) 165 | else: 166 | print "{} - extracting features".format(image) 167 | data.config['masks'] = problem.image2masks[image] 168 | points, descriptors, colors = features.extract_features( 169 | data.image_as_array(image), data.config) 170 | del data.config['masks'] 171 | data.save_features(image, points, descriptors, colors) 172 | index = features.build_flann_index(descriptors, data.config) 173 | data.save_feature_index(image, index) 174 | 175 | 176 | def get_candidate_matches(index1, descriptors2, config): 177 | """ 178 | Helper for `match_custom`. 179 | 180 | For each feature in `descriptors2`, find its 2 nearest neighbors in 181 | `index1`, and return either the best one, or both, depending on how good 182 | they are relative to each other. 183 | 184 | Returns a list of `(feat1, feat2, distance)` triples. 185 | The words "distance" and "cost" are used interchangeably. 186 | """ 187 | search_params = dict(checks=config.get('flann_checks', 200)) 188 | squared_ratio = config.get('lowes_ratio', 0.6)**2 189 | results, dists = index1.knnSearch(descriptors2, 2, params=search_params) 190 | candidates = [] 191 | for feat2 in xrange(len(descriptors2)): 192 | feat1a, feat1b = results[feat2, :] 193 | dist1a, dist1b = dists[feat2, :] 194 | if dist1a < squared_ratio * dist1b: 195 | # First match is much better than second match. 196 | candidates.append((feat1a, feat2, dist1a)) 197 | else: 198 | # First match and second match are similarly good. 199 | candidates.append((feat1a, feat2, dist1a)) 200 | candidates.append((feat1b, feat2, dist1b)) 201 | return candidates 202 | 203 | 204 | def match_custom(descriptors1, index1, descriptors2, index2, config): 205 | """ 206 | Custom method for matching features in two images. 207 | 208 | (Note: Did not end up using this.) 209 | 210 | The idea is to be more forgiving than `matching.match_symmetric`, and 211 | return more candidate feature pairs. Relies on `get_candidate_matches` to 212 | get the initial matches, and then cleans up the resulting graph to make the 213 | matches symmetric. (Symmetric means that each feature in image1 matches at 214 | most 1 feature in image2, and vice versa.) 215 | """ 216 | candidates_1_2 = get_candidate_matches(index1, descriptors2, config) 217 | candidates_2_1 = get_candidate_matches(index2, descriptors1, config) 218 | pair_to_cost = defaultdict(float) 219 | pair_to_num_seen = defaultdict(int) 220 | for feat1, feat2, cost in candidates_1_2: 221 | pair_to_cost[(feat1, feat2)] += cost 222 | pair_to_num_seen[(feat1, feat2)] += 1 223 | for feat2, feat1, cost in candidates_2_1: 224 | pair_to_cost[(feat1, feat2)] += cost 225 | pair_to_num_seen[(feat1, feat2)] += 1 226 | 227 | # Now we have a bipartite graph: 228 | # - left side nodes are features from image1 229 | # - right side nodes are features from image2 230 | # - on the edges we have the total cost, and num_seen (1 if the edge has 231 | # been seen in one direction, or 2 if the edge has been seen in both 232 | # directions) 233 | # Some nodes might have degree 2, and we want to cut some edges to make all 234 | # nodes have degree 0 or 1. Intuitively, if we have two edges from a node, 235 | # we want to cut the "worse" one (the one corresponding to a match that has 236 | # higher cost). This could be done optimally with min-cost max-flow, but we 237 | # opt for a fast greedy approach. 
First, for any node on the left side that 238 | # has 2 edges, we delete the worse edge. Then we repeat that for nodes on 239 | # the right side. 240 | 241 | matches = {} 242 | costs = {} 243 | for pair, cost in pair_to_cost.iteritems(): 244 | if pair_to_num_seen[pair] != 2: 245 | continue 246 | if cost >= config.get('cberzan_max_cost', 1.0): 247 | continue 248 | feat1, feat2 = pair 249 | if (feat1 not in costs or cost < costs[feat1]): 250 | matches[feat1] = feat2 251 | costs[feat1] = cost 252 | back_matches = {} 253 | back_costs = {} 254 | for feat1, feat2 in matches.iteritems(): 255 | cost = costs[feat1] 256 | if (feat2 not in back_costs or cost < back_costs[feat2]): 257 | back_matches[feat2] = feat1 258 | back_costs[feat2] = cost 259 | back_matches_arr = np.array(back_matches.items()) 260 | matches_arr = back_matches_arr[:, [1, 0]] 261 | return matches_arr 262 | 263 | 264 | def match_features(problem, data): 265 | """ 266 | Match features between all relevant pairs of images, and save to DataSet. 267 | """ 268 | pairs = problem.get_pairs_to_match() 269 | candidates = defaultdict(list) 270 | for image1, image2 in pairs: 271 | candidates[image1].append(image2) 272 | for image1, image2s in candidates.iteritems(): 273 | if data.matches_exists(image1): 274 | print "{} - matching features (cached)".format(image1) 275 | else: 276 | print "{} - matching features".format(image1) 277 | points1, descriptors1, colors1 = data.load_features(image1) 278 | index1 = data.load_feature_index(image1, descriptors1) 279 | image1_matches = {} 280 | for image2 in image2s: 281 | print "{} - {} - matching features".format(image1, image2) 282 | points2, descriptors2, colors2 = data.load_features(image2) 283 | index2 = data.load_feature_index(image2, descriptors2) 284 | image1_matches[image2] = matching.match_symmetric( 285 | descriptors1, index1, descriptors2, index2, data.config) 286 | # image1_matches[image2] = match_custom( 287 | # descriptors1, index1, descriptors2, index2, data.config) 288 | data.save_matches(image1, image1_matches) 289 | 290 | 291 | def create_tracks(problem, data): 292 | """ 293 | Create tracks graph based on matches, and save to DataSet. 294 | 295 | A track is a feature that has been matched in 2 or more images. Each track 296 | has a unique id. In the reconstruction, every track becomes a point in 3D. 297 | 298 | The tracks graph is a bipartite graph with images on the left side and 299 | track ids on the right side. An edge indicates that the given track occurs 300 | in the given image. The edge stores info about that (image, track) 301 | correspondence. 
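    Illustrative access pattern (OpenSfM builds this as a networkx-style
    graph; the exact edge fields vary between versions):

        graph['0_0.jpg']            # all tracks observed in image 0_0.jpg
        graph['0_0.jpg'][track_id]  # observation of one track in that image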
302 | """ 303 | try: 304 | data.load_tracks_graph() 305 | print "creating track graph (cached)" 306 | return 307 | except IOError: 308 | print "creating track graph" 309 | 310 | image2features = {} 311 | image2colors = {} 312 | for image in data.images(): 313 | points, descriptors, colors = data.load_features(image) 314 | image2features[image] = points[:, :2] 315 | image2colors[image] = colors 316 | 317 | images2matches = {} 318 | for image1 in data.images(): 319 | try: 320 | matches = data.load_matches(image1) 321 | except IOError: 322 | continue 323 | for image2 in matches: 324 | images2matches[image1, image2] = matches[image2] 325 | 326 | tracks_graph = matching.create_tracks_graph( 327 | image2features, image2colors, images2matches, data.config) 328 | data.save_tracks_graph(tracks_graph) 329 | 330 | 331 | def get_empty_metadata(): 332 | empty_metadata = types.ShotMetadata() 333 | empty_metadata.gps_position = [0.0, 0.0, 0.0] 334 | empty_metadata.gps_dop = 999999.0 335 | empty_metadata.orientation = 1 336 | return empty_metadata 337 | 338 | 339 | def custom_two_view_reconstruction( 340 | p1, p2, camera1, camera2, thresh, hint_forward): 341 | """ 342 | Find relative pose of `camera2` w.r.t. `camera1` based on matches. 343 | 344 | The matches are given as arrays of normalized 2D points, `p1` and `p2`. 345 | 346 | If `hint_forward` is True, retry until we find a transformation that places 347 | `camera2` in front of `camera1`. 348 | 349 | Returns `R, t, inliers` where `R` is rotation as a Rodrigues vector, `t` is 350 | translation, and `inliers` is a list of indices in `p1` and `p2` that agree 351 | with this transformation. Note that the reconstruction is ambiguous up to 352 | scale, so the norm of `t` is meaningless. 353 | """ 354 | max_trials = 100 # FIXME put const in config 355 | for trial in xrange(max_trials): 356 | R, t, inliers = reconstruction.two_view_reconstruction( 357 | p1, p2, camera1, camera2, thresh) 358 | t_norm = t / np.linalg.norm(t) 359 | # print "R={} t={} t_norm={} len(inliers)={}".format( 360 | # R, t, t_norm, len(inliers)) 361 | if hint_forward and t_norm[2] >= -0.75: # FIXME put const in config 362 | print "hint_forward violated; retrying ({}/{})".format( 363 | trial + 1, max_trials) 364 | # HACK: Reconstruction.two_view_reconstruction uses 365 | # pyopengv.relative_pose_ransac, which initializes a 366 | # CentralRelativePoseSacProblem with the default options, including 367 | # randomSeed=true which means initializing the RNG with the current 368 | # time in seconds. This means that within the same second, we will 369 | # get exactly the same result. There is no way to avoid this while 370 | # using pyopengv, so we just sleep for a bit to get a new seed. 371 | sleep(0.5) 372 | continue 373 | break 374 | else: 375 | print "WARNING: hint_forward failed after {} trials".format(max_trials) 376 | # We failed to find a reconstruction that obeys hint_forward, so just 377 | # return an identity rotation and a straight-along-z-axis translation, 378 | # and hope that bundle adjustment will find a better solution. 
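        # (A zero Rodrigues vector is the identity rotation, and under the
        # convention of the hint check above, where t_norm[2] < -0.75 counts
        # as "forward", a translation of [0, 0, -1] is straight ahead.)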
379 | assert hint_forward 380 | R = np.zeros(3) 381 | t = np.array([0.0, 0.0, -1.0]) 382 | inliers = "ignored" 383 | return R, t, inliers 384 | 385 | 386 | def _debug_short(graph, reco, im1, im2, prefix): 387 | inlier_tracks = [track for track in graph[im2] if track in reco.points] 388 | print ( 389 | "{}: reco has {} points; new shot {} has {} points in reco" 390 | .format(prefix, len(reco.points), im2, len(inlier_tracks))) 391 | 392 | 393 | def _debug_verbose(graph, reco, im1, im2, prefix): 394 | print "{}:".format(prefix) 395 | 396 | inlier_tracks = [track for track in graph[im2] if track in reco.points] 397 | print "reco has {} points; new shot {} has {} points in reco".format( 398 | len(reco.points), im2, len(inlier_tracks)) 399 | 400 | cooccur = {} 401 | for image in reco.shots: 402 | if image == im2: 403 | continue 404 | cooccurring_tracks = [ 405 | track for track in inlier_tracks if track in graph[image]] 406 | cooccur[image] = len(cooccurring_tracks) 407 | print "tracks in reco and {} co-occur in {}".format(im2, cooccur) 408 | 409 | rel_pose = reco.shots[im2].pose.compose(reco.shots[im1].pose.inverse()) 410 | print "new shot has rel pose R={} t={}".format( 411 | rel_pose.rotation, rel_pose.translation) 412 | 413 | print 414 | 415 | 416 | def bootstrap_reconstruction( 417 | problem, data, graph, im1, im2, hint_forward=False): 418 | """ 419 | Build 3D reconstruction based on two images `im1` and `im2`. 420 | 421 | See `custom_two_view_reconstruction` for the meaning of `hint_forward`. 422 | 423 | Returns the `Reconstruction` object, or None if reconstruction failed. 424 | """ 425 | print "----------------" 426 | print "bootstrap_reconstruction({}, {}, hint_forward={})".format( 427 | im1, im2, hint_forward) 428 | 429 | camera1 = problem.image2camera[im1] 430 | camera2 = problem.image2camera[im2] 431 | cameras = {camera1.id: camera1, camera2.id: camera2} 432 | 433 | tracks, p1, p2 = matching.common_tracks(graph, im1, im2) 434 | print "Common tracks: {}".format(len(tracks)) 435 | 436 | thresh = data.config.get('five_point_algo_threshold', 0.006) 437 | min_inliers = data.config.get('five_point_algo_min_inliers', 50) 438 | R, t, inliers = custom_two_view_reconstruction( 439 | p1, p2, camera1, camera2, thresh, hint_forward) 440 | print "bootstrap: R={} t={} len(inliers)={}".format(R, t, len(inliers)) 441 | if len(inliers) <= 5: # FIXME: put const in config 442 | print "bootstrap failed: not enough points in initial reconstruction" 443 | return 444 | 445 | # Reconstruction is up to scale; set translation to 1. 446 | # (This will be corrected later in the bundle adjustment step.) 
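    # E.g. the sample run recovers t = [4.16, -9.03, -67.6] for the first pair
    # (see sample_output/stdout); after normalization only the direction
    # survives, and bundle adjustment later settles the actual scale.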
447 | t /= np.linalg.norm(t) 448 | 449 | reco = types.Reconstruction() 450 | reco.cameras = cameras 451 | 452 | shot1 = types.Shot() 453 | shot1.id = im1 454 | shot1.camera = camera1 455 | shot1.pose = types.Pose() 456 | shot1.metadata = get_empty_metadata() 457 | reco.add_shot(shot1) 458 | 459 | shot2 = types.Shot() 460 | shot2.id = im2 461 | shot2.camera = camera2 462 | shot2.pose = types.Pose(R, t) 463 | shot2.metadata = get_empty_metadata() 464 | reco.add_shot(shot2) 465 | 466 | reconstruction.triangulate_shot_features( 467 | graph, reco, im1, 468 | data.config.get('triangulation_threshold', 0.004), 469 | data.config.get('triangulation_min_ray_angle', 2.0)) 470 | if len(reco.points) < min_inliers: 471 | print "bootstrap failed: not enough points after triangulation" 472 | return 473 | 474 | reconstruction.bundle_single_view(graph, reco, im2, data.config) 475 | reconstruction.retriangulate(graph, reco, data.config) 476 | reconstruction.bundle_single_view(graph, reco, im2, data.config) 477 | 478 | debug = partial(_debug_short, graph, reco, im1, im2) 479 | debug("bootstraped reconstruction") 480 | 481 | return reco 482 | 483 | 484 | def grow_reconstruction( 485 | problem, data, graph, reco, im1, im2, hint_forward=False): 486 | """ 487 | Grow the given reconstruction `reco` by adding a new image `im2` to it. 488 | 489 | The new image `im2` is initially matched against an image `im1`, which must 490 | already exist in the reconstruction. 491 | 492 | See `custom_two_view_reconstruction` for the meaning of `hint_forward`. 493 | 494 | Updates the reconstruction in-place and returns `True` on success. 495 | """ 496 | # FIXME: 497 | # - Make DRY with bootstrap_reconstruction; they look similar but they're 498 | # different in subtle ways. 499 | # - Could probably align once at the end, instead of aligning at every 500 | # step. 501 | # - Sometimes we get "Termination: NO_CONVERGENCE" from Ceres. Check for 502 | # that error case and perhaps run it for more iterations. 503 | 504 | print "----------------" 505 | print "grow_reconstruction({}, {}, hint_forward={})".format( 506 | im1, im2, hint_forward) 507 | 508 | reconstruction.bundle(graph, reco, None, data.config) 509 | align.align_reconstruction(reco, None, data.config) 510 | 511 | assert im1 in reco.shots 512 | assert im2 not in reco.shots 513 | 514 | camera1 = problem.image2camera[im1] 515 | camera2 = problem.image2camera[im2] 516 | 517 | tracks, p1, p2 = matching.common_tracks(graph, im1, im2) 518 | print "Common tracks: {}".format(len(tracks)) 519 | 520 | thresh = data.config.get('five_point_algo_threshold', 0.006) 521 | R, t, inliers = custom_two_view_reconstruction( 522 | p1, p2, camera1, camera2, thresh, hint_forward) 523 | print "grow: R={} t={} len(inliers)={}".format(R, t, len(inliers)) 524 | if len(inliers) <= 5: 525 | print "grow failed: not enough points in initial reconstruction" 526 | return False 527 | 528 | # Reconstruction is up to scale; set translation to 1. 529 | # (This will be corrected later in the bundle adjustment step.) 
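    # As in bootstrap_reconstruction, normalizing fixes the scale gauge of the
    # relative pose only; the world-frame pose of im2 is obtained a few lines
    # below by composing this relative pose with im1's existing pose.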
530 | t /= np.linalg.norm(t) 531 | 532 | assert camera1.id in reco.cameras 533 | if camera2.id not in reco.cameras: 534 | reco.add_camera(camera2) 535 | 536 | shot1_pose = reco.shots[im1].pose 537 | shot2 = types.Shot() 538 | shot2.id = im2 539 | shot2.camera = camera2 540 | shot2.pose = types.Pose(R, t).compose(shot1_pose) 541 | shot2.metadata = get_empty_metadata() 542 | reco.add_shot(shot2) 543 | 544 | debug = partial(_debug_short, graph, reco, im1, im2) 545 | debug("started with") 546 | 547 | # FIXME: fail here if im2 does not have enough tracks in common with reco 548 | 549 | pose_before = deepcopy(reco.shots[im2].pose) 550 | reconstruction.bundle_single_view(graph, reco, im2, data.config) 551 | 552 | # It's possible that bundle_single_view caused hint_forward to be violated. 553 | # If that's the case, revert to the pose before bundle_single_view, and 554 | # hope that after triangulating the points from `im2`, bundle adjustment 555 | # will find a better solution. 556 | rel_pose_after = reco.shots[im2].pose.compose( 557 | reco.shots[im1].pose.inverse()) 558 | t_after_norm = rel_pose_after.translation / np.linalg.norm( 559 | rel_pose_after.translation) 560 | # print "*** t_after_norm={}".format(t_after_norm) 561 | if hint_forward and t_after_norm[2] >= -0.75: # FIXME put const in config 562 | print "*** hint_forward violated; undoing bundle_single_view" 563 | reco.shots[im2].pose = pose_before 564 | rel_pose_after = reco.shots[im2].pose.compose( 565 | reco.shots[im1].pose.inverse()) 566 | t_after_norm = rel_pose_after.translation / np.linalg.norm( 567 | rel_pose_after.translation) 568 | print "*** after undo: t_after_norm={}".format(t_after_norm) 569 | # print 570 | 571 | reconstruction.triangulate_shot_features( 572 | graph, reco, im2, 573 | data.config.get('triangulation_threshold', 0.004), 574 | data.config.get('triangulation_min_ray_angle', 2.0)) 575 | reconstruction.bundle(graph, reco, None, data.config) 576 | reconstruction.remove_outliers(graph, reco, data.config) 577 | align.align_reconstruction(reco, None, data.config) 578 | reconstruction.retriangulate(graph, reco, data.config) 579 | reconstruction.bundle(graph, reco, None, data.config) 580 | 581 | debug("ended with") 582 | 583 | return True 584 | 585 | 586 | def reconstruct(problem, data): 587 | """ 588 | Reconstruct 3D scene and save to the given DataSet. 589 | 590 | Relies on the feature matches and tracks graph to be already computed. 591 | 592 | Returns the reconstruction. 593 | """ 594 | graph = data.load_tracks_graph() 595 | reco_triple = problem.get_reconstruction_order() 596 | reco = bootstrap_reconstruction( 597 | problem, data, graph, 598 | reco_triple[0][0], reco_triple[0][1], reco_triple[0][2]) 599 | for prev_img, next_img, hint_forward in reco_triple[1:]: 600 | result = grow_reconstruction( 601 | problem, data, graph, reco, prev_img, next_img, hint_forward) 602 | if not result: 603 | print "Stopping because grow_reconstruction failed..." 604 | break 605 | 606 | # Give each 3D point in the reconstruction a color. 607 | reconstruction.paint_reconstruction(data, graph, reco) 608 | 609 | # Align the reconstruction with the ground plane for a sane visualization. 
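    # (data.save_reconstruction below serializes the result as JSON into the
    # dataset directory, reconstruction.json by default in OpenSfM.)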
610 | align.align_reconstruction(reco, None, data.config) 611 | 612 | data.save_reconstruction([reco]) 613 | return reco 614 | 615 | 616 | if __name__ == "__main__": 617 | if len(sys.argv) != 2: 618 | print "usage: {} dataset".format(sys.argv[0]) 619 | sys.exit(1) 620 | problem = Problem() 621 | data = dataset.DataSet(sys.argv[1]) 622 | 623 | extract_features(problem, data) 624 | match_features(problem, data) 625 | create_tracks(problem, data) 626 | reco = reconstruct(problem, data) 627 | -------------------------------------------------------------------------------- /sample_output/stdout: -------------------------------------------------------------------------------- 1 | 4_7.jpg - extracting features (cached) 2 | 1_0.jpg - extracting features (cached) 3 | 1_8.jpg - extracting features (cached) 4 | 0_2.jpg - extracting features (cached) 5 | 4_1.jpg - extracting features (cached) 6 | 4_6.jpg - extracting features 7 | 4_4.jpg - extracting features 8 | 0_5.jpg - extracting features 9 | 4_2.jpg - extracting features 10 | 4_3.jpg - extracting features 11 | 2_8.jpg - extracting features 12 | 1_7.jpg - extracting features 13 | 2_5.jpg - extracting features 14 | 0_4.jpg - extracting features 15 | 3_9.jpg - extracting features 16 | 4_0.jpg - extracting features 17 | 0_1.jpg - extracting features 18 | 3_3.jpg - extracting features 19 | 0_9.jpg - extracting features 20 | 3_5.jpg - extracting features 21 | 2_9.jpg - extracting features 22 | 0_3.jpg - extracting features 23 | 3_7.jpg - extracting features 24 | 2_2.jpg - extracting features 25 | 1_4.jpg - extracting features 26 | 4_8.jpg - extracting features 27 | 0_0.jpg - extracting features 28 | 3_8.jpg - extracting features 29 | 3_1.jpg - extracting features 30 | 2_7.jpg - extracting features 31 | 4_9.jpg - extracting features 32 | 2_4.jpg - extracting features 33 | 1_6.jpg - extracting features 34 | 3_6.jpg - extracting features 35 | 3_4.jpg - extracting features 36 | 4_5.jpg - extracting features 37 | 2_0.jpg - extracting features 38 | 1_9.jpg - extracting features 39 | 1_3.jpg - extracting features 40 | 2_1.jpg - extracting features 41 | 1_5.jpg - extracting features 42 | 0_8.jpg - extracting features 43 | 3_2.jpg - extracting features 44 | 2_3.jpg - extracting features 45 | 2_6.jpg - extracting features 46 | 1_2.jpg - extracting features 47 | 1_1.jpg - extracting features 48 | 3_0.jpg - extracting features 49 | 0_6.jpg - extracting features 50 | 0_7.jpg - extracting features 51 | 2_7.jpg - matching features 52 | 2_7.jpg - 2_8.jpg - matching features 53 | 2_7.jpg - 3_7.jpg - matching features 54 | 2_7.jpg - 1_7.jpg - matching features 55 | 2_7.jpg - 4_7.jpg - matching features 56 | 2_8.jpg - matching features 57 | 2_8.jpg - 2_9.jpg - matching features 58 | 2_8.jpg - 3_8.jpg - matching features 59 | 2_8.jpg - 1_8.jpg - matching features 60 | 2_8.jpg - 4_8.jpg - matching features 61 | 0_5.jpg - matching features 62 | 0_5.jpg - 0_6.jpg - matching features 63 | 0_5.jpg - 2_5.jpg - matching features 64 | 0_5.jpg - 3_5.jpg - matching features 65 | 0_5.jpg - 1_5.jpg - matching features 66 | 0_5.jpg - 4_5.jpg - matching features 67 | 4_3.jpg - matching features 68 | 4_3.jpg - 4_4.jpg - matching features 69 | 2_4.jpg - matching features 70 | 2_4.jpg - 2_5.jpg - matching features 71 | 2_4.jpg - 3_4.jpg - matching features 72 | 2_4.jpg - 1_4.jpg - matching features 73 | 2_4.jpg - 4_4.jpg - matching features 74 | 2_2.jpg - matching features 75 | 2_2.jpg - 2_3.jpg - matching features 76 | 2_2.jpg - 3_2.jpg - matching features 77 | 2_2.jpg - 1_2.jpg - 
matching features 78 | 2_2.jpg - 4_2.jpg - matching features 79 | 1_2.jpg - matching features 80 | 1_2.jpg - 1_3.jpg - matching features 81 | 1_2.jpg - 4_2.jpg - matching features 82 | 0_8.jpg - matching features 83 | 0_8.jpg - 0_9.jpg - matching features 84 | 0_8.jpg - 2_8.jpg - matching features 85 | 0_8.jpg - 3_8.jpg - matching features 86 | 0_8.jpg - 1_8.jpg - matching features 87 | 0_8.jpg - 4_8.jpg - matching features 88 | 0_1.jpg - matching features 89 | 0_1.jpg - 0_2.jpg - matching features 90 | 0_1.jpg - 2_1.jpg - matching features 91 | 0_1.jpg - 3_1.jpg - matching features 92 | 0_1.jpg - 1_1.jpg - matching features 93 | 0_1.jpg - 4_1.jpg - matching features 94 | 0_3.jpg - matching features 95 | 0_3.jpg - 0_4.jpg - matching features 96 | 0_3.jpg - 2_3.jpg - matching features 97 | 0_3.jpg - 3_3.jpg - matching features 98 | 0_3.jpg - 1_3.jpg - matching features 99 | 0_3.jpg - 4_3.jpg - matching features 100 | 1_7.jpg - matching features 101 | 1_7.jpg - 1_8.jpg - matching features 102 | 1_7.jpg - 4_7.jpg - matching features 103 | 3_9.jpg - matching features 104 | 3_9.jpg - 1_9.jpg - matching features 105 | 3_9.jpg - 4_9.jpg - matching features 106 | 3_8.jpg - matching features 107 | 3_8.jpg - 3_9.jpg - matching features 108 | 3_8.jpg - 1_8.jpg - matching features 109 | 3_8.jpg - 4_8.jpg - matching features 110 | 4_2.jpg - matching features 111 | 4_2.jpg - 4_3.jpg - matching features 112 | 1_6.jpg - matching features 113 | 1_6.jpg - 1_7.jpg - matching features 114 | 1_6.jpg - 4_6.jpg - matching features 115 | 4_1.jpg - matching features 116 | 4_1.jpg - 4_2.jpg - matching features 117 | 2_6.jpg - matching features 118 | 2_6.jpg - 2_7.jpg - matching features 119 | 2_6.jpg - 3_6.jpg - matching features 120 | 2_6.jpg - 1_6.jpg - matching features 121 | 2_6.jpg - 4_6.jpg - matching features 122 | 3_1.jpg - matching features 123 | 3_1.jpg - 3_2.jpg - matching features 124 | 3_1.jpg - 1_1.jpg - matching features 125 | 3_1.jpg - 4_1.jpg - matching features 126 | 2_0.jpg - matching features 127 | 2_0.jpg - 2_1.jpg - matching features 128 | 2_0.jpg - 3_0.jpg - matching features 129 | 2_0.jpg - 1_0.jpg - matching features 130 | 2_0.jpg - 4_0.jpg - matching features 131 | 3_2.jpg - matching features 132 | 3_2.jpg - 3_3.jpg - matching features 133 | 3_2.jpg - 1_2.jpg - matching features 134 | 3_2.jpg - 4_2.jpg - matching features 135 | 1_3.jpg - matching features 136 | 1_3.jpg - 1_4.jpg - matching features 137 | 1_3.jpg - 4_3.jpg - matching features 138 | 4_5.jpg - matching features 139 | 4_5.jpg - 4_6.jpg - matching features 140 | 1_5.jpg - matching features 141 | 1_5.jpg - 1_6.jpg - matching features 142 | 1_5.jpg - 4_5.jpg - matching features 143 | 4_7.jpg - matching features 144 | 4_7.jpg - 4_8.jpg - matching features 145 | 2_3.jpg - matching features 146 | 2_3.jpg - 2_4.jpg - matching features 147 | 2_3.jpg - 3_3.jpg - matching features 148 | 2_3.jpg - 1_3.jpg - matching features 149 | 2_3.jpg - 4_3.jpg - matching features 150 | 0_0.jpg - matching features 151 | 0_0.jpg - 0_1.jpg - matching features 152 | 0_0.jpg - 2_0.jpg - matching features 153 | 0_0.jpg - 3_0.jpg - matching features 154 | 0_0.jpg - 1_0.jpg - matching features 155 | 0_0.jpg - 4_0.jpg - matching features 156 | 3_6.jpg - matching features 157 | 3_6.jpg - 3_7.jpg - matching features 158 | 3_6.jpg - 1_6.jpg - matching features 159 | 3_6.jpg - 4_6.jpg - matching features 160 | 1_4.jpg - matching features 161 | 1_4.jpg - 1_5.jpg - matching features 162 | 1_4.jpg - 4_4.jpg - matching features 163 | 3_7.jpg - matching features 164 
| 3_7.jpg - 3_8.jpg - matching features 165 | 3_7.jpg - 1_7.jpg - matching features 166 | 3_7.jpg - 4_7.jpg - matching features 167 | 2_5.jpg - matching features 168 | 2_5.jpg - 2_6.jpg - matching features 169 | 2_5.jpg - 3_5.jpg - matching features 170 | 2_5.jpg - 1_5.jpg - matching features 171 | 2_5.jpg - 4_5.jpg - matching features 172 | 0_9.jpg - matching features 173 | 0_9.jpg - 2_9.jpg - matching features 174 | 0_9.jpg - 3_9.jpg - matching features 175 | 0_9.jpg - 1_9.jpg - matching features 176 | 0_9.jpg - 4_9.jpg - matching features 177 | 1_0.jpg - matching features 178 | 1_0.jpg - 1_1.jpg - matching features 179 | 1_0.jpg - 4_0.jpg - matching features 180 | 3_0.jpg - matching features 181 | 3_0.jpg - 3_1.jpg - matching features 182 | 3_0.jpg - 1_0.jpg - matching features 183 | 3_0.jpg - 4_0.jpg - matching features 184 | 4_8.jpg - matching features 185 | 4_8.jpg - 4_9.jpg - matching features 186 | 1_9.jpg - matching features 187 | 1_9.jpg - 4_9.jpg - matching features 188 | 4_4.jpg - matching features 189 | 4_4.jpg - 4_5.jpg - matching features 190 | 2_1.jpg - matching features 191 | 2_1.jpg - 2_2.jpg - matching features 192 | 2_1.jpg - 3_1.jpg - matching features 193 | 2_1.jpg - 1_1.jpg - matching features 194 | 2_1.jpg - 4_1.jpg - matching features 195 | 0_2.jpg - matching features 196 | 0_2.jpg - 0_3.jpg - matching features 197 | 0_2.jpg - 2_2.jpg - matching features 198 | 0_2.jpg - 3_2.jpg - matching features 199 | 0_2.jpg - 1_2.jpg - matching features 200 | 0_2.jpg - 4_2.jpg - matching features 201 | 0_4.jpg - matching features 202 | 0_4.jpg - 0_5.jpg - matching features 203 | 0_4.jpg - 2_4.jpg - matching features 204 | 0_4.jpg - 3_4.jpg - matching features 205 | 0_4.jpg - 1_4.jpg - matching features 206 | 0_4.jpg - 4_4.jpg - matching features 207 | 3_5.jpg - matching features 208 | 3_5.jpg - 3_6.jpg - matching features 209 | 3_5.jpg - 1_5.jpg - matching features 210 | 3_5.jpg - 4_5.jpg - matching features 211 | 3_3.jpg - matching features 212 | 3_3.jpg - 3_4.jpg - matching features 213 | 3_3.jpg - 1_3.jpg - matching features 214 | 3_3.jpg - 4_3.jpg - matching features 215 | 1_1.jpg - matching features 216 | 1_1.jpg - 1_2.jpg - matching features 217 | 1_1.jpg - 4_1.jpg - matching features 218 | 0_7.jpg - matching features 219 | 0_7.jpg - 0_8.jpg - matching features 220 | 0_7.jpg - 2_7.jpg - matching features 221 | 0_7.jpg - 3_7.jpg - matching features 222 | 0_7.jpg - 1_7.jpg - matching features 223 | 0_7.jpg - 4_7.jpg - matching features 224 | 2_9.jpg - matching features 225 | 2_9.jpg - 3_9.jpg - matching features 226 | 2_9.jpg - 1_9.jpg - matching features 227 | 2_9.jpg - 4_9.jpg - matching features 228 | 1_8.jpg - matching features 229 | 1_8.jpg - 1_9.jpg - matching features 230 | 1_8.jpg - 4_8.jpg - matching features 231 | 4_6.jpg - matching features 232 | 4_6.jpg - 4_7.jpg - matching features 233 | 4_0.jpg - matching features 234 | 4_0.jpg - 4_1.jpg - matching features 235 | 3_4.jpg - matching features 236 | 3_4.jpg - 3_5.jpg - matching features 237 | 3_4.jpg - 1_4.jpg - matching features 238 | 3_4.jpg - 4_4.jpg - matching features 239 | 0_6.jpg - matching features 240 | 0_6.jpg - 0_7.jpg - matching features 241 | 0_6.jpg - 2_6.jpg - matching features 242 | 0_6.jpg - 3_6.jpg - matching features 243 | 0_6.jpg - 1_6.jpg - matching features 244 | 0_6.jpg - 4_6.jpg - matching features 245 | creating track graph 246 | ---------------- 247 | bootstrap_reconstruction(0_0.jpg, 0_1.jpg, hint_forward=True) 248 | Common tracks: 293 249 | bootstrap: R=[-0.0026113 0.00097914 
0.00289513] t=[ 4.15746631 -9.03045287 -67.59726106] len(inliers)=282 250 | bootstraped reconstruction: reco has 218 points; new shot 0_1.jpg has 218 points in reco 251 | ---------------- 252 | grow_reconstruction(0_1.jpg, 0_2.jpg, hint_forward=True) 253 | Common tracks: 225 254 | grow: R=[-0.00216996 -0.00101607 0.00506867] t=[ 0.05378054 -0.09831717 -0.72784638] len(inliers)=209 255 | started with: reco has 218 points; new shot 0_2.jpg has 90 points in reco 256 | ended with: reco has 307 points; new shot 0_2.jpg has 179 points in reco 257 | ---------------- 258 | grow_reconstruction(0_2.jpg, 0_3.jpg, hint_forward=True) 259 | Common tracks: 182 260 | grow: R=[-0.00320015 0.00038499 0.00582655] t=[ 0.02269515 -0.04536311 -0.35866523] len(inliers)=164 261 | started with: reco has 307 points; new shot 0_3.jpg has 59 points in reco 262 | ended with: reco has 395 points; new shot 0_3.jpg has 147 points in reco 263 | ---------------- 264 | grow_reconstruction(0_3.jpg, 0_4.jpg, hint_forward=True) 265 | Common tracks: 72 266 | grow: R=[ 1.55841442e-03 -1.80209892e-03 -5.04594462e-05] t=[ 0.04598033 -0.0793917 -0.70846328] len(inliers)=63 267 | started with: reco has 395 points; new shot 0_4.jpg has 19 points in reco 268 | ended with: reco has 439 points; new shot 0_4.jpg has 62 points in reco 269 | ---------------- 270 | grow_reconstruction(0_4.jpg, 0_5.jpg, hint_forward=True) 271 | Common tracks: 70 272 | grow: R=[-0.00172733 -0.00667166 0.00808561] t=[ 0.04156167 -0.06331776 -0.56911623] len(inliers)=56 273 | started with: reco has 439 points; new shot 0_5.jpg has 11 points in reco 274 | ended with: reco has 481 points; new shot 0_5.jpg has 53 points in reco 275 | ---------------- 276 | grow_reconstruction(0_5.jpg, 0_6.jpg, hint_forward=True) 277 | Common tracks: 62 278 | hint_forward violated; retrying (1/100) 279 | hint_forward violated; retrying (2/100) 280 | hint_forward violated; retrying (3/100) 281 | hint_forward violated; retrying (4/100) 282 | grow: R=[ 0.00203125 0.0023269 0.0058134 ] t=[ 0.23015393 -0.6976593 -6.93736807] len(inliers)=54 283 | started with: reco has 481 points; new shot 0_6.jpg has 19 points in reco 284 | ended with: reco has 504 points; new shot 0_6.jpg has 43 points in reco 285 | ---------------- 286 | grow_reconstruction(0_6.jpg, 0_7.jpg, hint_forward=True) 287 | Common tracks: 83 288 | grow: R=[-0.00457994 0.00776047 -0.00067569] t=[ 0.04178429 -0.20840729 -1.56510472] len(inliers)=76 289 | started with: reco has 504 points; new shot 0_7.jpg has 18 points in reco 290 | ended with: reco has 549 points; new shot 0_7.jpg has 63 points in reco 291 | ---------------- 292 | grow_reconstruction(0_7.jpg, 0_8.jpg, hint_forward=True) 293 | Common tracks: 152 294 | grow: R=[-0.00344428 0.0005717 0.0049515 ] t=[ 0.0558397 -0.10013379 -0.73045368] len(inliers)=126 295 | started with: reco has 549 points; new shot 0_8.jpg has 20 points in reco 296 | ended with: reco has 638 points; new shot 0_8.jpg has 108 points in reco 297 | ---------------- 298 | grow_reconstruction(0_8.jpg, 0_9.jpg, hint_forward=True) 299 | Common tracks: 193 300 | grow: R=[-0.00449678 0.00612259 -0.00131706] t=[ 0.11867744 -0.24394559 -1.77620371] len(inliers)=176 301 | started with: reco has 638 points; new shot 0_9.jpg has 42 points in reco 302 | ended with: reco has 749 points; new shot 0_9.jpg has 151 points in reco 303 | ---------------- 304 | grow_reconstruction(0_0.jpg, 2_0.jpg, hint_forward=False) 305 | Common tracks: 103 306 | grow: R=[-0.01488433 -0.00080515 0.0051172 ] t=[ 0.84138039 
-2.40316756 -17.1903626 ] len(inliers)=97 307 | started with: reco has 749 points; new shot 2_0.jpg has 60 points in reco 308 | ended with: reco has 804 points; new shot 2_0.jpg has 111 points in reco 309 | ---------------- 310 | grow_reconstruction(2_0.jpg, 2_1.jpg, hint_forward=True) 311 | Common tracks: 231 312 | grow: R=[-0.00399514 -0.00097171 0.00337843] t=[ 0.03959416 -0.10006968 -0.638021 ] len(inliers)=221 313 | started with: reco has 804 points; new shot 2_1.jpg has 80 points in reco 314 | ended with: reco has 948 points; new shot 2_1.jpg has 224 points in reco 315 | ---------------- 316 | grow_reconstruction(2_1.jpg, 2_2.jpg, hint_forward=True) 317 | Common tracks: 160 318 | grow: R=[-0.00527386 -0.00143242 0.00510689] t=[ 0.31315546 -0.56510191 -3.77770571] len(inliers)=146 319 | started with: reco has 948 points; new shot 2_2.jpg has 70 points in reco 320 | ended with: reco has 1041 points; new shot 2_2.jpg has 163 points in reco 321 | ---------------- 322 | grow_reconstruction(2_2.jpg, 2_3.jpg, hint_forward=True) 323 | Common tracks: 147 324 | grow: R=[-0.0047498 -0.00104171 0.00587619] t=[ 0.05923226 -0.11623709 -0.80769912] len(inliers)=137 325 | started with: reco has 1041 points; new shot 2_3.jpg has 80 points in reco 326 | ended with: reco has 1137 points; new shot 2_3.jpg has 175 points in reco 327 | ---------------- 328 | grow_reconstruction(2_3.jpg, 2_4.jpg, hint_forward=True) 329 | Common tracks: 92 330 | grow: R=[-0.01089005 -0.00095324 0.01206417] t=[ 0.63853365 -2.37471186 -12.86195923] len(inliers)=82 331 | started with: reco has 1137 points; new shot 2_4.jpg has 33 points in reco 332 | ended with: reco has 1204 points; new shot 2_4.jpg has 100 points in reco 333 | ---------------- 334 | grow_reconstruction(2_4.jpg, 2_5.jpg, hint_forward=True) 335 | Common tracks: 85 336 | grow: R=[-0.00733342 -0.00544926 0.01574143] t=[ 0.0416962 -0.09858134 -0.63567351] len(inliers)=76 337 | started with: reco has 1204 points; new shot 2_5.jpg has 40 points in reco 338 | ended with: reco has 1264 points; new shot 2_5.jpg has 100 points in reco 339 | ---------------- 340 | grow_reconstruction(2_5.jpg, 2_6.jpg, hint_forward=True) 341 | Common tracks: 73 342 | hint_forward violated; retrying (1/100) 343 | hint_forward violated; retrying (2/100) 344 | grow: R=[-0.00078986 0.00116973 -0.0008691 ] t=[ 0.0301173 -0.0609606 -0.45124812] len(inliers)=60 345 | started with: reco has 1264 points; new shot 2_6.jpg has 52 points in reco 346 | *** hint_forward violated; undoing bundle_single_view 347 | *** after undo: t_after_norm=[ 0.0659972 -0.1335853 -0.98883737] 348 | ended with: reco has 1322 points; new shot 2_6.jpg has 108 points in reco 349 | ---------------- 350 | grow_reconstruction(2_6.jpg, 2_7.jpg, hint_forward=True) 351 | Common tracks: 131 352 | grow: R=[ 0.00071532 0.00391031 -0.00726274] t=[ 0.03538102 -0.08315121 -0.66275205] len(inliers)=120 353 | started with: reco has 1322 points; new shot 2_7.jpg has 65 points in reco 354 | ended with: reco has 1402 points; new shot 2_7.jpg has 141 points in reco 355 | ---------------- 356 | grow_reconstruction(2_7.jpg, 2_8.jpg, hint_forward=True) 357 | Common tracks: 165 358 | grow: R=[-0.00210985 0.00129868 -0.00442955] t=[ 0.04725014 -0.09257014 -0.65291238] len(inliers)=147 359 | started with: reco has 1402 points; new shot 2_8.jpg has 70 points in reco 360 | ended with: reco has 1503 points; new shot 2_8.jpg has 171 points in reco 361 | ---------------- 362 | grow_reconstruction(2_8.jpg, 2_9.jpg, hint_forward=True) 363 | Common 
tracks: 166 364 | grow: R=[ -7.59299186e-03 1.97186199e-03 1.26818645e-05] t=[ 0.05264056 -0.09344022 -0.5780003 ] len(inliers)=143 365 | started with: reco has 1503 points; new shot 2_9.jpg has 61 points in reco 366 | ended with: reco has 1706 points; new shot 2_9.jpg has 249 points in reco 367 | ---------------- 368 | grow_reconstruction(2_0.jpg, 3_0.jpg, hint_forward=False) 369 | Common tracks: 45 370 | grow: R=[ 0.06084941 -0.04218081 -0.12372925] t=[ 0.46289733 0.31795609 4.49152502] len(inliers)=37 371 | started with: reco has 1706 points; new shot 3_0.jpg has 31 points in reco 372 | ended with: reco has 1771 points; new shot 3_0.jpg has 77 points in reco 373 | ---------------- 374 | grow_reconstruction(3_0.jpg, 3_1.jpg, hint_forward=True) 375 | Common tracks: 130 376 | grow: R=[-0.0016498 0.00068788 -0.00197432] t=[ 0.06347268 -0.05992739 -0.65729168] len(inliers)=118 377 | started with: reco has 1771 points; new shot 3_1.jpg has 65 points in reco 378 | ended with: reco has 1906 points; new shot 3_1.jpg has 200 points in reco 379 | ---------------- 380 | grow_reconstruction(3_1.jpg, 3_2.jpg, hint_forward=True) 381 | Common tracks: 86 382 | grow: R=[ 0.00204547 0.01015233 -0.00786492] t=[ 0.03484323 -0.04268191 -0.54720856] len(inliers)=71 383 | started with: reco has 1906 points; new shot 3_2.jpg has 87 points in reco 384 | ended with: reco has 1998 points; new shot 3_2.jpg has 171 points in reco 385 | ---------------- 386 | grow_reconstruction(3_2.jpg, 3_3.jpg, hint_forward=True) 387 | Common tracks: 59 388 | grow: R=[ 0.00072119 -0.00223857 0.00611958] t=[ 0.02321482 -0.01966203 -0.2457398 ] len(inliers)=51 389 | started with: reco has 1998 points; new shot 3_3.jpg has 105 points in reco 390 | ended with: reco has 2112 points; new shot 3_3.jpg has 220 points in reco 391 | ---------------- 392 | grow_reconstruction(3_3.jpg, 3_4.jpg, hint_forward=True) 393 | Common tracks: 47 394 | hint_forward violated; retrying (1/100) 395 | hint_forward violated; retrying (2/100) 396 | hint_forward violated; retrying (3/100) 397 | hint_forward violated; retrying (4/100) 398 | hint_forward violated; retrying (5/100) 399 | hint_forward violated; retrying (6/100) 400 | hint_forward violated; retrying (7/100) 401 | hint_forward violated; retrying (8/100) 402 | hint_forward violated; retrying (9/100) 403 | hint_forward violated; retrying (10/100) 404 | hint_forward violated; retrying (11/100) 405 | hint_forward violated; retrying (12/100) 406 | hint_forward violated; retrying (13/100) 407 | hint_forward violated; retrying (14/100) 408 | hint_forward violated; retrying (15/100) 409 | hint_forward violated; retrying (16/100) 410 | hint_forward violated; retrying (17/100) 411 | hint_forward violated; retrying (18/100) 412 | hint_forward violated; retrying (19/100) 413 | hint_forward violated; retrying (20/100) 414 | hint_forward violated; retrying (21/100) 415 | hint_forward violated; retrying (22/100) 416 | hint_forward violated; retrying (23/100) 417 | hint_forward violated; retrying (24/100) 418 | hint_forward violated; retrying (25/100) 419 | grow: R=[ 0.00057872 -0.00110287 -0.00067985] t=[ 0.0583017 -0.05403716 -0.70156422] len(inliers)=32 420 | started with: reco has 2112 points; new shot 3_4.jpg has 42 points in reco 421 | ended with: reco has 2147 points; new shot 3_4.jpg has 76 points in reco 422 | ---------------- 423 | grow_reconstruction(3_4.jpg, 3_5.jpg, hint_forward=True) 424 | Common tracks: 29 425 | grow: R=[ 0.00229186 0.01093106 0.00038495] t=[ 0.1510063 -0.12999657 -1.82529506] 
len(inliers)=22 426 | started with: reco has 2147 points; new shot 3_5.jpg has 26 points in reco 427 | ended with: reco has 2181 points; new shot 3_5.jpg has 56 points in reco 428 | ---------------- 429 | grow_reconstruction(3_5.jpg, 3_6.jpg, hint_forward=True) 430 | Common tracks: 42 431 | grow: R=[ 2.49963546e-05 -1.17434944e-01 3.36943752e-03] t=[ 0.72085267 -0.06782402 -0.90410661] len(inliers)=38 432 | started with: reco has 2181 points; new shot 3_6.jpg has 32 points in reco 433 | ended with: reco has 2204 points; new shot 3_6.jpg has 57 points in reco 434 | ---------------- 435 | grow_reconstruction(3_6.jpg, 3_7.jpg, hint_forward=True) 436 | Common tracks: 64 437 | grow: R=[ 0.00251821 -0.01524014 -0.00197349] t=[ 0.10080729 -0.04503532 -0.69795923] len(inliers)=53 438 | started with: reco has 2204 points; new shot 3_7.jpg has 29 points in reco 439 | ended with: reco has 2245 points; new shot 3_7.jpg has 68 points in reco 440 | ---------------- 441 | grow_reconstruction(3_7.jpg, 3_8.jpg, hint_forward=True) 442 | Common tracks: 80 443 | grow: R=[-0.00582711 -0.00184589 0.02408264] t=[ 0.13505861 -0.1645549 -1.59643735] len(inliers)=55 444 | started with: reco has 2245 points; new shot 3_8.jpg has 24 points in reco 445 | ended with: reco has 2278 points; new shot 3_8.jpg has 53 points in reco 446 | ---------------- 447 | grow_reconstruction(3_8.jpg, 3_9.jpg, hint_forward=True) 448 | Common tracks: 35 449 | hint_forward violated; retrying (1/100) 450 | grow: R=[-0.00255295 -0.00315544 -0.00161593] t=[ 0.04505729 -0.04281617 -0.46099631] len(inliers)=21 451 | started with: reco has 2278 points; new shot 3_9.jpg has 12 points in reco 452 | ended with: reco has 2310 points; new shot 3_9.jpg has 37 points in reco 453 | ---------------- 454 | grow_reconstruction(3_0.jpg, 1_0.jpg, hint_forward=False) 455 | Common tracks: 197 456 | grow: R=[ 0.05915667 0.08398633 0.04571605] t=[-1.86945905 -0.15173594 1.22970825] len(inliers)=193 457 | started with: reco has 2310 points; new shot 1_0.jpg has 63 points in reco 458 | ended with: reco has 2440 points; new shot 1_0.jpg has 189 points in reco 459 | ---------------- 460 | grow_reconstruction(1_0.jpg, 1_1.jpg, hint_forward=True) 461 | Common tracks: 126 462 | grow: R=[-0.00167703 -0.00467628 0.00199516] t=[ 0.05914559 -0.02292243 -0.73841685] len(inliers)=118 463 | started with: reco has 2440 points; new shot 1_1.jpg has 128 points in reco 464 | ended with: reco has 2533 points; new shot 1_1.jpg has 220 points in reco 465 | ---------------- 466 | grow_reconstruction(1_1.jpg, 1_2.jpg, hint_forward=True) 467 | Common tracks: 81 468 | grow: R=[-0.00355017 -0.00662411 0.00129656] t=[ 0.34666076 -0.13294139 -4.26517899] len(inliers)=74 469 | started with: reco has 2533 points; new shot 1_2.jpg has 111 points in reco 470 | ended with: reco has 2617 points; new shot 1_2.jpg has 195 points in reco 471 | ---------------- 472 | grow_reconstruction(1_2.jpg, 1_3.jpg, hint_forward=True) 473 | Common tracks: 65 474 | grow: R=[ 0.00095759 -0.00091654 0.00029428] t=[ 0.04382434 -0.00745566 -0.69510447] len(inliers)=57 475 | started with: reco has 2617 points; new shot 1_3.jpg has 164 points in reco 476 | ended with: reco has 2751 points; new shot 1_3.jpg has 294 points in reco 477 | ---------------- 478 | grow_reconstruction(1_3.jpg, 1_4.jpg, hint_forward=True) 479 | Common tracks: 37 480 | grow: R=[ 0.00123165 0.00010285 0.00155156] t=[ 0.0286619 0.00136055 -0.5615628 ] len(inliers)=31 481 | started with: reco has 2751 points; new shot 1_4.jpg has 52 points in 
reco 482 | ended with: reco has 2852 points; new shot 1_4.jpg has 153 points in reco 483 | ---------------- 484 | grow_reconstruction(1_4.jpg, 1_5.jpg, hint_forward=True) 485 | Common tracks: 36 486 | hint_forward violated; retrying (1/100) 487 | hint_forward violated; retrying (2/100) 488 | hint_forward violated; retrying (3/100) 489 | hint_forward violated; retrying (4/100) 490 | hint_forward violated; retrying (5/100) 491 | hint_forward violated; retrying (6/100) 492 | hint_forward violated; retrying (7/100) 493 | hint_forward violated; retrying (8/100) 494 | hint_forward violated; retrying (9/100) 495 | hint_forward violated; retrying (10/100) 496 | hint_forward violated; retrying (11/100) 497 | hint_forward violated; retrying (12/100) 498 | hint_forward violated; retrying (13/100) 499 | hint_forward violated; retrying (14/100) 500 | hint_forward violated; retrying (15/100) 501 | hint_forward violated; retrying (16/100) 502 | hint_forward violated; retrying (17/100) 503 | hint_forward violated; retrying (18/100) 504 | hint_forward violated; retrying (19/100) 505 | hint_forward violated; retrying (20/100) 506 | hint_forward violated; retrying (21/100) 507 | hint_forward violated; retrying (22/100) 508 | hint_forward violated; retrying (23/100) 509 | hint_forward violated; retrying (24/100) 510 | hint_forward violated; retrying (25/100) 511 | hint_forward violated; retrying (26/100) 512 | hint_forward violated; retrying (27/100) 513 | hint_forward violated; retrying (28/100) 514 | hint_forward violated; retrying (29/100) 515 | hint_forward violated; retrying (30/100) 516 | hint_forward violated; retrying (31/100) 517 | hint_forward violated; retrying (32/100) 518 | hint_forward violated; retrying (33/100) 519 | hint_forward violated; retrying (34/100) 520 | hint_forward violated; retrying (35/100) 521 | hint_forward violated; retrying (36/100) 522 | hint_forward violated; retrying (37/100) 523 | hint_forward violated; retrying (38/100) 524 | hint_forward violated; retrying (39/100) 525 | hint_forward violated; retrying (40/100) 526 | hint_forward violated; retrying (41/100) 527 | hint_forward violated; retrying (42/100) 528 | hint_forward violated; retrying (43/100) 529 | hint_forward violated; retrying (44/100) 530 | hint_forward violated; retrying (45/100) 531 | hint_forward violated; retrying (46/100) 532 | hint_forward violated; retrying (47/100) 533 | hint_forward violated; retrying (48/100) 534 | hint_forward violated; retrying (49/100) 535 | hint_forward violated; retrying (50/100) 536 | hint_forward violated; retrying (51/100) 537 | hint_forward violated; retrying (52/100) 538 | hint_forward violated; retrying (53/100) 539 | hint_forward violated; retrying (54/100) 540 | hint_forward violated; retrying (55/100) 541 | hint_forward violated; retrying (56/100) 542 | grow: R=[-0.00216172 -0.06842739 -0.00622381] t=[ 0.20633197 -0.01105984 -0.59411618] len(inliers)=30 543 | started with: reco has 2852 points; new shot 1_5.jpg has 35 points in reco 544 | ended with: reco has 2896 points; new shot 1_5.jpg has 79 points in reco 545 | ---------------- 546 | grow_reconstruction(1_5.jpg, 1_6.jpg, hint_forward=True) 547 | Common tracks: 63 548 | hint_forward violated; retrying (1/100) 549 | hint_forward violated; retrying (2/100) 550 | hint_forward violated; retrying (3/100) 551 | hint_forward violated; retrying (4/100) 552 | grow: R=[ 0.00223516 -0.00106756 0.00446697] t=[ 0.04032459 -0.01307362 -0.69906059] len(inliers)=60 553 | started with: reco has 2896 points; new shot 1_6.jpg has 51 
points in reco 554 | ended with: reco has 2961 points; new shot 1_6.jpg has 115 points in reco 555 | ---------------- 556 | grow_reconstruction(1_6.jpg, 1_7.jpg, hint_forward=True) 557 | Common tracks: 60 558 | hint_forward violated; retrying (1/100) 559 | hint_forward violated; retrying (2/100) 560 | hint_forward violated; retrying (3/100) 561 | hint_forward violated; retrying (4/100) 562 | grow: R=[-0.00468671 -0.00314354 0.01336077] t=[ 0.04654542 -0.0225075 -0.7105595 ] len(inliers)=46 563 | started with: reco has 2961 points; new shot 1_7.jpg has 45 points in reco 564 | ended with: reco has 3015 points; new shot 1_7.jpg has 98 points in reco 565 | ---------------- 566 | grow_reconstruction(1_7.jpg, 1_8.jpg, hint_forward=True) 567 | Common tracks: 51 568 | grow: R=[-0.00219868 -0.00721085 0.00024426] t=[ 0.1851348 -0.05729115 -2.91877135] len(inliers)=32 569 | started with: reco has 3015 points; new shot 1_8.jpg has 25 points in reco 570 | ended with: reco has 3066 points; new shot 1_8.jpg has 76 points in reco 571 | ---------------- 572 | grow_reconstruction(1_8.jpg, 1_9.jpg, hint_forward=True) 573 | Common tracks: 18 574 | hint_forward violated; retrying (1/100) 575 | hint_forward violated; retrying (2/100) 576 | hint_forward violated; retrying (3/100) 577 | hint_forward violated; retrying (4/100) 578 | grow: R=[ 0.00169435 0.00453511 0.00458934] t=[-0.00891637 -0.00896388 -1.01058756] len(inliers)=15 579 | started with: reco has 3066 points; new shot 1_9.jpg has 13 points in reco 580 | *** hint_forward violated; undoing bundle_single_view 581 | *** after undo: t_after_norm=[-0.00882227 -0.00886928 -0.99992175] 582 | ended with: reco has 3075 points; new shot 1_9.jpg has 17 points in reco 583 | ---------------- 584 | grow_reconstruction(1_0.jpg, 4_0.jpg, hint_forward=False) 585 | Common tracks: 57 586 | grow: R=[-0.10652426 0.02027031 -0.05700647] t=[-0.4639735 -0.09357338 -0.55507672] len(inliers)=56 587 | started with: reco has 3075 points; new shot 4_0.jpg has 84 points in reco 588 | ended with: reco has 3152 points; new shot 4_0.jpg has 161 points in reco 589 | ---------------- 590 | grow_reconstruction(4_0.jpg, 4_1.jpg, hint_forward=True) 591 | Common tracks: 257 592 | hint_forward violated; retrying (1/100) 593 | hint_forward violated; retrying (2/100) 594 | hint_forward violated; retrying (3/100) 595 | hint_forward violated; retrying (4/100) 596 | hint_forward violated; retrying (5/100) 597 | hint_forward violated; retrying (6/100) 598 | hint_forward violated; retrying (7/100) 599 | hint_forward violated; retrying (8/100) 600 | hint_forward violated; retrying (9/100) 601 | hint_forward violated; retrying (10/100) 602 | hint_forward violated; retrying (11/100) 603 | hint_forward violated; retrying (12/100) 604 | hint_forward violated; retrying (13/100) 605 | hint_forward violated; retrying (14/100) 606 | hint_forward violated; retrying (15/100) 607 | hint_forward violated; retrying (16/100) 608 | hint_forward violated; retrying (17/100) 609 | hint_forward violated; retrying (18/100) 610 | hint_forward violated; retrying (19/100) 611 | hint_forward violated; retrying (20/100) 612 | hint_forward violated; retrying (21/100) 613 | hint_forward violated; retrying (22/100) 614 | hint_forward violated; retrying (23/100) 615 | hint_forward violated; retrying (24/100) 616 | hint_forward violated; retrying (25/100) 617 | hint_forward violated; retrying (26/100) 618 | hint_forward violated; retrying (27/100) 619 | hint_forward violated; retrying (28/100) 620 | hint_forward violated; 
retrying (29/100) 621 | hint_forward violated; retrying (30/100) 622 | hint_forward violated; retrying (31/100) 623 | hint_forward violated; retrying (32/100) 624 | hint_forward violated; retrying (33/100) 625 | hint_forward violated; retrying (34/100) 626 | hint_forward violated; retrying (35/100) 627 | hint_forward violated; retrying (36/100) 628 | grow: R=[-0.00437519 0.00312561 -0.00189177] t=[ 0.09209921 -0.16337122 -0.86754977] len(inliers)=235 629 | started with: reco has 3152 points; new shot 4_1.jpg has 108 points in reco 630 | ended with: reco has 3320 points; new shot 4_1.jpg has 259 points in reco 631 | ---------------- 632 | grow_reconstruction(4_1.jpg, 4_2.jpg, hint_forward=True) 633 | Common tracks: 234 634 | hint_forward violated; retrying (1/100) 635 | hint_forward violated; retrying (2/100) 636 | hint_forward violated; retrying (3/100) 637 | hint_forward violated; retrying (4/100) 638 | hint_forward violated; retrying (5/100) 639 | hint_forward violated; retrying (6/100) 640 | hint_forward violated; retrying (7/100) 641 | hint_forward violated; retrying (8/100) 642 | grow: R=[-0.00605724 -0.00149243 0.00228509] t=[ 0.03437308 -0.0500458 -0.32368335] len(inliers)=206 643 | started with: reco has 3320 points; new shot 4_2.jpg has 135 points in reco 644 | ended with: reco has 3439 points; new shot 4_2.jpg has 253 points in reco 645 | ---------------- 646 | grow_reconstruction(4_2.jpg, 4_3.jpg, hint_forward=True) 647 | Common tracks: 204 648 | hint_forward violated; retrying (1/100) 649 | grow: R=[-0.00337301 -0.0047828 0.00736957] t=[ 0.0239294 -0.0317579 -0.24107198] len(inliers)=173 650 | started with: reco has 3439 points; new shot 4_3.jpg has 169 points in reco 651 | ended with: reco has 3559 points; new shot 4_3.jpg has 290 points in reco 652 | ---------------- 653 | grow_reconstruction(4_3.jpg, 4_4.jpg, hint_forward=True) 654 | Common tracks: 86 655 | grow: R=[-0.0002167 -0.0053691 0.00118032] t=[ 0.12106191 -0.16076039 -1.30648433] len(inliers)=72 656 | started with: reco has 3559 points; new shot 4_4.jpg has 94 points in reco 657 | ended with: reco has 3841 points; new shot 4_4.jpg has 373 points in reco 658 | ---------------- 659 | grow_reconstruction(4_4.jpg, 4_5.jpg, hint_forward=True) 660 | Common tracks: 50 661 | hint_forward violated; retrying (1/100) 662 | hint_forward violated; retrying (2/100) 663 | hint_forward violated; retrying (3/100) 664 | hint_forward violated; retrying (4/100) 665 | hint_forward violated; retrying (5/100) 666 | hint_forward violated; retrying (6/100) 667 | grow: R=[-0.00347155 -0.03916631 -0.00095792] t=[ 0.20353279 -0.08396699 -0.65797923] len(inliers)=39 668 | started with: reco has 3841 points; new shot 4_5.jpg has 54 points in reco 669 | ended with: reco has 3877 points; new shot 4_5.jpg has 90 points in reco 670 | ---------------- 671 | grow_reconstruction(4_5.jpg, 4_6.jpg, hint_forward=True) 672 | Common tracks: 44 673 | grow: R=[ 0.00454222 0.01537478 0.0037992 ] t=[-0.223861 -0.02610251 -0.69141267] len(inliers)=40 674 | started with: reco has 3877 points; new shot 4_6.jpg has 52 points in reco 675 | ended with: reco has 3923 points; new shot 4_6.jpg has 97 points in reco 676 | ---------------- 677 | grow_reconstruction(4_6.jpg, 4_7.jpg, hint_forward=True) 678 | Common tracks: 79 679 | grow: R=[-0.00319922 0.01353088 0.0076216 ] t=[ 4.39703314 -12.97489562 -98.81062627] len(inliers)=69 680 | started with: reco has 3923 points; new shot 4_7.jpg has 73 points in reco 681 | ended with: reco has 3984 points; new shot 4_7.jpg has 
128 points in reco 682 | ---------------- 683 | grow_reconstruction(4_7.jpg, 4_8.jpg, hint_forward=True) 684 | Common tracks: 102 685 | hint_forward violated; retrying (1/100) 686 | hint_forward violated; retrying (2/100) 687 | hint_forward violated; retrying (3/100) 688 | hint_forward violated; retrying (4/100) 689 | hint_forward violated; retrying (5/100) 690 | grow: R=[-0.00228641 -0.00089035 0.00337012] t=[ 1.9213044 -2.65258939 -21.28968071] len(inliers)=79 691 | started with: reco has 3984 points; new shot 4_8.jpg has 87 points in reco 692 | ended with: reco has 4070 points; new shot 4_8.jpg has 172 points in reco 693 | ---------------- 694 | grow_reconstruction(4_8.jpg, 4_9.jpg, hint_forward=True) 695 | Common tracks: 179 696 | grow: R=[-0.00363003 0.00456753 -0.00172379] t=[ 0.38802958 -0.5854484 -4.01081641] len(inliers)=145 697 | started with: reco has 4070 points; new shot 4_9.jpg has 100 points in reco 698 | ended with: reco has 4233 points; new shot 4_9.jpg has 264 points in reco 699 | -------------------------------------------------------------------------------- /viewer/js/journey.js: -------------------------------------------------------------------------------- 1 | var Dijkstra = (function () { 2 | 3 | /** 4 | * A class for calculations on graphs using Dijkstra's algorithm. 5 | * @constructor 6 | */ 7 | function Dijkstra() { 8 | } 9 | 10 | // Private sort delegate for ordering key value pairs arranged 11 | // as an array of two items like [key, value]. 12 | var keyValueSorter = function (kv1, kv2) { 13 | return parseFloat(kv1[1]) - parseFloat(kv2[1]); 14 | } 15 | 16 | /** 17 | * Private function for running Dijkstra's Algorithm until an evaluation function decides to stop. 18 | * @param {Object} graph The graph with nodes and weights used for calculation. 19 | * @param {String} source The name of the source node. 20 | * @param {String} weight The name of the weight property. 21 | * @param {Function} evaluationFunc Function taking the current node and current distance as parameters 22 | * and returns true if the algorithm should finish. 23 | * @return {Object} An object with properties for the visited nodes, the previous nodes and the distances from 24 | * the source node. 25 | */ 26 | var dijkstra = function(graph, source, weight, evaluationFunc) { 27 | var touchedNodes = {}; 28 | var previous = {}; 29 | var distances = {}; 30 | var visited = {}; 31 | 32 | touchedNodes[source] = 0; 33 | previous[source] = undefined; 34 | distances[source] = 0; 35 | 36 | while (true) { 37 | var touched = []; 38 | for (var key in touchedNodes) { 39 | if (Object.prototype.hasOwnProperty.call(touchedNodes, key)) { 40 | touched.push([key, touchedNodes[key]]) 41 | } 42 | } 43 | 44 | // Stop if none of the unvisited nodes can be reached. 45 | if (!touched.length) { 46 | break; 47 | } 48 | 49 | // Select the unvisited node with smallest distance and mark it as current node. 50 | touched.sort(keyValueSorter); 51 | var currentNode = touched[0][0]; 52 | var currentDistance = touched[0][1] 53 | 54 | visited[currentNode] = true; 55 | delete touchedNodes[currentNode]; 56 | 57 | // Return if the evaluation of the current position returns true.. 58 | if (evaluationFunc(currentNode, currentDistance)) { 59 | break; 60 | } 61 | 62 | var currentEdges = graph.edges[currentNode] || {}; 63 | 64 | for (var node in currentEdges) { 65 | if (Object.prototype.hasOwnProperty.call(currentEdges, node)) { 66 | 67 | // Do not process already visited nodes. 
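// (A visited node already carries its final shortest distance, so relaxing it again cannot improve the result.)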
68 | if (Object.prototype.hasOwnProperty.call(visited, node)) { 69 | continue; 70 | } 71 | 72 | // Calculate the total distance from the source to the node of 73 | // the current edge. 74 | var distance = currentEdges[node][weight]; 75 | var totalDistance = distances[currentNode] + distance; 76 | 77 | if (!distances[node] || totalDistance < distances[node]) 78 | { 79 | distances[node] = totalDistance; 80 | touchedNodes[node] = totalDistance; 81 | previous[node] = currentNode; 82 | } 83 | } 84 | } 85 | } 86 | 87 | return { visited: visited, distances: distances, previous: previous } 88 | } 89 | 90 | /** 91 | * Calculate the shortest path between two nodes in a graph using 92 | * Dijkstra's Algorithm. 93 | * @param {Object} graph The graph with nodes and weights used for calculation. 94 | * @param {String} source The name of the source node. 95 | * @param {String} target The name of the target node. 96 | * @param {String} weight The name of the weight property. 97 | * @return {Array} An array of node names corresponding to the path 98 | */ 99 | Dijkstra.prototype.shortestPath = function (graph, source, target, weight) { 100 | if (source === target) { 101 | return [source]; 102 | } 103 | 104 | var evaluationFunc = function (currentNode, currentDistance) { 105 | return currentNode === target; 106 | }; 107 | 108 | var result = dijkstra(graph, source, weight, evaluationFunc); 109 | 110 | // No path to the target was found. 111 | if (result.previous[target] === undefined) { 112 | return null; 113 | } 114 | 115 | // Retrieve a path from the dictionary of previous nodes and reverse it. 116 | var reversePath = []; 117 | var element = target; 118 | while (element !== undefined) { 119 | reversePath.push(element); 120 | element = result.previous[element]; 121 | } 122 | 123 | return reversePath.reverse(); 124 | } 125 | 126 | /** 127 | * Retrieve all other nodes within a distance from a source node based on the edge weights. 128 | * @param {Object} graph The graph with nodes and weights used for calculation. 129 | * @param {String} source The name of the source node. 130 | * @param {String} weight The name of the weight property. 131 | * @param {Number} distance The maximum distance between nodes. 132 | * @return {Array} An array of node names corresponding to nodes within a distance 133 | from the source node. 134 | */ 135 | Dijkstra.prototype.nodesWithinDistance = function (graph, source, distance, weight) { 136 | var evaluationFunc = function (currentNode, currentDistance) { 137 | return currentDistance >= distance; 138 | }; 139 | 140 | var result = dijkstra(graph, source, weight, evaluationFunc); 141 | 142 | var nodes = []; 143 | for (var node in result.visited) { 144 | if (Object.prototype.hasOwnProperty.call(result.visited, node)) { 145 | nodes.push(node); 146 | } 147 | } 148 | 149 | return nodes; 150 | } 151 | 152 | return Dijkstra; 153 | })(); 154 | 155 | var GraphHelper = (function () { 156 | 157 | /** 158 | * A class with helper functions for graphs. 159 | * @constructor 160 | */ 161 | function GraphHelper() { 162 | this.dijkstra = new Dijkstra(); 163 | } 164 | 165 | // Private function for getting a graph with edges of a type. 
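// The raw nav graphs are expected to be shaped as graph.nodes = [shot_id, ...] and graph.edges[from][type][to] = { weight, direction, ... }; getTypeGraph flattens one edge type into typeGraph.edges[from][to].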
166 | var getTypeGraph = function (graph, type) { 167 | var typeGraph = { nodes: graph.nodes, edges: {} }; 168 | 169 | for (var k in graph.edges) { 170 | if (!Object.prototype.hasOwnProperty.call(graph.edges, k)) { 171 | continue; 172 | } 173 | 174 | typeGraph.edges[k] = {}; 175 | var edges = graph.edges[k][type]; 176 | 177 | for (var m in edges) { 178 | if (!Object.prototype.hasOwnProperty.call(edges, m)) { 179 | continue; 180 | } 181 | 182 | typeGraph.edges[k][m] = {}; 183 | 184 | var edge_properties = edges[m]; 185 | 186 | for (var ep in edge_properties) { 187 | if (!Object.prototype.hasOwnProperty.call(edge_properties, ep)) { 188 | continue; 189 | } 190 | 191 | typeGraph.edges[k][m][ep] = edge_properties[ep]; 192 | } 193 | } 194 | } 195 | 196 | return typeGraph; 197 | } 198 | 199 | /** 200 | * Retrieves the graphs with edges of a certain type. 201 | * @param {Array} graphs The graphs with nodes and weights used for calculation. 202 | * @param {String} type The name of the edge type. 203 | * @return {Array} An array of graphs where all edges are of the specified type. 204 | */ 205 | GraphHelper.prototype.getTypeGraphs = function (graphs, type) { 206 | var typeGraphs = []; 207 | 208 | for (var i = 0; i < graphs.length; i++) { 209 | var typeGraph = getTypeGraph(graphs[i], type); 210 | typeGraphs.push(typeGraph); 211 | } 212 | 213 | return typeGraphs; 214 | } 215 | 216 | /** 217 | * Retrieves graphs with edges of a default type. The default type edges are overridden by 218 | * edges of the override type for nodes within a threshold distance from a target node. 219 | * @param {Array} graphs The graphs with nodes and weights used for calculation. 220 | * @param {String} defaultType The name of the default edge type. 221 | * @param {String} overrideType The name of the override edge type. 222 | * @param {String} target The name of the override target node from which the other override nodes are retrieved. 223 | * @return {Array} An array of graphs where default type edges are overridden by override type edges near the target. 224 | */ 225 | GraphHelper.prototype.mergeTypeGraphs = function (graphs, defaultType, overrideType, target, distance) { 226 | var mergedGraphs = []; 227 | 228 | for (var i = 0; i < graphs.length; i++) { 229 | var graph = graphs[i]; 230 | var mergedGraph = getTypeGraph(graph, defaultType); 231 | 232 | if (graph.nodes.indexOf(target) > -1) { 233 | var overrideGraph = getTypeGraph(graph, overrideType); 234 | var overrideNodes = this.dijkstra.nodesWithinDistance(overrideGraph, target, distance, 'weight'); 235 | 236 | for (var j = 0; j < overrideNodes.length; j++) { 237 | var overrideNode = overrideNodes[j]; 238 | mergedGraph.edges[overrideNode] = graph.edges[overrideNode][overrideType]; 239 | } 240 | } 241 | 242 | mergedGraphs.push(mergedGraph); 243 | } 244 | 245 | return mergedGraphs; 246 | } 247 | 248 | /** 249 | * Creates a graph with a penalty for certain properties with certain values. 250 | * @param {Object} graph The graph with nodes and weights used for calculation. 251 | * @param {String} weightKey The name of the weight key. 252 | * @param {String} penaltyKey The name of the penalty key. 253 | * @param {Dictionary} penalties Dictionary of penalty keys with respective penalty amount. 254 | * @return {Object} A graph with edge weights as sum of original weight and specified penalty.
255 | */ 256 | GraphHelper.prototype.getPenaltyGraph = function (graph, weightKey, penaltyKey, penalties) { 257 | 258 | var penaltyGraph = { edges: {} }; 259 | 260 | for (var k in graph.edges) { 261 | if (!Object.prototype.hasOwnProperty.call(graph.edges, k)) { 262 | continue; 263 | } 264 | 265 | penaltyGraph.edges[k] = {}; 266 | var edges = graph.edges[k]; 267 | 268 | for (var m in edges) { 269 | if (!Object.prototype.hasOwnProperty.call(edges, m)) { 270 | continue; 271 | } 272 | 273 | penaltyGraph.edges[k][m] = {}; 274 | 275 | // Add the penalty amount to the weight if the value of the penalty key 276 | // is among the specified penalties. 277 | if (edges[m][penaltyKey] in penalties) { 278 | penaltyGraph.edges[k][m][weightKey] = edges[m][weightKey] + penalties[edges[m][penaltyKey]]; 279 | } 280 | else { 281 | penaltyGraph.edges[k][m][weightKey] = edges[m][weightKey]; 282 | } 283 | } 284 | } 285 | 286 | return penaltyGraph; 287 | } 288 | 289 | /** 290 | * Retrieves the directed edge between a start node and an adjacent end node. 291 | * @param {Object} graph The graph. 292 | * @param {String} from The name of the node for which the edge starts. 293 | * @param {String} to The name of the node for which the edge ends. 294 | * @return {Dictionary} The edge properties. 295 | */ 296 | GraphHelper.prototype.getEdgeProperties = function(graph, from, to) { 297 | var nodeEdges = graph.edges[from]; 298 | var edgeProperties = nodeEdges[to]; 299 | return edgeProperties; 300 | } 301 | 302 | return GraphHelper; 303 | })(); 304 | 305 | var LinearCurve = THREE.Curve.create( 306 | 307 | function (points) { 308 | 309 | this.points = (points == undefined) ? [] : points; 310 | }, 311 | 312 | function (t) { 313 | 314 | var points = this.points; 315 | var point = (points.length - 1) * t; 316 | 317 | var intPoint = Math.floor(point); 318 | var weight = point - intPoint; 319 | 320 | var point1 = points[intPoint]; 321 | var point2 = points[intPoint > points.length - 2 ? points.length - 1 : intPoint + 1]; 322 | 323 | var vector = new THREE.Vector3(); 324 | vector.copy(point1).lerp(point2, weight); 325 | 326 | return vector; 327 | } 328 | ); 329 | 330 | var JourneyBase = (function () { 331 | 332 | /** 333 | * A journey base. 334 | * @constructor 335 | * @param {Array} graphs A list of graphs. 336 | * @param {Object} shots Dictionary of shots with positions and targets. 337 | * @param {Number} intervalTime The interval for navigation. 338 | * @param {Object} penalty Object specifying a weight key, penalty key and a dictionary of penalty key values 339 | with penalty amounts. 340 | */ 341 | function JourneyBase(graphs, shots, intervalTime, penalty) { 342 | this.graphs = graphs; 343 | this.shots = shots; 344 | this.intervalTime = intervalTime; 345 | this.penalty = penalty; 346 | 347 | this.started = false; 348 | this.preCount = 15; 349 | this.dijkstra = new Dijkstra(); 350 | this.graphHelper = new GraphHelper(); 351 | } 352 | 353 | /** 354 | * Sets the interval time. 355 | * @param {Number} intervalTime 356 | */ 357 | JourneyBase.prototype.updateInterval = function (intervalTime) { 358 | this.intervalTime = intervalTime; 359 | } 360 | 361 | /** 362 | * Calculate the shortest path between two nodes in a graph.
363 | * @param {String} from 364 | * @param {String} to 365 | * @return {Array} An array of node names corresponding to the path. 366 | */ 367 | JourneyBase.prototype.shortestPath = function (from, to) { 368 | var index = null; 369 | for (var i = 0; i < this.graphs.length; i++) { 370 | // Ensure that both nodes exist in the graph. 371 | if (this.graphs[i].nodes.indexOf(from) > -1 && 372 | this.graphs[i].nodes.indexOf(to) > -1) { 373 | index = i; 374 | break; 375 | } 376 | } 377 | 378 | if (index === null) { 379 | return null; 380 | } 381 | 382 | var journeyGraph = this.graphs[index]; 383 | if (this.penalty) { 384 | journeyGraph = 385 | this.graphHelper.getPenaltyGraph( 386 | journeyGraph, 387 | this.penalty.weightKey, 388 | this.penalty.penaltyKey, 389 | this.penalty.values); 390 | } 391 | 392 | var path = this.dijkstra.shortestPath(journeyGraph, from, to, 'weight'); 393 | 394 | return path === null ? null : { path: path, index: index }; 395 | } 396 | 397 | /** 398 | * Creates a geometry based on an existing property of the nodes in the path. 399 | * @param {Array} path Nodes to create geometry from. 400 | * @param {String} property Name of the shot property to use. 401 | * @return {THREE.Geometry} A geometry for the path. 402 | */ 403 | JourneyBase.prototype.getGeometry = function (path, property) { 404 | var geometry = new THREE.Geometry(); 405 | 406 | for (var i = 0; i < path.length; i++) { 407 | var shot_id = path[i]; 408 | var vertex = this.shots[shot_id][property]; 409 | geometry.vertices.push(new THREE.Vector3(vertex.x, vertex.y, vertex.z)); 410 | } 411 | 412 | return geometry; 413 | } 414 | 415 | /** 416 | * Creates a geometry based on the positions of the nodes in the 417 | * shortest path between the specified nodes. 418 | * @param {String} from Name of the start node. 419 | * @param {String} to Name of the end node. 420 | * @return {THREE.Geometry} A geometry for the shortest path between the nodes. 421 | */ 422 | JourneyBase.prototype.getPathGeometry = function (from, to) { 423 | 424 | var result = this.shortestPath(from, to); 425 | if (result === null || result.path.length <= 1) { 426 | return null; 427 | } 428 | 429 | return this.getGeometry(result.path, 'position'); 430 | } 431 | 432 | /** 433 | * Retrieves the directed edge between a start node and an adjacent end node. 434 | * @param {Integer} graphIndex The index of the graph. 435 | * @param {String} from The name of the node for which the edge starts. 436 | * @param {String} to The name of the node for which the edge ends. 437 | * @return {Dictionary} The edge properties. 438 | */ 439 | JourneyBase.prototype.getEdge = function(graphIndex, from, to) { 440 | return this.graphHelper.getEdgeProperties(this.graphs[graphIndex], from, to); 441 | } 442 | 443 | /** 444 | * Gets a value indicating whether a journey is ongoing. 445 | * @return {Boolean} A value indicating whether a journey is ongoing. 446 | */ 447 | JourneyBase.prototype.isStarted = function () { 448 | return this.started; 449 | } 450 | 451 | /** 452 | * Virtual base class method to overwrite and implement in subclasses. 453 | * @param {String} from 454 | * @param {String} to 455 | */ 456 | JourneyBase.prototype.start = function (from, to) { 457 | console.log( "Warning, start() not implemented!" ); 458 | } 459 | 460 | /** 461 | * Virtual base class method to overwrite and implement in subclasses. 462 | * @param {Boolean} continuation Indicates if a continuation action should be invoked.
463 | */ 464 | JourneyBase.prototype.stop = function (continuation) { 465 | console.log( "Warning, stop() not implemented!" ); 466 | } 467 | 468 | return JourneyBase; 469 | })(); 470 | 471 | var Journey = (function () { 472 | 473 | /** 474 | * A journey. 475 | * @constructor 476 | * @extends {JourneyBase} 477 | * @param {Array} graphs A list of graphs. 478 | * @param {Object} shots Dictionary of shots. 479 | * @param {Number} intervalTime The desired time for moving one unit of distance. 480 | * @param {Function} navigationAction The action to execute on navigation. 481 | * @param {Function} startAction The action to run when starting a journey. 482 | * @param {Function} stopAction The action to run when stopping a journey. 483 | * @param {Function} preloadAction The action to run to preload upcoming images. 484 | * @param {Object} penalty Object specifying a weight key, penalty key and a dictionary of penalty key values 485 | with penalty amounts. 486 | */ 487 | function Journey( 488 | graphs, 489 | shots, 490 | intervalTime, 491 | navigationAction, 492 | startAction, 493 | stopAction, 494 | preloadAction, 495 | penalty) { 496 | 497 | JourneyBase.apply(this, [graphs, shots, intervalTime, penalty]); 498 | 499 | this.navigationAction = navigationAction; 500 | this.startAction = startAction; 501 | this.stopAction = stopAction; 502 | this.preloadAction = preloadAction; 503 | this.graphIndex = null; 504 | this.path = null; 505 | this.timeoutToken = null; 506 | this.currentIndex = null; 507 | } 508 | 509 | // Inheriting from JourneyBase 510 | Journey.prototype = Object.create(JourneyBase.prototype); 511 | Journey.prototype.constructor = Journey; 512 | 513 | // Private function for calculating the interval value. The max distance of an edge is 514 | // 20. The interval is the edge weight multiplied by the desired interval time for one unit. 515 | // A minimum value is enforced to avoid navigating too fast. 516 | var getInterval = function (edges, node, intervalTime) { 517 | var distance = edges[node].weight; 518 | return Math.max(distance * intervalTime, 0.7 * 1000); 519 | } 520 | 521 | // Private callback function for setTimeout-based navigation. 522 | var navigation = function () { 523 | if (this.started !== true) { 524 | this.stop(); 525 | return; 526 | } 527 | 528 | var _this = this; 529 | if (!isFinite(this.intervalTime)) { 530 | this.timeoutToken = window.setTimeout(function () { navigation.call(_this); }, 500); 531 | return; 532 | } 533 | 534 | var pathLength = this.path.length; 535 | this.currentIndex++; 536 | 537 | if (this.currentIndex >= pathLength) { 538 | this.stop(); 539 | return; 540 | } 541 | 542 | this.navigationAction(this.path[this.currentIndex]); 543 | 544 | if (this.currentIndex === pathLength - 1) { 545 | this.stop(); 546 | return; 547 | } 548 | 549 | if (this.currentIndex + this.preCount <= pathLength - 1) { 550 | this.preloadAction([this.path[this.currentIndex + this.preCount]]); 551 | } 552 | 553 | var currentInterval = 554 | getInterval( 555 | this.graphs[this.graphIndex].edges[this.path[this.currentIndex - 1]], 556 | this.path[this.currentIndex], 557 | this.intervalTime); 558 | 559 | this.timeoutToken = window.setTimeout(function () { navigation.call(_this); }, currentInterval); 560 | } 561 | 562 | /** 563 | * Starts a journey between two nodes in a graph.
564 | * @param {String} from Name of the start node. 565 | * @param {String} to Name of the end node. 566 | */ 567 | Journey.prototype.start = function (from, to) { 568 | if (this.started === true) { 569 | return; 570 | } 571 | 572 | var result = this.shortestPath(from, to); 573 | if (result === null || result.path.length <= 1) { 574 | return; 575 | } 576 | 577 | this.started = true; 578 | this.path = result.path; 579 | this.preloadAction(this.path.slice(1, Math.min(this.preCount, this.path.length))); 580 | 581 | this.graphIndex = result.index; 582 | this.currentIndex = 0; 583 | this.startAction(); 584 | this.navigationAction(this.path[this.currentIndex]); 585 | 586 | var _this = this; 587 | this.timeoutToken = window.setTimeout(function () { navigation.call(_this); }, 500); 588 | } 589 | 590 | /** 591 | * Stops an ongoing journey between two nodes in a graph. 592 | */ 593 | Journey.prototype.stop = function (continuation) { 594 | if (this.timeoutToken === null || this.started === false) { 595 | return; 596 | } 597 | 598 | window.clearTimeout(this.timeoutToken); 599 | 600 | this.graphIndex = null; 601 | this.path = null; 602 | this.timeoutToken = null; 603 | this.currentIndex = null; 604 | 605 | this.stopAction(); 606 | 607 | this.started = false; 608 | } 609 | 610 | return Journey; 611 | })(); 612 | 613 | var SmoothJourney = (function () { 614 | 615 | /** 616 | * A smooth journey. 617 | * @constructor 618 | * @extends {JourneyBase} 619 | * @param {Array} graphs A list of graphs. 620 | * @param {Object} shots Dictionary of shots. 621 | * @param {Number} intervalTime The desired time for moving one unit of distance. 622 | * @param {Function} navigationAction The action to execute on navigation. 623 | * @param {Function} nodeAction The action to execute when a node should be displayed. 624 | * @param {Function} startAction The action to run when starting a journey. 625 | * @param {Function} stopAction The action to run when stopping a journey. 626 | * @param {Function} continuationAction The action to execute when the journey is stopped, to continue movement smoothly to the next node. 627 | * @param {Function} preloadAction The action to run to preload upcoming images. 628 | * @param {Function} speedFunction Function returning a speed coefficient based on the current position between nodes. 629 | * @param {Type} curveType The type of the curve used for movement. Must inherit from THREE.Curve. 630 | * @param {Object} penalty Object specifying a weight key, penalty key and a dictionary of penalty key values 631 | with penalty amounts.
632 | */ 633 | function SmoothJourney( 634 | graphs, 635 | shots, 636 | intervalTime, 637 | navigationAction, 638 | nodeAction, 639 | startAction, 640 | stopAction, 641 | continuationAction, 642 | preloadAction, 643 | speedFunction, 644 | curveType, 645 | penalty) { 646 | 647 | JourneyBase.apply(this, [graphs, shots, intervalTime, penalty]); 648 | 649 | this.navigationAction = navigationAction; 650 | this.nodeAction = nodeAction; 651 | this.startAction = startAction; 652 | this.stopAction = stopAction; 653 | this.continuationAction = continuationAction; 654 | this.preloadAction = preloadAction; 655 | this.speedFunction = speedFunction; 656 | this.curveType = curveType; 657 | 658 | this.graphIndex = null; 659 | this.path = null; 660 | this.positionCurve = null; 661 | this.targetCurve = null; 662 | this.intervalToken = null; 663 | this.previousTime = null; 664 | this.currentIndex = null; 665 | this.u = null; 666 | this.t = null; 667 | } 668 | 669 | // Inheriting from JourneyBase 670 | SmoothJourney.prototype = Object.create(JourneyBase.prototype); 671 | SmoothJourney.prototype.constructor = SmoothJourney; 672 | 673 | // Private function for calculating the current position and target based 674 | // on the elapsed time, interval and the curve. 675 | var move = function () { 676 | if (this.started !== true) { 677 | return; 678 | } 679 | 680 | var currentTime = Date.now(); 681 | 682 | // Pause movement if the interval time is infinity. 683 | if (!isFinite(this.intervalTime)) { 684 | this.previousTime = currentTime; 685 | return; 686 | } 687 | 688 | var elapsed = currentTime - this.previousTime; 689 | 690 | var previousPoint = (this.path.length - 1) * this.t; 691 | var previousIndex = Math.floor(previousPoint); 692 | var previousFraction = this.u >= 1 ? 1 : previousPoint - previousIndex; 693 | var previousEdge = this.getEdge(this.graphIndex, this.path[this.currentIndex], this.path[this.currentIndex + 1]); 694 | 695 | var speedCoefficient = this.speedFunction(previousFraction, previousEdge); 696 | elapsed = speedCoefficient * elapsed; 697 | 698 | this.previousTime = currentTime; 699 | var totalTime = this.intervalTime * this.positionCurve.getLength(); 700 | 701 | this.u = Math.min(this.u + (elapsed / totalTime), 1); 702 | 703 | // Retrieve t from the position curve to calculate index. 704 | this.t = this.positionCurve.getUtoTmapping(this.u); 705 | var point = (this.path.length - 1) * this.t; 706 | var index = Math.floor(point); 707 | 708 | if (index > this.currentIndex && index < this.path.length - 1) { 709 | this.currentIndex = index; 710 | 711 | var startIndex = Math.min(2 + this.currentIndex * 3, this.currentIndex + this.preCount); 712 | var endIndex = Math.min(5 + this.currentIndex * 3, this.currentIndex + this.preCount + 1); 713 | 714 | if (endIndex <= this.path.length) { 715 | this.preloadAction(this.path.slice(startIndex, endIndex)); 716 | } 717 | 718 | this.nodeAction(this.path[this.currentIndex + 1]); 719 | } 720 | 721 | var position = this.positionCurve.getPoint(this.t); 722 | var target = this.targetCurve.getPoint(this.t); 723 | 724 | // Do not reset the weight after reaching the last node. 725 | var fraction = this.u >= 1 ? 1 : point - index; 726 | var edge = this.getEdge(this.graphIndex, this.path[this.currentIndex], this.path[this.currentIndex + 1]); 727 | 728 | this.navigationAction(position, target, fraction, edge); 729 | 730 | if (this.u >= 1) { 731 | this.stop(false); 732 | } 733 | } 734 | 735 | /** 736 | * Starts a smooth journey between two nodes in a graph. 
737 | * @param {String} from Name of the start node. 738 | * @param {String} to Name of the end node. 739 | */ 740 | SmoothJourney.prototype.start = function (from, to) { 741 | if (this.started === true) { 742 | return; 743 | } 744 | 745 | var result = this.shortestPath(from, to); 746 | if (result === null || result.path.length <= 1) { 747 | return; 748 | } 749 | 750 | this.started = true; 751 | this.path = result.path; 752 | this.graphIndex = result.index; 753 | 754 | var startIndex = Math.min(2, this.path.length - 1); 755 | var endIndex = Math.min(5, this.path.length); 756 | this.preloadAction(this.path.slice(startIndex, endIndex)); 757 | 758 | var positions = this.getGeometry(this.path, 'position').vertices; 759 | var targets = this.getGeometry(this.path, 'target').vertices; 760 | 761 | this.positionCurve = new (Function.prototype.bind.apply(this.curveType, [null, positions])); 762 | this.targetCurve = new (Function.prototype.bind.apply(this.curveType, [null, targets])); 763 | 764 | this.previousTime = Date.now(); 765 | this.u = 0; 766 | this.t = 0; 767 | this.currentIndex = 0; 768 | 769 | this.startAction(); 770 | 771 | this.nodeAction(this.path[this.currentIndex + 1]); 772 | 773 | var position = this.positionCurve.getPointAt(0); 774 | var target = this.targetCurve.getPointAt(0); 775 | 776 | var edge = this.getEdge(this.graphIndex, this.path[this.currentIndex], this.path[this.currentIndex + 1]); 777 | this.navigationAction(position, target, 0, edge); 778 | 779 | var _this = this; 780 | this.intervalToken = window.setInterval(function () { move.call(_this); }, 1000/60); 781 | } 782 | 783 | /** 784 | * Stops a smooth journey. 785 | * @param {Boolean} continuation Specifies whether the continuation action should be invoked. 786 | */ 787 | SmoothJourney.prototype.stop = function (continuation) { 788 | if (this.intervalToken === null || this.started === false) { 789 | return; 790 | } 791 | 792 | window.clearInterval(this.intervalToken); 793 | this.intervalToken = null; 794 | 795 | var nextIndex = Math.min(this.currentIndex + 1, this.path.length - 1); 796 | var nextNode = this.path[nextIndex]; 797 | 798 | this.graphIndex = null; 799 | this.path = null; 800 | this.positionCurve = null; 801 | this.targetCurve = null; 802 | this.previousTime = null; 803 | this.currentIndex = null; 804 | this.u = null; 805 | this.t = null; 806 | 807 | if (continuation === true) { 808 | this.continuationAction(nextNode); 809 | } 810 | 811 | this.stopAction(); 812 | 813 | this.started = false; 814 | } 815 | 816 | /** 817 | * Sets the curve type for a smooth journey. 818 | * @param {Type} curveType The type of the curve used for movement. Must inherit from THREE.Curve. 819 | */ 820 | SmoothJourney.prototype.setCurveType = function (curveType) { 821 | this.curveType = curveType; 822 | 823 | if (this.started === false) { 824 | return; 825 | } 826 | 827 | var positions = this.getGeometry(this.path, 'position').vertices; 828 | var targets = this.getGeometry(this.path, 'target').vertices; 829 | 830 | this.positionCurve = new (Function.prototype.bind.apply(this.curveType, [null, positions])); 831 | this.targetCurve = new (Function.prototype.bind.apply(this.curveType, [null, targets])); 832 | } 833 | 834 | return SmoothJourney; 835 | })(); 836 | 837 | var JourneyWrapper = (function ($) { 838 | 839 | /** 840 | * A journey wrapper. 841 | * The journey wrapper uses global objects declared in the reconstruction script.
842 | * @constructor 843 | */ 844 | function JourneyWrapper() { 845 | this.initialized = false; 846 | this.journey = null; 847 | this.destination = null; 848 | this.line = null; 849 | this.curveType = null; 850 | this.showPathController = null; 851 | 852 | this.graphHelper = new GraphHelper(); 853 | } 854 | 855 | // Private function for calculating the desired time for moving one unit. 856 | var getInterval = function () { 857 | var interval = null; 858 | if (controls.animationSpeed === 0) { 859 | interval = Infinity; 860 | } 861 | else { 862 | // Calculate the time it should take to cover the distance of one unit during navigation. 863 | interval = (-2.4 + 1.7 / Math.sqrt(controls.animationSpeed)) * 1000 / 20; 864 | } 865 | 866 | return interval; 867 | } 868 | 869 | // Private function converting shot dictionary with rotations and translations 870 | // values to shot dictionary with optical centers and viewing directions. 871 | var convertShots = function () { 872 | var shots = {}; 873 | for (var r = 0; r < reconstructions.length; ++r) { 874 | var newShots = reconstructions[r].shots; 875 | shots = $.extend(shots, newShots); 876 | } 877 | 878 | var result = {}; 879 | 880 | for (var shot_id in shots) { 881 | if (!Object.prototype.hasOwnProperty.call(shots, shot_id)) { 882 | continue; 883 | } 884 | 885 | var shot = shots[shot_id]; 886 | 887 | var position = opticalCenter(shot); 888 | 889 | var camera = getCamera(shot_id); 890 | var cam = camera.reconstruction['cameras'][shot['camera']]; 891 | var target = pixelToVertex(cam, shot, 0, 0, 20); 892 | 893 | result[shot_id] = { 'position': position, 'target': target }; 894 | } 895 | 896 | return result; 897 | } 898 | 899 | // Private function for start action of journey. 900 | var start = function () { 901 | setMovingMode('walk'); 902 | $('#journeyButton').html('X'); 903 | } 904 | 905 | // Private function for stop action of journey. 906 | var stop = function () { 907 | $('#journeyButton').html('Go'); 908 | } 909 | 910 | // Private function for preloading images. 911 | var preload = function (shot_ids) { 912 | for (var i = 0; i < shot_ids.length; i++) { 913 | var tempImg = new Image(); 914 | tempImg.src = imageURL(shot_ids[i]); 915 | } 916 | } 917 | 918 | // Private function for retrieving a camera based on the id. 919 | var getCamera = function (shot_id) { 920 | var camera = null; 921 | for (var i = 0; i < camera_lines.length; ++i) { 922 | if (camera_lines[i].shot_id === shot_id) { 923 | camera = camera_lines[i]; 924 | } 925 | } 926 | 927 | return camera; 928 | } 929 | 930 | // Private function for navigation action of journey. Retrieves a camera, 931 | // creates its image plane and navigates to it. 932 | var navigation = function (shot_id) { 933 | var camera = getCamera(shot_id); 934 | if (camera === null) { 935 | return; 936 | } 937 | 938 | setImagePlaneCamera(camera); 939 | navigateToShot(camera); 940 | } 941 | 942 | // Private function for setting the position and direction of the orbit controls camera 943 | // used for the smooth navigation movement as well as controlling the image plane opacity. 944 | var smoothNavigation = function (position, target, fraction, edge) { 945 | controls.goto(position, target); 946 | options.imagePlaneOpacity = 1 - mapFraction(fraction, edge); 947 | } 948 | 949 | // Private function which retrieves a camera and creates its image plane. 
950 | var smoothNodeAction = function (shot_id) { 951 | var camera = getCamera(shot_id); 952 | if (camera === null) { 953 | return; 954 | } 955 | 956 | setImagePlaneCamera(camera); 957 | } 958 | 959 | // Private function for continuing the movement to the next node when a journey is stopped. 960 | var smoothContinuation = function (shot_id) { 961 | var camera = getCamera(shot_id); 962 | navigateToShot(camera); 963 | } 964 | 965 | // Private function for mapping the fraction in [0, 1] to another fraction in [0, 1] based on the edge. 966 | var mapFraction = function (fraction, edge) { 967 | var length = edge.weight; 968 | var transitionLength = ['step_forward', 'step_backward'].indexOf(edge.direction) > -1 ? 4 : edge.weight; 969 | var lowerBound = Math.max((length - transitionLength) / (2 * length), 0); 970 | var upperBound = Math.min((length + transitionLength) / (2 * length), 1); 971 | 972 | var result = (fraction - lowerBound) / (upperBound - lowerBound); 973 | 974 | return Math.min(Math.max(result, 0), 1); 975 | } 976 | 977 | // Private function for determining the speed coefficient for the position between nodes based on the edge. 978 | var speedFunction = function (fraction, edge) { 979 | var general = 1; 980 | var increase = 0; 981 | 982 | switch (edge.direction) { 983 | case 'turn_left': 984 | case 'turn_right': 985 | general = edge.weight / Math.max(edge.weight, 6); 986 | break; 987 | case 'turn_u': 988 | general = edge.weight / Math.max(edge.weight, 8); 989 | break; 990 | case 'step_forward': 991 | case 'step_backward': 992 | // The speed increases by at most 0.35, scaled by a coefficient based on the edge weight. 993 | var k = Math.min(Math.max(edge.weight - 2, 0), 2) * 3 / 4; 994 | increase = k * Math.abs(0.35 - Math.min(Math.abs(fraction - 0.65), 0.35)); 995 | break; 996 | default: 997 | break; 998 | } 999 | 1000 | return general * (1 + increase); 1001 | } 1002 | 1003 | /** 1004 | * Initializes a journey wrapper. 1005 | * @param {Object} shots Dictionary of shots with rotation and translation arrays. 1006 | */ 1007 | JourneyWrapper.prototype.initialize = function (shots) { 1008 | if ('nav' in urlParams && 'dest' in urlParams) { 1009 | 1010 | this.destination = urlParams.dest; 1011 | this.curveType = THREE.SplineCurve3; 1012 | var _this = this; 1013 | 1014 | var penalty = { 1015 | weightKey: 'weight', 1016 | penaltyKey: 'direction', 1017 | values: { 1018 | step_backward: 30, 1019 | turn_u: 15, 1020 | turn_left: 3, 1021 | turn_right: 3, 1022 | step_left: 1, 1023 | step_right: 1 1024 | } 1025 | }; 1026 | 1027 | $.getJSON(urlParams.nav, function(data) { 1028 | 1029 | var graphs = _this.graphHelper.mergeTypeGraphs(data, 'pref', 'pos', _this.destination, 10); 1030 | 1031 | _this.journey = 1032 | 'jou' in urlParams && urlParams.jou === 'basic' ?
1033 | new Journey( 1034 | graphs, 1035 | convertShots(shots), 1036 | getInterval(), 1037 | navigation, 1038 | start, 1039 | stop, 1040 | preload, 1041 | penalty) : 1042 | new SmoothJourney( 1043 | graphs, 1044 | convertShots(shots), 1045 | getInterval(), 1046 | smoothNavigation, 1047 | smoothNodeAction, 1048 | start, 1049 | stop, 1050 | smoothContinuation, 1051 | preload, 1052 | speedFunction, 1053 | _this.curveType, 1054 | penalty); 1055 | 1056 | _this.initialized = true; 1057 | 1058 | options.showPath = false; 1059 | options.curveType = 'Spline'; 1060 | f1.add(options, 'curveType', ['Spline', 'Linear']) 1061 | .onChange(function (value) { 1062 | var curveType; 1063 | switch (value) { 1064 | case 'Spline': 1065 | curveType = THREE.SplineCurve3; 1066 | break; 1067 | case 'Linear': 1068 | curveType = LinearCurve; 1069 | break; 1070 | default: 1071 | curveType = THREE.SplineCurve3; 1072 | } 1073 | 1074 | _this.setCurveType(curveType); 1075 | }); 1076 | 1077 | $('#journeyButton').show(); 1078 | 1079 | if ('img' in urlParams && selectedCamera !== undefined) { 1080 | window.setTimeout(function () { _this.toggle(); }, 700) 1081 | } 1082 | else { 1083 | _this.addShowPathController(); 1084 | } 1085 | }); 1086 | } 1087 | } 1088 | 1089 | /** 1090 | * Gets a value indicating whether a journey is ongoing. 1091 | * @return {Boolean} A value indicating whether a journey is ongoing. 1092 | */ 1093 | JourneyWrapper.prototype.isStarted = function () { 1094 | if (this.initialized !== true) { 1095 | return false; 1096 | } 1097 | 1098 | return this.journey.isStarted(); 1099 | } 1100 | 1101 | /** 1102 | * Gets a value indicating whether a journey type is smooth. 1103 | * @return {Boolean} A value indicating whether a journey type is smooth. 1104 | */ 1105 | JourneyWrapper.prototype.isSmooth = function () { 1106 | if (this.initialized !== true) { 1107 | return false; 1108 | } 1109 | 1110 | return this.journey instanceof SmoothJourney 1111 | } 1112 | 1113 | /** 1114 | * Updates the interval. 1115 | */ 1116 | JourneyWrapper.prototype.updateInterval = function () { 1117 | if (this.initialized !== true) { 1118 | return; 1119 | } 1120 | 1121 | this.journey.updateInterval(getInterval()); 1122 | } 1123 | 1124 | /** 1125 | * Stops a journey. 1126 | */ 1127 | JourneyWrapper.prototype.stop = function () { 1128 | if (this.initialized !== true) { 1129 | return; 1130 | } 1131 | 1132 | if (this.journey.isStarted() === true) { 1133 | this.journey.stop(false); 1134 | } 1135 | } 1136 | 1137 | /** 1138 | * Toggles the journey state between started and stopped. 1139 | */ 1140 | JourneyWrapper.prototype.toggle = function () { 1141 | if (this.initialized !== true) { 1142 | return; 1143 | } 1144 | 1145 | if (this.journey.isStarted() === true) { 1146 | this.journey.stop(true); 1147 | return; 1148 | } 1149 | 1150 | if (selectedCamera === undefined) { 1151 | return; 1152 | } 1153 | 1154 | this.journey.updateInterval(getInterval()); 1155 | this.journey.start(selectedCamera.shot_id, this.destination); 1156 | } 1157 | 1158 | /** 1159 | * Shows the shortest path in the scene. 
1160 | */ 1161 | JourneyWrapper.prototype.showPath = function () { 1162 | if (this.initialized !== true 1163 | || selectedCamera === undefined 1164 | || movingMode !== 'orbit' 1165 | || options.showPath !== true) { 1166 | return; 1167 | } 1168 | 1169 | this.hidePath(); 1170 | 1171 | var pathGeometry = this.journey.getPathGeometry(selectedCamera.shot_id, this.destination); 1172 | if (pathGeometry === null) { 1173 | return; 1174 | } 1175 | 1176 | var curve = new (Function.prototype.bind.apply(this.curveType, [null, pathGeometry.vertices])); 1177 | var length = curve.getLength(); 1178 | var nbrOfPoints = length * 5; 1179 | var curveGeometry = new THREE.Geometry(); 1180 | curveGeometry.vertices = curve.getPoints(nbrOfPoints); 1181 | 1182 | var material = new THREE.LineBasicMaterial({ 1183 | color: 0xffff88, 1184 | linewidth: 5 1185 | }); 1186 | 1187 | this.line = new THREE.Line(curveGeometry, material); 1188 | this.line.name = 'shortestPath'; 1189 | scene.add(this.line); 1190 | render(); 1191 | } 1192 | 1193 | /** 1194 | * Hides the shortest path from the scene. 1195 | */ 1196 | JourneyWrapper.prototype.hidePath = function () { 1197 | if (this.initialized !== true || this.line === null) { 1198 | return; 1199 | } 1200 | 1201 | if (this.line !== null) { 1202 | var sceneLine = scene.getObjectByName(this.line.name); 1203 | scene.remove(sceneLine); 1204 | this.line = null; 1205 | render(); 1206 | } 1207 | } 1208 | 1209 | /** 1210 | * Sets the curve type of a journey. 1211 | * @param {Type} curveType The type of the curve used for movement. Must inherit from THREE.Curve. 1212 | */ 1213 | JourneyWrapper.prototype.setCurveType = function (curveType) { 1214 | if (this.initialized !== true) { 1215 | return; 1216 | } 1217 | 1218 | this.curveType = curveType; 1219 | 1220 | if (this.isSmooth()) { 1221 | this.journey.setCurveType(curveType); 1222 | } 1223 | 1224 | if (options.showPath === true) { 1225 | this.showPath(); 1226 | } 1227 | } 1228 | 1229 | /** 1230 | * Adds a show path checkbox to the options. 1231 | */ 1232 | JourneyWrapper.prototype.addShowPathController = function () { 1233 | if (this.initialized !== true || this.showPathController !== null) { 1234 | return; 1235 | } 1236 | 1237 | var _this = this; 1238 | this.showPathController = f1.add(options, 'showPath') 1239 | .listen() 1240 | .onChange(function () { 1241 | if (options.showPath === true && selectedCamera !== undefined) { 1242 | _this.showPath(); 1243 | } 1244 | else { 1245 | _this.hidePath(); 1246 | } 1247 | }); 1248 | 1249 | if (options.showPath === true) { 1250 | this.showPath(); 1251 | } 1252 | } 1253 | 1254 | /** 1255 | * Removes the show path checkbox from the options. 1256 | */ 1257 | JourneyWrapper.prototype.removeShowPathController = function () { 1258 | if (this.initialized !== true || this.showPathController === null) { 1259 | return; 1260 | } 1261 | 1262 | this.hidePath(); 1263 | f1.remove(this.showPathController); 1264 | this.showPathController = null; 1265 | } 1266 | 1267 | return JourneyWrapper; 1268 | })(jQuery); 1269 | 1270 | var journeyWrapper = new JourneyWrapper(); --------------------------------------------------------------------------------
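A minimal usage sketch of the Dijkstra and GraphHelper classes defined in viewer/js/journey.js above. The toy node names, edge weights and penalty values are illustrative only; the two classes have no THREE or DOM dependencies, so the snippet runs standalone:

    var dijkstra = new Dijkstra();
    var helper = new GraphHelper();

    // A toy nav graph in the shape journey.js expects: edges[from][type][to] = edge properties.
    var graph = {
        nodes: ['a', 'b', 'c'],
        edges: {
            a: { pref: { b: { weight: 1, direction: 'step_forward' }, c: { weight: 1, direction: 'turn_u' } } },
            b: { pref: { c: { weight: 1, direction: 'step_forward' } } },
            c: { pref: {} }
        }
    };

    // Flatten the 'pref' edge type, as JourneyWrapper.initialize does via mergeTypeGraphs.
    var typeGraph = helper.getTypeGraphs([graph], 'pref')[0];
    console.log(dijkstra.shortestPath(typeGraph, 'a', 'c', 'weight')); // ['a', 'c']

    // Penalizing u-turns makes the two-step forward route cheaper than the direct edge.
    var penaltyGraph = helper.getPenaltyGraph(typeGraph, 'weight', 'direction', { turn_u: 15 });
    console.log(dijkstra.shortestPath(penaltyGraph, 'a', 'c', 'weight')); // ['a', 'b', 'c']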