├── .gitignore
├── LICENSE
├── README.md
├── assets
│   ├── dense.jpg
│   ├── hololens_20.jpg
│   ├── logos.svg
│   ├── navvis_00159.jpg
│   ├── phone.jpg
│   ├── single_465_cam_phone_465_2.png
│   └── sparse.png
└── demo
├── __init__.py
├── evaluation.py
├── localization.py
├── pipeline.py
├── utils.py
└── visualization.py
/.gitignore:
--------------------------------------------------------------------------------
1 | outputs/
2 | data/
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | pip-wheel-metadata/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | *.py,cover
54 | .hypothesis/
55 | .pytest_cache/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-ShareAlike 4.0 International Public
2 | License
3 |
4 | By exercising the Licensed Rights (defined below), You accept and agree
5 | to be bound by the terms and conditions of this Creative Commons
6 | Attribution-ShareAlike 4.0 International Public License ("Public
7 | License"). To the extent this Public License may be interpreted as a
8 | contract, You are granted the Licensed Rights in consideration of Your
9 | acceptance of these terms and conditions, and the Licensor grants You
10 | such rights in consideration of benefits the Licensor receives from
11 | making the Licensed Material available under these terms and
12 | conditions.
13 |
14 |
15 | Section 1 -- Definitions.
16 |
17 | a. Adapted Material means material subject to Copyright and Similar
18 | Rights that is derived from or based upon the Licensed Material
19 | and in which the Licensed Material is translated, altered,
20 | arranged, transformed, or otherwise modified in a manner requiring
21 | permission under the Copyright and Similar Rights held by the
22 | Licensor. For purposes of this Public License, where the Licensed
23 | Material is a musical work, performance, or sound recording,
24 | Adapted Material is always produced where the Licensed Material is
25 | synched in timed relation with a moving image.
26 |
27 | b. Adapter's License means the license You apply to Your Copyright
28 | and Similar Rights in Your contributions to Adapted Material in
29 | accordance with the terms and conditions of this Public License.
30 |
31 | c. BY-SA Compatible License means a license listed at
32 | creativecommons.org/compatiblelicenses, approved by Creative
33 | Commons as essentially the equivalent of this Public License.
34 |
35 | d. Copyright and Similar Rights means copyright and/or similar rights
36 | closely related to copyright including, without limitation,
37 | performance, broadcast, sound recording, and Sui Generis Database
38 | Rights, without regard to how the rights are labeled or
39 | categorized. For purposes of this Public License, the rights
40 | specified in Section 2(b)(1)-(2) are not Copyright and Similar
41 | Rights.
42 |
43 | e. Effective Technological Measures means those measures that, in the
44 | absence of proper authority, may not be circumvented under laws
45 | fulfilling obligations under Article 11 of the WIPO Copyright
46 | Treaty adopted on December 20, 1996, and/or similar international
47 | agreements.
48 |
49 | f. Exceptions and Limitations means fair use, fair dealing, and/or
50 | any other exception or limitation to Copyright and Similar Rights
51 | that applies to Your use of the Licensed Material.
52 |
53 | g. License Elements means the license attributes listed in the name
54 | of a Creative Commons Public License. The License Elements of this
55 | Public License are Attribution and ShareAlike.
56 |
57 | h. Licensed Material means the artistic or literary work, database,
58 | or other material to which the Licensor applied this Public
59 | License.
60 |
61 | i. Licensed Rights means the rights granted to You subject to the
62 | terms and conditions of this Public License, which are limited to
63 | all Copyright and Similar Rights that apply to Your use of the
64 | Licensed Material and that the Licensor has authority to license.
65 |
66 | j. Licensor means the individual(s) or entity(ies) granting rights
67 | under this Public License.
68 |
69 | k. Share means to provide material to the public by any means or
70 | process that requires permission under the Licensed Rights, such
71 | as reproduction, public display, public performance, distribution,
72 | dissemination, communication, or importation, and to make material
73 | available to the public including in ways that members of the
74 | public may access the material from a place and at a time
75 | individually chosen by them.
76 |
77 | l. Sui Generis Database Rights means rights other than copyright
78 | resulting from Directive 96/9/EC of the European Parliament and of
79 | the Council of 11 March 1996 on the legal protection of databases,
80 | as amended and/or succeeded, as well as other essentially
81 | equivalent rights anywhere in the world.
82 |
83 | m. You means the individual or entity exercising the Licensed Rights
84 | under this Public License. Your has a corresponding meaning.
85 |
86 |
87 | Section 2 -- Scope.
88 |
89 | a. License grant.
90 |
91 | 1. Subject to the terms and conditions of this Public License,
92 | the Licensor hereby grants You a worldwide, royalty-free,
93 | non-sublicensable, non-exclusive, irrevocable license to
94 | exercise the Licensed Rights in the Licensed Material to:
95 |
96 | a. reproduce and Share the Licensed Material, in whole or
97 | in part; and
98 |
99 | b. produce, reproduce, and Share Adapted Material.
100 |
101 | 2. Exceptions and Limitations. For the avoidance of doubt, where
102 | Exceptions and Limitations apply to Your use, this Public
103 | License does not apply, and You do not need to comply with
104 | its terms and conditions.
105 |
106 | 3. Term. The term of this Public License is specified in Section
107 | 6(a).
108 |
109 | 4. Media and formats; technical modifications allowed. The
110 | Licensor authorizes You to exercise the Licensed Rights in
111 | all media and formats whether now known or hereafter created,
112 | and to make technical modifications necessary to do so. The
113 | Licensor waives and/or agrees not to assert any right or
114 | authority to forbid You from making technical modifications
115 | necessary to exercise the Licensed Rights, including
116 | technical modifications necessary to circumvent Effective
117 | Technological Measures. For purposes of this Public License,
118 | simply making modifications authorized by this Section 2(a)
119 | (4) never produces Adapted Material.
120 |
121 | 5. Downstream recipients.
122 |
123 | a. Offer from the Licensor -- Licensed Material. Every
124 | recipient of the Licensed Material automatically
125 | receives an offer from the Licensor to exercise the
126 | Licensed Rights under the terms and conditions of this
127 | Public License.
128 |
129 | b. Additional offer from the Licensor -- Adapted Material.
130 | Every recipient of Adapted Material from You
131 | automatically receives an offer from the Licensor to
132 | exercise the Licensed Rights in the Adapted Material
133 | under the conditions of the Adapter's License You apply.
134 |
135 | c. No downstream restrictions. You may not offer or impose
136 | any additional or different terms or conditions on, or
137 | apply any Effective Technological Measures to, the
138 | Licensed Material if doing so restricts exercise of the
139 | Licensed Rights by any recipient of the Licensed
140 | Material.
141 |
142 | 6. No endorsement. Nothing in this Public License constitutes or
143 | may be construed as permission to assert or imply that You
144 | are, or that Your use of the Licensed Material is, connected
145 | with, or sponsored, endorsed, or granted official status by,
146 | the Licensor or others designated to receive attribution as
147 | provided in Section 3(a)(1)(A)(i).
148 |
149 | b. Other rights.
150 |
151 | 1. Moral rights, such as the right of integrity, are not
152 | licensed under this Public License, nor are publicity,
153 | privacy, and/or other similar personality rights; however, to
154 | the extent possible, the Licensor waives and/or agrees not to
155 | assert any such rights held by the Licensor to the limited
156 | extent necessary to allow You to exercise the Licensed
157 | Rights, but not otherwise.
158 |
159 | 2. Patent and trademark rights are not licensed under this
160 | Public License.
161 |
162 | 3. To the extent possible, the Licensor waives any right to
163 | collect royalties from You for the exercise of the Licensed
164 | Rights, whether directly or through a collecting society
165 | under any voluntary or waivable statutory or compulsory
166 | licensing scheme. In all other cases the Licensor expressly
167 | reserves any right to collect such royalties.
168 |
169 |
170 | Section 3 -- License Conditions.
171 |
172 | Your exercise of the Licensed Rights is expressly made subject to the
173 | following conditions.
174 |
175 | a. Attribution.
176 |
177 | 1. If You Share the Licensed Material (including in modified
178 | form), You must:
179 |
180 | a. retain the following if it is supplied by the Licensor
181 | with the Licensed Material:
182 |
183 | i. identification of the creator(s) of the Licensed
184 | Material and any others designated to receive
185 | attribution, in any reasonable manner requested by
186 | the Licensor (including by pseudonym if
187 | designated);
188 |
189 | ii. a copyright notice;
190 |
191 | iii. a notice that refers to this Public License;
192 |
193 | iv. a notice that refers to the disclaimer of
194 | warranties;
195 |
196 | v. a URI or hyperlink to the Licensed Material to the
197 | extent reasonably practicable;
198 |
199 | b. indicate if You modified the Licensed Material and
200 | retain an indication of any previous modifications; and
201 |
202 | c. indicate the Licensed Material is licensed under this
203 | Public License, and include the text of, or the URI or
204 | hyperlink to, this Public License.
205 |
206 | 2. You may satisfy the conditions in Section 3(a)(1) in any
207 | reasonable manner based on the medium, means, and context in
208 | which You Share the Licensed Material. For example, it may be
209 | reasonable to satisfy the conditions by providing a URI or
210 | hyperlink to a resource that includes the required
211 | information.
212 |
213 | 3. If requested by the Licensor, You must remove any of the
214 | information required by Section 3(a)(1)(A) to the extent
215 | reasonably practicable.
216 |
217 | b. ShareAlike.
218 |
219 | In addition to the conditions in Section 3(a), if You Share
220 | Adapted Material You produce, the following conditions also apply.
221 |
222 | 1. The Adapter's License You apply must be a Creative Commons
223 | license with the same License Elements, this version or
224 | later, or a BY-SA Compatible License.
225 |
226 | 2. You must include the text of, or the URI or hyperlink to, the
227 | Adapter's License You apply. You may satisfy this condition
228 | in any reasonable manner based on the medium, means, and
229 | context in which You Share Adapted Material.
230 |
231 | 3. You may not offer or impose any additional or different terms
232 | or conditions on, or apply any Effective Technological
233 | Measures to, Adapted Material that restrict exercise of the
234 | rights granted under the Adapter's License You apply.
235 |
236 |
237 | Section 4 -- Sui Generis Database Rights.
238 |
239 | Where the Licensed Rights include Sui Generis Database Rights that
240 | apply to Your use of the Licensed Material:
241 |
242 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right
243 | to extract, reuse, reproduce, and Share all or a substantial
244 | portion of the contents of the database;
245 |
246 | b. if You include all or a substantial portion of the database
247 | contents in a database in which You have Sui Generis Database
248 | Rights, then the database in which You have Sui Generis Database
249 | Rights (but not its individual contents) is Adapted Material,
250 |      including for purposes of Section 3(b); and
251 |
252 |   c. You must comply with the conditions in Section 3(a) if You Share
253 | all or a substantial portion of the contents of the database.
254 |
255 | For the avoidance of doubt, this Section 4 supplements and does not
256 | replace Your obligations under this Public License where the Licensed
257 | Rights include other Copyright and Similar Rights.
258 |
259 |
260 | Section 5 -- Disclaimer of Warranties and Limitation of Liability.
261 |
262 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
263 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
264 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
265 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
266 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
267 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
268 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
269 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
270 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
271 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
272 |
273 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
274 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
275 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
276 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
277 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
278 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
279 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
280 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
281 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
282 |
283 | c. The disclaimer of warranties and limitation of liability provided
284 | above shall be interpreted in a manner that, to the extent
285 | possible, most closely approximates an absolute disclaimer and
286 | waiver of all liability.
287 |
288 |
289 | Section 6 -- Term and Termination.
290 |
291 | a. This Public License applies for the term of the Copyright and
292 | Similar Rights licensed here. However, if You fail to comply with
293 | this Public License, then Your rights under this Public License
294 | terminate automatically.
295 |
296 | b. Where Your right to use the Licensed Material has terminated under
297 | Section 6(a), it reinstates:
298 |
299 | 1. automatically as of the date the violation is cured, provided
300 | it is cured within 30 days of Your discovery of the
301 | violation; or
302 |
303 | 2. upon express reinstatement by the Licensor.
304 |
305 | For the avoidance of doubt, this Section 6(b) does not affect any
306 | right the Licensor may have to seek remedies for Your violations
307 | of this Public License.
308 |
309 | c. For the avoidance of doubt, the Licensor may also offer the
310 | Licensed Material under separate terms or conditions or stop
311 | distributing the Licensed Material at any time; however, doing so
312 | will not terminate this Public License.
313 |
314 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
315 | License.
316 |
317 |
318 | Section 7 -- Other Terms and Conditions.
319 |
320 | a. The Licensor shall not be bound by any additional or different
321 | terms or conditions communicated by You unless expressly agreed.
322 |
323 | b. Any arrangements, understandings, or agreements regarding the
324 | Licensed Material not stated herein are separate from and
325 | independent of the terms and conditions of this Public License.
326 |
327 |
328 | Section 8 -- Interpretation.
329 |
330 | a. For the avoidance of doubt, this Public License does not, and
331 | shall not be interpreted to, reduce, limit, restrict, or impose
332 | conditions on any use of the Licensed Material that could lawfully
333 | be made without permission under this Public License.
334 |
335 | b. To the extent possible, if any provision of this Public License is
336 | deemed unenforceable, it shall be automatically reformed to the
337 | minimum extent necessary to make it enforceable. If the provision
338 | cannot be reformed, it shall be severed from this Public License
339 | without affecting the enforceability of the remaining terms and
340 | conditions.
341 |
342 | c. No term or condition of this Public License will be waived and no
343 | failure to comply consented to unless expressly agreed to by the
344 | Licensor.
345 |
346 | d. Nothing in this Public License constitutes or may be interpreted
347 | as a limitation upon, or waiver of, any privileges and immunities
348 | that apply to the Licensor or You, including from the legal
349 | processes of any jurisdiction or authority.
350 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ETH-MS localization dataset
2 |
3 |
4 |
5 | The [Computer Vision & Geometry Group at ETH Zurich](http://www.cvg.ethz.ch/) and the [Microsoft Mixed Reality & AI Lab Zurich](https://www.microsoft.com/en-us/research/lab/mixed-reality-ai-zurich/) introduce a new dataset for visual localization with a focus on Augmented Reality scenarios. The data covers day & night illumination changes, large indoor & outdoor environments, and different sensor configurations for handheld and head-mounted devices.
6 |
7 | > [!IMPORTANT]
8 | > This is a now-deprecated preview of our [much larger follow-up LaMAR dataset](https://lamar.ethz.ch/), of which it is only a small subset. LaMAR includes lidar point clouds and meshes as well as high-frequency HoloLens & phone sensor streams with images, depth, IMU, etc. Please use LaMAR instead of this preview, which was released for the [ICCV 2021 workshop on Long-Term Visual Localization under Changing Conditions](https://sites.google.com/view/ltvl2021/).
9 |
10 |
11 |
12 | Left: dense 3D model of the capture area. Right: sparse 3D map and poses of the mapping images.
13 |
14 |
15 | ## Data
16 |
17 | We provide images captured at the HG building of the ETH Zurich campus, both in the main halls and on the sidewalk. This environment is challenging as it exhibits many self-similarities and symmetric structures. The images are split into mapping and query images. The **mapping** images were captured by the 6-camera rig of a NavVis M6 mobile scanner. The **query** images are extracted from sequences recorded months apart by:
18 |
19 | - an **iPhone 8 as single images** from the back camera,
20 | - a **HoloLens2 as sets of 4 images** from the rig of 4 tracking cameras.
21 |
22 | | Type | Sensor | Resolution | # images | Date |
23 | | --------------------- | ------------------------------- | ---------- | -------- | ----------------------------------------- |
24 | | Mapping | NavVis M6, RGB cameras | 1442x1920 | 6 x 819 | 2021-02-18 (morning) |
25 | | Queries: single-image | iPhone 8 ARKit, RGB back camera | 1440x1920 | 300 | 2021-04-29 (day), 2021-05-04 (night) |
26 | | Queries: rigs | HoloLens2, grayscale cameras | 480x640 | 4 x 300 | 2021-02-18 (morning) |
27 |
28 |
29 |
30 | 6 images captured by the NavVis M6 camera rig
31 |
39 |
40 | 4 images captured by the HoloLens2 camera rig
41 |
42 |
43 | We provide the poses of the mapping images but keep those of the query images private. However, we provide intrinsic camera calibrations for all images and the relative rig poses of the HoloLens2. We have obfuscated the temporal order of the images to prevent challenge participants from relying on image sequences.
44 |
45 | The data is available in the [Kapture](https://github.com/naver/kapture) format at https://cvg-data.inf.ethz.ch/eth_ms_dataset/iccv2021. The timestamps and camera (or rig) IDs of the single-image (and rig) queries are listed in the files `queries_single.txt` and `queries_rigs.txt`.
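
For a quick sanity check, the mapping split can be inspected with the kapture Python API, as `demo/utils.py` does. A minimal sketch, assuming `data.zip` has been unzipped into `data/`:

```python
from pathlib import Path

import kapture
from kapture.io.csv import kapture_from_dir

# Load the mapping Kapture (heavy records can be skipped, see demo/utils.py).
kapture_map = kapture_from_dir(Path('data/mapping'))

# List the mapping image names; their poses are in kapture_map.trajectories.
images = [name for _, _, name in kapture.flatten(kapture_map.records_camera, is_sorted=True)]
print(len(images), images[:2])
```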
46 |
47 | ## ICCV 2021 Challenge
48 |
49 | The goal of the challenge is to estimate the poses of the single query images (iPhone) and of the camera rigs (HoloLens2) listed in the files `queries_[single|rigs].txt`. Challenge participants are expected to submit a single text file to the evaluation server at [visuallocalization.net](https://www.visuallocalization.net/). Each line of this file corresponds to the pose of one single image or rig in the format:
50 | ```
51 | timestamp/camera_or_rig_id qw qx qy qz tx ty tz
52 | ```
53 |
54 | The pose is expressed as a quaternion `qw qx qy qz` and a camera translation `tx ty tz` in the COLMAP coordinate system, i.e. **from the world to the camera frame**. Here is an example of two poses from the submission file generated by the demo script below:
55 |
56 | ```
57 | 0/cam_phone_0_2 -0.08691328955558617 -0.19368004528425714 0.834072988867356 -0.5091722394231546 16.350978764689117 33.55611563888155 -57.70510693949592
58 | 1/hetrig_1 0.0005166642265923816 0.8648426950562507 0.0027338466179127005 -0.5020352297882964 -31.154051098151406 9.907290815759488 58.16937396700082
59 | ```
60 | The submission file should contain poses for all 600 queries.
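
If a method estimates camera-to-world poses, they must be inverted before writing the file. A minimal sketch using kapture's `PoseTransform`, mirroring `demo/utils.py` (the pose values below are placeholders):

```python
import numpy as np
from kapture import PoseTransform

# Hypothetical camera-to-world pose: quaternion (qw qx qy qz) and translation (tx ty tz).
T_cam2world = PoseTransform(np.array([1., 0., 0., 0.]), np.array([16.3, 33.5, -57.7]))

# The benchmark expects world-to-camera poses, so invert first.
T_world2cam = T_cam2world.inverse()
line = ' '.join(['0/cam_phone_0_2'] + np.concatenate(
    (T_world2cam.r_raw, T_world2cam.t_raw)).astype(str).tolist())
print(line)  # timestamp/camera_or_rig_id qw qx qy qz tx ty tz
```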
61 |
62 | ## Localization demo
63 |
64 | We provide a baseline method with [hloc](https://github.com/cvg/Hierarchical-Localization) using SuperPoint+SuperGlue for image matching and NetVLAD for image retrieval. The pipeline estimates the absolute poses of the queries using the P3P solver for the single images and the generalized solver GP3P for the camera rigs.
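
Both solvers are exposed by pycolmap and called from `demo/localization.py`. A sketch of the single-image case with placeholder correspondences, assuming the pycolmap API used at the time of release:

```python
import numpy as np
import pycolmap

# Placeholder 2D-3D correspondences and a COLMAP-style camera dictionary.
p2d = np.random.rand(50, 2) * 640   # query keypoints, in COLMAP pixel coordinates
p3d = np.random.rand(50, 3) * 10.   # matched 3D points from the sparse map
camera = {'model': 'PINHOLE', 'width': 640, 'height': 480,
          'params': np.array([500., 500., 320., 240.])}

# P3P + RANSAC with a reprojection threshold in pixels, as in demo/localization.py.
ret = pycolmap.absolute_pose_estimation(p2d, p3d, camera, 12.0)
if ret['success']:
    print(ret['qvec'], ret['tvec'], np.sum(ret['inliers']))

# For rigs, pycolmap.rig_absolute_pose_estimation additionally takes the per-camera
# relative poses; see estimate_camera_pose_rig in demo/localization.py.
```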
65 |
66 | Requirements:
67 | - Python >= 3.6
68 | - latest commits of [COLMAP](https://colmap.github.io/) and [pycolmap](https://github.com/mihaidusmanu/pycolmap)
69 | - [hloc](https://github.com/cvg/Hierarchical-Localization) and its dependencies
70 | - [kapture](https://github.com/naver/kapture): `pip install kapture`
71 |
72 | To download the data and run the demo pipeline:
73 | ```
74 | wget https://cvg-data.inf.ethz.ch/eth_ms_dataset/iccv2021/data.zip && unzip data.zip
75 | python3 -m demo.pipeline
76 | ```
77 |
78 | This will create a submission file in `outputs/netvlad+superpoint+superglue/results.txt` as well as visualizations in `outputs/netvlad+superpoint+superglue/viz/`:
79 |
80 |
81 |
82 |
83 |
84 |
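For reference, `demo/evaluation.py` can score a submission locally, but only when ground-truth query poses are available in the query Kapture; since the query poses of this dataset are held private, this mainly applies to the organizers or to your own captures:

```
python3 -m demo.evaluation --name netvlad+superpoint+superglue
```
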
85 | Once submitted to the benchmark, this should roughly give the following recall:
86 | | Method | Single-image queries | Rig queries |
87 | | --------------------------------------------------------------------------------- | -------------------- | ------------------ |
88 | | [NetVLAD+SuperPoint+SuperGlue](https://www.visuallocalization.net/details/31514/) | 41.0 / 51.7 / 57.7 | 70.0 / 74.3 / 75.0 |
89 |
90 | for (orientation, distance) thresholds (1°, 10cm) / (2°, 25cm) / (5°, 1m).
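
A query counts as correctly localized only if both its rotation and translation errors are below a threshold pair. A sketch of the recall computation, mirroring `demo/evaluation.py` (the error arrays are placeholders; the real ones are computed with kapture's `pose_transform_distance` against the private ground-truth poses):

```python
import numpy as np

# Per-query rotation errors (degrees) and translation errors (meters); placeholder values.
err_r = np.array([0.5, 1.5, 8.0])
err_t = np.array([0.05, 0.20, 2.00])

thresholds = [(1, 0.1), (2, 0.25), (5, 1.0)]  # (degrees, meters)
recalls = [np.mean((err_r < th_r) & (err_t < th_t)) for th_r, th_t in thresholds]
print(recalls)  # fraction of queries within each threshold pair
```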
91 |
92 |
93 | ## Citation
94 |
95 | Please cite the dataset as below if you use this preview data in an academic publication:
96 | ```bibtex
97 | @misc{eth_ms_visloc_2021,
98 | title = {{The ETH-Microsoft Localization Dataset}},
99 | author = {{ETH Zurich Computer Vision Group and Microsoft Mixed Reality \& AI Lab Zurich}},
100 | howpublished = {\url{https://github.com/cvg/visloc-iccv2021}},
101 | year = 2021,
102 | }
103 | ```
104 |
105 | ## Privacy
106 |
107 | We did our best to anonymize the data by blurring all faces and license plates visible in the images. Please let us know if you find any issues or are not satisfied with the level of anonymization. You can reach out to Paul-Edouard at `psarlin at ethz dot ch`.
108 |
109 | ## License
110 |
111 | The data and code are provided under the Creative Commons license [Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)](https://creativecommons.org/licenses/by-sa/4.0/). This means that you must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. **Note that this license does not cover external modules that are available only under restrictive licenses, such as SuperPoint and SuperGlue.**
112 |
--------------------------------------------------------------------------------
/assets/dense.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/dense.jpg
--------------------------------------------------------------------------------
/assets/hololens_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/hololens_20.jpg
--------------------------------------------------------------------------------
/assets/logos.svg:
--------------------------------------------------------------------------------
1 |
2 |
112 |
--------------------------------------------------------------------------------
/assets/navvis_00159.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/navvis_00159.jpg
--------------------------------------------------------------------------------
/assets/phone.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/phone.jpg
--------------------------------------------------------------------------------
/assets/single_465_cam_phone_465_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/single_465_cam_phone_465_2.png
--------------------------------------------------------------------------------
/assets/sparse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/assets/sparse.png
--------------------------------------------------------------------------------
/demo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cvg/visloc-iccv2021/ee4a94ea569b6441b6d8865a7559d15d01bd7e9c/demo/__init__.py
--------------------------------------------------------------------------------
/demo/evaluation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from pathlib import Path
3 | import numpy as np
4 |
5 | import kapture
6 | from kapture.io.csv import kapture_from_dir
7 | from kapture.algo.pose_operations import pose_transform_distance
8 |
9 | from .utils import parse_query_list, parse_submission
10 |
11 |
12 | def evaluate(kapture_path: Path, poses_path: Path, queries_single: Path, queries_rigs: Path):
13 | skip_heavy_useless = [kapture.RecordsLidar, kapture.RecordsWifi,
14 | kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
15 | kapture.Matches, kapture.Points3d, kapture.Observations]
16 | kapture_ = kapture_from_dir(kapture_path, skip_list=skip_heavy_useless)
17 | if kapture_.trajectories is None:
18 | raise ValueError('The query Kapture does not have ground truth poses.')
19 |
20 | Ts_w2c = parse_submission(poses_path)
21 | Ts_w2c_gt = kapture_.trajectories
22 | keys_single = parse_query_list(queries_single)
23 | keys_rigs = parse_query_list(queries_rigs)
24 | keys = keys_single + keys_rigs
25 | is_rig = np.array([False] * len(keys_single) + [True] * len(keys_rigs))
26 |
27 | err_r, err_t = [], []
28 | for key in keys:
29 | T_w2c_gt = Ts_w2c_gt[key]
30 | if key in Ts_w2c:
31 | dt, dr = pose_transform_distance(Ts_w2c[key].inverse(), T_w2c_gt.inverse())
32 | dr = np.rad2deg(dr)
33 | else:
34 | dr = np.inf
35 | dt = np.inf
36 | err_r.append(dr)
37 | err_t.append(dt)
38 | err_r = np.stack(err_r)
39 | err_t = np.stack(err_t)
40 |
41 | threshs = [(1, 0.1), (2, 0.25), (5, 1.)]
42 | recalls = [
43 | np.mean((err_r < th_r) & (err_t < th_t)) for th_r, th_t in threshs]
44 | recalls_single = [
45 | np.mean((err_r[~is_rig] < th_r) & (err_t[~is_rig] < th_t)) for th_r, th_t in threshs]
46 | recalls_rigs = [
47 | np.mean((err_r[is_rig] < th_r) & (err_t[is_rig] < th_t)) for th_r, th_t in threshs]
48 |
49 | results = {'recall': recalls,
50 | 'recall_single': recalls_single,
51 | 'recall_rigs': recalls_rigs,
52 | 'Rt_thresholds': threshs}
53 | print('Results:', results)
54 |
55 |
56 | if __name__ == '__main__':
57 | parser = argparse.ArgumentParser()
58 | parser.add_argument('--data_path', type=Path, default=Path('data/'))
59 | parser.add_argument('--name', type=str, default='netvlad+superpoint+superglue')
60 | parser.add_argument('--output_path', type=Path, default=Path('./outputs/'))
61 | args = parser.parse_args()
62 | evaluate(
63 | args.data_path / 'query',
64 | args.output_path / args.name / 'results.txt',
65 | args.data_path / 'queries_single.txt',
66 | args.data_path / 'queries_rigs.txt')
67 |
--------------------------------------------------------------------------------
/demo/localization.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | from typing import Dict, List, Tuple
4 | from collections import defaultdict
5 | from math import ceil
6 | import numpy as np
7 | from tqdm import tqdm
8 |
9 | import pycolmap
10 |
11 | import kapture
12 | from kapture import Camera, PoseTransform
13 | from kapture.io.csv import kapture_from_dir
14 |
15 | from hloc.utils.parsers import parse_retrieval
16 | from hloc.utils.read_write_model import read_model, Image, Point3D
17 |
18 | from .utils import parse_query_list, get_keypoints, get_matches, camera_to_dict
19 | from .visualization import plot_pnp_inliers, dump_plot
20 |
21 |
22 | def localize(paths: Path, config: dict, num_visualize: int = 20) -> kapture.Trajectories:
23 |
24 | skip_heavy_useless = [kapture.Trajectories,
25 | kapture.RecordsLidar, kapture.RecordsWifi,
26 | kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
27 | kapture.Matches, kapture.Points3d, kapture.Observations]
28 | kapture_ = kapture_from_dir(paths.kapture_query, skip_list=skip_heavy_useless)
29 | rig_to_sensors = defaultdict(list)
30 | for rig_id, sensor_id in kapture_.rigs.key_pairs():
31 | rig_to_sensors[rig_id].append(sensor_id)
32 |
33 | keys_single = parse_query_list(paths.queries_single)
34 | keys_rigs = parse_query_list(paths.queries_rigs)
35 | poses = kapture.Trajectories()
36 |
37 | logging.info('Reading the sparse SfM model...')
38 | _, sfm_images, sfm_points = read_model(paths.sfm)
39 | sfm_name_to_id = {im.name: i for i, im in sfm_images.items()}
40 | pairs = parse_retrieval(paths.pairs_loc)
41 |
42 | logging.info('Localizing single queries...')
43 | for idx, (ts, camera_id) in enumerate(tqdm(keys_single)):
44 | name = kapture_.records_camera[ts, camera_id]
45 | camera = kapture_.sensors[camera_id]
46 | refs = pairs[name][:config['num_pairs_loc']]
47 | ref_ids = [sfm_name_to_id[n] for n in refs]
48 | T_world2cam, ret = estimate_camera_pose(
49 | name, ref_ids, camera, sfm_images, sfm_points,
50 | paths.lfeats, paths.matches_loc, config['pnp_reprojection_thresh'])
51 | if T_world2cam is not None:
52 | poses[ts, camera_id] = T_world2cam
53 |
54 | if num_visualize > 0 and idx % ceil(len(keys_single)/num_visualize) == 0:
55 | plot_pnp_inliers(
56 | paths.images_query / name, ref_ids, ret, sfm_images, sfm_points, paths.images_map)
57 | dump_plot(paths.viz / f'single_{ts}_{camera_id}.png')
58 |
59 | logging.info('Localizing camera rigs...')
60 | for idx, (ts, rig_id) in enumerate(tqdm(keys_rigs)):
61 | assert rig_id in rig_to_sensors, (rig_id, rig_to_sensors.keys())
62 | camera_ids = rig_to_sensors[rig_id]
63 | names = [kapture_.records_camera[ts, i] for i in camera_ids]
64 | cameras = [kapture_.sensors[i] for i in camera_ids]
65 | T_cams2rig = [kapture_.rigs[rig_id, i].inverse() for i in camera_ids]
66 | ref_ids = [[sfm_name_to_id[r] for r in pairs[q][:config['num_pairs_loc']]] for q in names]
67 | T_world2rig, ret = estimate_camera_pose_rig(
68 | names, ref_ids, cameras, T_cams2rig, sfm_images, sfm_points,
69 | paths.lfeats, paths.matches_loc, config['pnp_reprojection_thresh_rig'])
70 |
71 |         # store the rig pose directly: the submission expects rig poses, not per-camera poses
72 | if T_world2rig is not None:
73 | poses[ts, rig_id] = T_world2rig
74 |
75 | return poses
76 |
77 |
78 | def estimate_camera_pose(query: str, ref_ids: List[int], camera: Camera,
79 | sfm_images: Dict[int, Image], sfm_points: Dict[int, Point3D],
80 |                          query_features: Path, match_file: Path, thresh: float) -> Tuple[PoseTransform, Dict]:
81 |
82 | p2d, = get_keypoints(query_features, [query])
83 | p2d_to_p3d = defaultdict(list)
84 | p2d_to_p3d_to_dbs = defaultdict(lambda: defaultdict(list))
85 | num_matches = 0
86 |
87 | refs = [sfm_images[i].name for i in ref_ids]
88 | all_matches = get_matches(match_file, zip([query]*len(refs), refs))
89 |
90 | for idx, (ref_id, matches) in enumerate(zip(ref_ids, all_matches)):
91 | p3d_ids = sfm_images[ref_id].point3D_ids
92 | if len(p3d_ids) == 0:
93 | logging.warning('No 3D points found for %s.', sfm_images[ref_id].name)
94 | continue
95 | matches = matches[p3d_ids[matches[:, 1]] != -1]
96 | num_matches += len(matches)
97 |
98 | for i, j in matches:
99 | p3d_id = p3d_ids[j]
100 | p2d_to_p3d_to_dbs[i][p3d_id].append(idx)
101 | # avoid duplicate observations
102 | if p3d_id not in p2d_to_p3d[i]:
103 | p2d_to_p3d[i].append(p3d_id)
104 |
105 | idxs = list(p2d_to_p3d.keys())
106 | p2d_idxs = [i for i in idxs for _ in p2d_to_p3d[i]]
107 | p2d_m = p2d[p2d_idxs]
108 | p2d_m += 0.5 # COLMAP coordinates
109 |
110 | p3d_ids = [j for i in idxs for j in p2d_to_p3d[i]]
111 | p3d_m = [sfm_points[j].xyz for j in p3d_ids]
112 | p3d_m = np.array(p3d_m).reshape(-1, 3)
113 |
114 | # mostly for logging and post-processing
115 | p3d_matched_dbs = [(j, p2d_to_p3d_to_dbs[i][j])
116 | for i in idxs for j in p2d_to_p3d[i]]
117 |
118 | ret = pycolmap.absolute_pose_estimation(p2d_m, p3d_m, camera_to_dict(camera), thresh)
119 |
120 | if ret['success']:
121 | T_w2cam = PoseTransform(ret['qvec'], ret['tvec'])
122 | else:
123 | T_w2cam = None
124 |
125 | ret = {
126 | **ret, 'p2d_q': p2d_m, 'p3d_r': p3d_m, 'p3d_ids': p3d_ids,
127 | 'num_matches': num_matches, 'p3d_matched_dbs': p3d_matched_dbs}
128 | return T_w2cam, ret
129 |
130 |
131 | def estimate_camera_pose_rig(queries: List[str], ref_ids_list: List[List[int]],
132 | cameras: List[Camera], T_cams2rig: List[PoseTransform],
133 | sfm_images: Dict[int, Image], sfm_points: Dict[int, Point3D],
134 |                              query_features: Path, match_file: Path, thresh: float) -> Tuple[PoseTransform, Dict]:
135 | p2d_m_list = []
136 | p3d_m_list = []
137 | p3d_ids_list = []
138 | p3d_matched_dbs_list = []
139 | num_matches_list = []
140 | for query, ref_ids in zip(queries, ref_ids_list):
141 | p2d, = get_keypoints(query_features, [query])
142 | p2d_to_p3d = defaultdict(list)
143 | p2d_to_p3d_to_dbs = defaultdict(lambda: defaultdict(list))
144 | num_matches = 0
145 |
146 | refs = [sfm_images[i].name for i in ref_ids]
147 | all_matches = get_matches(match_file, zip([query]*len(refs), refs))
148 |
149 | for idx, (ref_id, matches) in enumerate(zip(ref_ids, all_matches)):
150 | p3d_ids = sfm_images[ref_id].point3D_ids
151 | if len(p3d_ids) == 0:
152 | logging.warning('No 3D points found for %s.', sfm_images[ref_id].name)
153 | continue
154 | matches = matches[p3d_ids[matches[:, 1]] != -1]
155 | num_matches += len(matches)
156 |
157 | for i, j in matches:
158 | p3d_id = p3d_ids[j]
159 | p2d_to_p3d_to_dbs[i][p3d_id].append(idx)
160 | # avoid duplicate observations
161 | if p3d_id not in p2d_to_p3d[i]:
162 | p2d_to_p3d[i].append(p3d_id)
163 |
164 | idxs = list(p2d_to_p3d.keys())
165 | p2d_idxs = [i for i in idxs for _ in p2d_to_p3d[i]]
166 | p2d_m = p2d[p2d_idxs]
167 | p2d_m += 0.5 # COLMAP coordinates
168 |
169 | p3d_ids = [j for i in idxs for j in p2d_to_p3d[i]]
170 | p3d_m = [sfm_points[j].xyz for j in p3d_ids]
171 | p3d_m = np.array(p3d_m).reshape(-1, 3)
172 |
173 | # mostly for logging and post-processing
174 | p3d_matched_dbs = [(j, p2d_to_p3d_to_dbs[i][j])
175 | for i in idxs for j in p2d_to_p3d[i]]
176 |
177 | # Save for pose estimation.
178 | p2d_m_list.append(p2d_m)
179 | p3d_m_list.append(p3d_m)
180 | p3d_ids_list.append(p3d_ids)
181 | p3d_matched_dbs_list.append(p3d_matched_dbs)
182 | num_matches_list.append(num_matches)
183 |
184 | camera_dicts = [camera_to_dict(camera) for camera in cameras]
185 | rel_poses = [T.inverse() for T in T_cams2rig]
186 | qvecs = [p.r_raw for p in rel_poses]
187 | tvecs = [p.t for p in rel_poses]
188 |
189 | ret = pycolmap.rig_absolute_pose_estimation(
190 | p2d_m_list, p3d_m_list, camera_dicts, qvecs, tvecs, thresh)
191 |
192 | if ret['success']:
193 | T_w2rig = PoseTransform(ret['qvec'], ret['tvec'])
194 | else:
195 | T_w2rig = None
196 |
197 | ret = {
198 | **ret, 'p2d_q': p2d_m_list, 'p3d_r': p3d_m_list, 'p3d_ids': p3d_ids_list,
199 | 'num_matches': num_matches_list, 'p3d_matched_dbs': p3d_matched_dbs_list}
200 | return T_w2rig, ret
201 |
--------------------------------------------------------------------------------
/demo/pipeline.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from types import SimpleNamespace
3 | from pathlib import Path
4 |
5 | from hloc import extract_features, match_features, pairs_from_retrieval, triangulation
6 | from hloc.utils.read_write_model import read_model, write_model
7 |
8 | from kapture.io.records import get_record_fullpath
9 | from kapture.converter.colmap.export_colmap import export_colmap
10 | from kapture.io.csv import trajectories_to_file
11 |
12 | from .utils import image_list_from_kapture, write_submission
13 | from .localization import localize
14 | from .evaluation import evaluate
15 |
16 |
17 | CONFIG = {
18 | 'name': 'netvlad+superpoint+superglue',
19 | 'global_features': {
20 | 'model': {'name': 'netvlad'},
21 | 'preprocessing': {'resize_max': 1024},
22 | },
23 | 'local_features': {
24 | 'model': {
25 | 'name': 'superpoint',
26 | 'nms_radius': 3,
27 | 'max_keypoints': 2048,
28 | },
29 | 'preprocessing': {
30 | 'grayscale': True,
31 | 'resize_max': 1600,
32 | },
33 | },
34 | 'matching': {
35 | 'model': {
36 | 'name': 'superglue',
37 | 'weights': 'outdoor',
38 | 'sinkhorn_iterations': 10,
39 | },
40 | },
41 | 'num_pairs_sfm': 10,
42 | 'num_pairs_loc': 10,
43 | 'pnp_reprojection_thresh': 12.0,
44 | 'pnp_reprojection_thresh_rig': 1.0,
45 | }
46 |
47 |
48 | def run(dataset_path: Path, map_name: str, query_name: str, output_path: Path, config: dict):
49 | outputs = output_path / config['name']
50 | outputs.mkdir(parents=True, exist_ok=True)
51 | paths = SimpleNamespace(
52 | gfeats='global_features.h5',
53 | lfeats='local_features.h5',
54 | matches_sfm='matches_sfm.h5',
55 | matches_loc='matches_loc.h5',
56 | pairs_sfm='pairs_sfm.txt',
57 | pairs_loc='pairs_loc.txt',
58 | sfm_empty='sfm_empty',
59 | sfm='sfm',
60 | viz='viz',
61 | query_poses='query_poses.txt',
62 | results='results.txt',
63 | )
64 | for k, v in paths.__dict__.items():
65 | setattr(paths, k, outputs / v)
66 | paths.kapture_map = dataset_path / map_name
67 | paths.kapture_query = dataset_path / query_name
68 | paths.images_map = Path(get_record_fullpath(paths.kapture_map))
69 | paths.images_query = Path(get_record_fullpath(paths.kapture_query))
70 | paths.queries_single = dataset_path / 'queries_single.txt'
71 | paths.queries_rigs = dataset_path / 'queries_rigs.txt'
72 |
73 | images_map = image_list_from_kapture(paths.kapture_map)
74 | images_query = image_list_from_kapture(paths.kapture_query)
75 |
76 | # MAPPING
77 | extract_features.main(
78 | config['global_features'], paths.images_map, feature_path=paths.gfeats,
79 | image_list=images_map, as_half=True)
80 | pairs_from_retrieval.main(
81 | paths.gfeats, paths.pairs_sfm, config['num_pairs_sfm'],
82 | query_list=images_map, db_list=images_map)
83 |
84 | extract_features.main(
85 | config['local_features'], paths.images_map, feature_path=paths.lfeats,
86 | image_list=images_map, as_half=True)
87 | match_features.main(
88 | config['matching'], paths.pairs_sfm, paths.lfeats, matches=paths.matches_sfm)
89 |
90 | export_colmap(paths.kapture_map, paths.sfm_empty / 'colmap.db', paths.sfm_empty,
91 | force_overwrite_existing=True)
92 | write_model(*read_model(paths.sfm_empty, ext='.txt'), paths.sfm_empty)
93 | if not paths.sfm.exists():
94 | triangulation.main(
95 | paths.sfm, paths.sfm_empty, paths.images_map,
96 | paths.pairs_sfm, paths.lfeats, paths.matches_sfm)
97 |
98 | # LOCALIZATION
99 | extract_features.main(
100 | config['global_features'], paths.images_query, feature_path=paths.gfeats,
101 | image_list=images_query, as_half=True)
102 | pairs_from_retrieval.main(
103 | paths.gfeats, paths.pairs_loc, config['num_pairs_loc'],
104 | query_list=images_query, db_list=images_map)
105 |
106 | extract_features.main(
107 | config['local_features'], paths.images_query, feature_path=paths.lfeats,
108 | image_list=images_query, as_half=True)
109 | match_features.main(
110 | config['matching'], paths.pairs_loc, paths.lfeats, matches=paths.matches_loc)
111 |
112 | query_poses = localize(paths, config)
113 | trajectories_to_file(paths.query_poses, query_poses)
114 | write_submission(paths.results, query_poses)
115 |
116 |
117 | if __name__ == '__main__':
118 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
119 | parser.add_argument('--dataset_path', type=Path, default=Path('data/'),
120 | help='path to the top-level directory of the dataset')
121 | parser.add_argument('--map_name', type=str, default='mapping',
122 | help='name of the Kapture dataset of the map')
123 | parser.add_argument('--query_name', type=str, default='query',
124 | help='name of the Kapture dataset of the queries')
125 | parser.add_argument('--output_path', type=Path, default=Path('./outputs/'),
126 | help='path to the output directory')
127 | args = parser.parse_args()
128 | run(**args.__dict__, config=CONFIG)
129 |
--------------------------------------------------------------------------------
/demo/utils.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import List, Tuple, Iterator
3 | import numpy as np
4 | import h5py
5 |
6 | import kapture
7 | from kapture import Trajectories, PoseTransform
8 | from kapture.io.csv import kapture_from_dir
9 |
10 | from hloc.utils.parsers import names_to_pair
11 |
12 |
13 | def image_list_from_kapture(kapture_path: Path) -> List[str]:
14 | skip_heavy_useless = [kapture.Trajectories,
15 | kapture.RecordsLidar, kapture.RecordsWifi,
16 | kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
17 | kapture.Matches, kapture.Points3d, kapture.Observations]
18 | kapture_ = kapture_from_dir(kapture_path, skip_list=skip_heavy_useless)
19 | image_list = [name for _, _, name in kapture.flatten(kapture_.records_camera, is_sorted=True)]
20 | return image_list
21 |
22 |
23 | def parse_query_list(path: Path) -> List[Tuple[int, str]]:
24 | keys = []
25 | with open(path, 'r') as fid:
26 | for line in fid:
27 | line = line.strip('\n')
28 | if len(line) == 0 or line[0] == '#':
29 | continue
30 | timestamp, camera_id = line.split('/')
31 | keys.append((int(timestamp), camera_id))
32 | return keys
33 |
34 |
35 | def parse_submission(path: Path) -> Trajectories:
36 | poses = Trajectories()
37 | with open(path, 'r') as fid:
38 | for line in fid:
39 | line = line.strip('\n')
40 | if len(line) == 0 or line[0] == '#':
41 | continue
42 | name, qw, qx, qy, qz, tx, ty, tz = line.split(' ')
43 | pose = PoseTransform(np.array([qw, qx, qy, qz], float), np.array([tx, ty, tz], float))
44 | timestamp, camera_id = name.split('/')
45 | poses[(int(timestamp), camera_id)] = pose
46 | return poses
47 |
48 |
49 | def write_submission(path: Path, poses: Trajectories):
50 | with open(path, 'w') as fid:
51 | for timestamp, camera_id in poses.key_pairs():
52 | pose = poses[timestamp, camera_id]
53 | name = f'{timestamp}/{camera_id}'
54 | data = [name] + np.concatenate((pose.r_raw, pose.t_raw)).astype(str).tolist()
55 | fid.write(' '.join(data) + '\n')
56 |
57 |
58 |
59 | def get_keypoints(feats_path: Path, keys: Iterator[str]) -> List[np.ndarray]:
60 | with h5py.File(feats_path, 'r') as fid:
61 | keypoints = [fid[str(k)]['keypoints'].__array__() for k in keys]
62 | return keypoints
63 |
64 |
65 | def get_matches(matches_path: Path, key_pairs: Iterator[Tuple[str, str]]) -> List[np.ndarray]:
66 | matches = []
67 | with h5py.File(matches_path, 'r') as fid:
68 | for k1, k2 in key_pairs:
69 | pair = names_to_pair(str(k1), str(k2))
70 | m = fid[pair]['matches0'].__array__()
71 | idx = np.where(m != -1)[0]
72 | m = np.stack([idx, m[idx]], -1)
73 | matches.append(m)
74 | return matches
75 |
76 |
77 | def camera_to_dict(camera: kapture.Camera) -> dict:
78 | model, w, h, *params = camera.sensor_params
79 | return {
80 | 'model': model,
81 | 'width': int(w),
82 | 'height': int(h),
83 | 'params': np.array(params, float),
84 | }
85 |
--------------------------------------------------------------------------------
/demo/visualization.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import List, Dict
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | from hloc.utils.viz import plot_images, plot_matches, save_plot, cm_RdGn
7 | from hloc.utils.read_write_model import Image, Point3D
8 | from hloc.utils.io import read_image
9 |
10 |
11 | def plot_pnp_inliers(query_path: Path, ref_ids: List[int], ret: Dict,
12 | sfm_images: Dict[int, Image], sfm_points: Dict[int, Point3D],
13 | map_root: Path, num_pairs: int = 2):
14 |
15 | n = len(ref_ids)
16 | num_inliers = np.zeros(n)
17 | dbs_kp_q_db = [[] for _ in range(n)]
18 | inliers_dbs = [[] for _ in range(n)]
19 | inliers = ret.get('inliers', np.full(len(ret['p2d_q']), False))
20 | # for each pair of query keypoint and its matched 3D point,
21 | # we need to find its corresponding keypoint in each database image
22 | # that observes it. We also count the number of inliers in each.
23 | for i, (inl, (p3d_id, db_idxs)) in enumerate(zip(inliers, ret['p3d_matched_dbs'])):
24 | p3d = sfm_points[p3d_id]
25 | for db_idx in db_idxs:
26 | num_inliers[db_idx] += inl
27 | kp_db = p3d.point2D_idxs[p3d.image_ids == ref_ids[db_idx]][0]
28 | dbs_kp_q_db[db_idx].append((i, kp_db))
29 | inliers_dbs[db_idx].append(inl)
30 |
31 | idxs = np.argsort(num_inliers)[::-1][:num_pairs]
32 | qim = read_image(query_path)
33 | refs = [sfm_images[i].name for i in ref_ids]
34 | ims = []
35 | titles = []
36 | for i in idxs:
37 | ref = refs[i]
38 | rim = read_image(map_root / ref)
39 | ims.extend([qim, rim])
40 | inls = inliers_dbs[i]
41 | titles.extend([f'{sum(inls)}/{len(inls)}', Path(ref).name])
42 | plot_images(ims, titles)
43 |
44 | for i, idx in enumerate(idxs):
45 | color = cm_RdGn(np.array(inliers_dbs[idx]))
46 | dbs_kp_q_db_i = dbs_kp_q_db[idx]
47 | if len(dbs_kp_q_db_i) == 0:
48 | continue
49 | idxs_p2d_q, p2d_db = np.array(dbs_kp_q_db_i).T
50 | plot_matches(
51 | ret['p2d_q'][idxs_p2d_q],
52 | sfm_images[ref_ids[idx]].xys[p2d_db],
53 | color=color.tolist(),
54 | indices=(i*2, i*2+1), a=0.1)
55 |
56 |
57 | def dump_plot(path: Path):
58 | path.parent.mkdir(exist_ok=True, parents=True)
59 | save_plot(path)
60 | plt.close()
61 |
--------------------------------------------------------------------------------