├── .gitattributes
├── .github
└── workflows
│ ├── python-package.yml
│ └── python-publish.yml
├── .gitignore
├── .settings
└── org.eclipse.core.resources.prefs
├── LICENSE
├── MANIFEST.in
├── Readme.md
├── colmap_wrapper
├── __init__.py
├── data
│ ├── __init__.py
│ └── download.py
├── dataloader
│ ├── __init__.py
│ ├── bin.py
│ ├── camera.py
│ ├── loader.py
│ ├── project.py
│ ├── test.png
│ └── utils.py
├── gps
│ ├── __init__.py
│ ├── map.png
│ ├── registration.py
│ ├── result_map.png
│ ├── test.png
│ ├── utils.py
│ └── visualization.py
├── reconstruction
│ ├── __init__.py
│ ├── camera_config.py
│ ├── recunstruction.py
│ └── test.png
├── test
│ ├── result_map.png
│ ├── test.png
│ ├── test.py
│ ├── test2sides.py
│ └── testGPS.py
└── visualization
│ ├── __init__.py
│ ├── test.png
│ ├── visualization.py
│ └── visualization_photogrammetry_software.py
├── environment.yml
├── img
├── hilpoltstein_osm.png
├── img.png
├── img_1.png
├── img_2.png
├── map.png
└── result_map.png
├── pyproject.toml
├── requirements.txt
└── setup.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | img/img.png filter=lfs diff=lfs merge=lfs -text
2 | img/img_1.png filter=lfs diff=lfs merge=lfs -text
3 | img/img_2.png filter=lfs diff=lfs merge=lfs -text
4 | img/hilpoltstein_osm.png filter=lfs diff=lfs merge=lfs -text
5 | img/map.png filter=lfs diff=lfs merge=lfs -text
6 | img/result_map.png filter=lfs diff=lfs merge=lfs -text
7 | img/ filter=lfs diff=lfs merge=lfs -text
8 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | name: Test COLMAP Wrapper
2 |
3 | on: [ push ]
4 | jobs:
5 | build:
6 |
7 | runs-on: ubuntu-latest
8 | strategy:
9 | matrix:
10 | python-version: ["3.8", "3.9", "3.10" ]
11 |
12 | steps:
13 | - uses: actions/checkout@v3
14 | - name: Set up Python ${{ matrix.python-version }}
15 | uses: actions/setup-python@v3
16 | with:
17 | python-version: ${{ matrix.python-version }}
18 | - run: git checkout ${{ github.ref_name }}
19 | - name: 'Install dependencies'
20 | run: |
21 | python -m pip install --upgrade pip
22 | pip install .
23 | - name: 'Install ExifTools'
24 | run: |
25 | wget https://exiftool.org/Image-ExifTool-12.51.tar.gz
26 | gzip -dc Image-ExifTool-12.51.tar.gz | tar -xf -
27 | cd Image-ExifTool-12.51
28 | perl Makefile.PL
29 | make test
30 | sudo make install
31 | - name: 'Test COLMAP wrapper'
32 | run: |
33 | exiftool -ver
34 | python colmap_wrapper/test/test.py
35 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI
2 |
3 | on: push
4 |
5 | jobs:
6 | build-n-publish:
7 | name: Build and publish Python 🐍 distributions 📦 to PyPI
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@master
11 | - name: Set up Python 3.9
12 | uses: actions/setup-python@v1
13 | with:
14 | python-version: 3.9
15 | - name: Install pypa/build
16 | run: python -m pip install build --user
17 | - name: Build a binary wheel and a source tarball
18 | run: python -m build --sdist --wheel --outdir dist/ .
19 | - name: Publish distribution 📦 to Test PyPI
20 | uses: pypa/gh-action-pypi-publish@master
21 | with:
22 | password: ${{ secrets.TEST_PYPI_API_TOKEN }}
23 | repository_url: https://test.pypi.org/legacy/
24 | skip_existing: true
25 | - name: Publish distribution 📦 to PyPI
26 | if: startsWith(github.ref, 'refs/tags')
27 | uses: pypa/gh-action-pypi-publish@master
28 | with:
29 | password: ${{ secrets.PYPI_API_TOKEN }}
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ./colmap_wrapper/test/testGPS.py
2 | colmap_wrapper/data/bunny_img/*
3 | colmap_wrapper/reconstruction/test_reco/*
4 |
5 | .idea/
6 | data/
7 | #test.py
8 | colmap_wrapper.egg-info
9 | dist
10 | !colmap_wrapper/data
11 | colmap_wrapper/data/bunny_reco
12 | */**/*.zip
13 | # Byte-compiled / optimized / DLL files
14 | __pycache__/
15 | *.py[cod]
16 | *$py.class
17 | *.ply
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib64/
31 | parts/
32 | sdist/
33 | var/
34 | wheels/
35 | pip-wheel-metadata/
36 | share/python-wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 | MANIFEST
41 |
42 | # PyInstaller
43 | # Usually these files are written by a python script from a template
44 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
45 | *.manifest
46 | *.spec
47 |
48 | # Installer logs
49 | pip-log.txt
50 | pip-delete-this-directory.txt
51 |
52 | # Unit test / coverage reports
53 | htmlcov/
54 | .tox/
55 | .nox/
56 | .coverage
57 | .coverage.*
58 | .cache
59 | nosetests.xml
60 | coverage.xml
61 | *.cover
62 | *.py,cover
63 | .hypothesis/
64 | .pytest_cache/
65 |
66 | # Translations
67 | *.mo
68 | *.pot
69 |
70 | # Django stuff:
71 | *.log
72 | local_settings.py
73 | db.sqlite3
74 | db.sqlite3-journal
75 |
76 | # Flask stuff:
77 | instance/
78 | .webassets-cache
79 |
80 | # Scrapy stuff:
81 | .scrapy
82 |
83 | # Sphinx documentation
84 | docs/_build/
85 |
86 | # PyBuilder
87 | target/
88 |
89 | # Jupyter Notebook
90 | .ipynb_checkpoints
91 |
92 | # IPython
93 | profile_default/
94 | ipython_config.py
95 |
96 | # pyenv
97 | .python-version
98 |
99 | # pipenv
100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
103 | # install all needed dependencies.
104 | #Pipfile.lock
105 |
106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
107 | __pypackages__/
108 |
109 | # Celery stuff
110 | celerybeat-schedule
111 | celerybeat.pid
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # Environments
117 | .env
118 | .venv
119 | env/
120 | venv/
121 | ENV/
122 | env.bak/
123 | venv.bak/
124 |
125 | # Spyder project settings
126 | .spyderproject
127 | .spyproject
128 |
129 | # Rope project settings
130 | .ropeproject
131 |
132 | # mkdocs documentation
133 | /site
134 |
135 | # mypy
136 | .mypy_cache/
137 | .dmypy.json
138 | dmypy.json
139 |
140 | # Pyre type checker
141 | .pyre/
142 |
143 | # Eclipse
144 | /.project
145 | /.pydevproject
146 | .settings/
147 |
148 | # Test Files
149 | #test*.py
--------------------------------------------------------------------------------
/.settings/org.eclipse.core.resources.prefs:
--------------------------------------------------------------------------------
1 | eclipse.preferences.version=1
2 | encoding//colmap_wrapper/__init__.py=utf-8
3 | encoding//colmap_wrapper/colmap/__init__.py=utf-8
4 | encoding//colmap_wrapper/colmap/bin.py=utf-8
5 | encoding//colmap_wrapper/colmap/camera.py=utf-8
6 | encoding//colmap_wrapper/colmap/colmap.py=utf-8
7 | encoding//colmap_wrapper/colmap/utils.py=utf-8
8 | encoding//colmap_wrapper/visualization/__init__.py=utf-8
9 | encoding//colmap_wrapper/visualization/visualization.py=utf-8
10 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Lukas Meyer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include colmap_wrapper *
2 |
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # COLMAP Wrapper
2 |
3 |
4 |
5 |
6 |
7 | ## About
8 |
9 | Colmap wrapper is a library to work with colmap projects. The purpose is the simplification to read e.g. rgb images,
10 | depth
11 | images, camera poses, sparse point clouds etc. Additionally a visualization for a colmap project is provided.
12 |
13 |
14 |
15 |
16 |
17 |
18 | ## Installation
19 |
20 | Make sure that you have a Python version >=3.8 installed.
21 |
22 | This repository is tested on Python 3.8+ and can currently only be installed
23 | from [PyPi](https://pypi.org/project/colmap-wrapper/).
24 |
25 | ````bash
26 | pip install colmap-wrapper
27 | ````
28 |
29 | ## Usage
30 |
31 | ### Single Reconstruction
32 |
33 | To visualize a single reconstruction from COLMAP, the following code reads all colmap elements and visualizes them. For
34 | this case an example reconstruction project is provided as shown at the top of the readme.
35 |
36 | ```python
37 | from colmap_wrapper.dataloader import COLMAPLoader
38 | from colmap_wrapper.visualization import ColmapVisualization
39 | from colmap_wrapper.data.download import Dataset
40 |
41 | downloader = Dataset()
42 | downloader.download_bunny_dataset()
43 |
44 | project = COLMAPLoader(project_path=downloader.file_path)
45 |
46 | colmap_project = project.project
47 |
48 | # Access camera, images and sparse + dense point cloud
49 | camera = colmap_project.cameras
50 | images = colmap_project.images
51 | sparse = colmap_project.get_sparse()
52 | dense = colmap_project.get_dense()
53 |
54 | # Visualize COLMAP Reconstruction
55 | project_vs = ColmapVisualization(colmap_project)
56 | project_vs.visualization(frustum_scale=0.7, image_type='image', image_resize=0.4)
57 | ```
58 |
59 | ### Multiple Incomplete Reconstruction
60 |
61 | In case of an incomplete reconstruction colmap creates partial reconstructions. In this case a list of
62 | reconstructions can be called as shown below.
63 |
64 | ```python
65 | from colmap_wrapper.dataloader import COLMAPLoader
66 | from colmap_wrapper.visualization import ColmapVisualization
67 |
68 | project = COLMAPLoader(project_path="[PATH2COLMAP_PROJECT]", image_resize=0.3)
69 |
70 | # project.projects is a list containing single dataloader projects
71 | for COLMAP_MODEL in project.projects:
72 | project_vs = ColmapVisualization(colmap=COLMAP_MODEL)
73 | project_vs.visualization(frustum_scale=0.7, image_type='image')
74 | ```
75 |
76 | ## References
77 |
78 | * [PyExifTool](https://github.com/sylikc/pyexiftool): A library to communicate with the [ExifTool](https://exiftool.org)
79 |   command-line application. If you have trouble installing it please refer to the PyExifTool-Homepage.
80 |
81 | ```bash
82 | # For Ubuntu users:
83 | wget https://exiftool.org/Image-ExifTool-12.51.tar.gz
84 | gzip -dc Image-ExifTool-12.51.tar.gz | tar -xf -
85 | cd Image-ExifTool-12.51
86 | perl Makefile.PL
87 | make test
88 | sudo make install
89 | ```
90 |
91 | * To Visualize the Reconstruction on an OSM-Map the implementation
92 | from [GPS-visualization-Python](https://github.com/tisljaricleo/GPS-visualization-Python) is used. A guide to
93 | visualize gps data can be found
94 | on [Medium](https://towardsdatascience.com/simple-gps-data-visualization-using-python-and-open-street-maps-50f992e9b676)
95 | .
96 |
--------------------------------------------------------------------------------
/colmap_wrapper/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 | from colmap_wrapper.dataloader import *
9 | from colmap_wrapper.reconstruction import *
10 | from colmap_wrapper.visualization import *
11 | from colmap_wrapper.data import *
12 | from colmap_wrapper.gps import *
13 |
14 | import os
15 |
# Skip the user lookup on CI: getpass.getuser() can fail on GitHub Actions
# runners without a controlling terminal / passwd entry.
if not os.getenv("GITHUB_ACTIONS"):
    import getpass

    # Local OS user name; NOTE: only defined outside GitHub Actions runs.
    USER_NAME = getpass.getuser()
20 |
--------------------------------------------------------------------------------
/colmap_wrapper/data/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from colmap_wrapper.data.download import *
--------------------------------------------------------------------------------
/colmap_wrapper/data/download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | # Built-in/Generic Imports
10 | import os
11 | import urllib.request
12 |
13 | # Libs
14 | from zipfile import ZipFile
15 | from tqdm import tqdm
16 |
17 | # Own modules
18 | # ...
19 |
20 | EXISTS = True
21 | NON_EXIST = False
22 |
23 |
class DownloadProgressBar(tqdm):
    """A tqdm progress bar adapted to urllib's ``reporthook`` protocol."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """Advance the bar to block *b* of *bsize* bytes (*tsize* total bytes).

        Intended to be passed as ``reporthook`` to ``urllib.request.urlretrieve``.
        """
        if tsize is not None:
            self.total = tsize
        # tqdm.update() takes an increment, so convert the absolute count.
        downloaded = b * bsize
        self.update(downloaded - self.n)
29 |
30 |
def download(url: str, output_dir: str, overwrite: bool = False):
    """Fetch *url* into *output_dir*, skipping the transfer if the file exists.

    :param url: Remote file URL; its final path segment becomes the filename.
    :param output_dir: Directory the file is written to.
    :param overwrite: Re-download even when the target file already exists.
    :return: Local path of the (possibly pre-existing) file.
    """
    basename = url.split('/')[-1]
    filename = os.path.join(output_dir, basename)

    if not overwrite and os.path.exists(filename):
        print('{} already exists in {}'.format(basename, output_dir))
    else:
        # Progress bar doubles as the urlretrieve report hook.
        with DownloadProgressBar(unit='B',
                                 unit_scale=True,
                                 miniters=1,
                                 desc=basename) as t:
            urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to)

    return filename
44 |
45 |
def extract(filename: str, output_dir: str):
    """Unpack the zip archive *filename* into *output_dir*.

    :param filename: Path of the .zip file to extract.
    :param output_dir: Destination directory for the archive contents.
    """
    # Open read-only; the context manager guarantees the handle is closed.
    with ZipFile(filename, 'r') as archive:
        print('Extracting all the files now...')
        archive.extractall(path=output_dir)
        print('Done!')
56 |
57 |
class Dataset:
    """Downloader for the example COLMAP datasets (bunny reconstruction / images)."""

    def __init__(self):
        self.dataset_name = None  # e.g. 'bunny_reco'
        self.file_path = None     # absolute path of the extracted dataset
        self.filename = None      # local path of the downloaded zip archive
        self.url = None           # remote archive URL
        self.data_path = None     # directory all datasets are stored in
        self.scale = None  # in cm

    def __check_existence(self, output_directory, dataset_name):
        """Resolve ``self.data_path`` and report whether *dataset_name* exists there.

        :param output_directory: Caller-supplied base path (defaults to this file).
        :param dataset_name: Name of the dataset directory to look for.
        :return: EXISTS or NON_EXIST.
        """
        if output_directory == os.path.abspath(__file__):
            # Default case: keep datasets next to the package in '<pkg>/data'.
            self.data_path = os.path.abspath(os.path.join(output_directory, '..', '..', 'data'))
        else:
            self.data_path = os.path.join(output_directory, 'data')

        os.makedirs(self.data_path, exist_ok=True)

        if os.path.exists(os.path.join(self.data_path, dataset_name)):
            return EXISTS
        else:
            return NON_EXIST

    def _fetch(self, url, dataset_name, output_path, overwrite):
        """Shared download/extract logic for all datasets.

        Downloads and unpacks *url* unless *dataset_name* is already present,
        then records and returns the absolute extracted-dataset path.

        :param url: Remote zip archive URL.
        :param dataset_name: Directory name checked for a cached copy.
        :param output_path: Base directory the 'data' folder is placed in.
        :param overwrite: Forwarded to :func:`download`.
        :return: Absolute path of the extracted dataset directory.
        """
        self.url = url
        self.dataset_name = dataset_name

        existence = self.__check_existence(output_directory=output_path,
                                           dataset_name=dataset_name)

        if existence == NON_EXIST:
            self.filename = download(url=self.url, output_dir=self.data_path, overwrite=overwrite)
            extract(filename=self.filename, output_dir=self.data_path)
        else:
            print('Dataset {} already exists at location {}'.format(self.dataset_name, self.data_path))

        # The extracted folder is named after the archive (basename minus '.zip').
        self.file_path = os.path.abspath(
            os.path.join(self.data_path, self.url.split('/')[-1].split('.zip')[0]))
        return self.file_path

    def download_bunny_reco_dataset(self, output_path: str = os.path.abspath(__file__), overwrite: bool = False):
        """Download the pre-reconstructed bunny COLMAP project; return its path."""
        return self._fetch(
            url='https://faubox.rrze.uni-erlangen.de/dl/fiN75KeHkAB78x3MTexFMu/bunny_reco.zip',
            dataset_name='bunny_reco',
            output_path=output_path,
            overwrite=overwrite)

    def download_bunny_images_dataset(self, output_path: str = os.path.abspath(__file__), overwrite: bool = False):
        """Download the raw bunny image set; return its path."""
        # NOTE(review): the archive extracts to 'bunny_img' while existence is
        # checked against 'bunny_images', so the cache check can never hit.
        # Kept as-is to preserve behavior — TODO confirm the intended name.
        return self._fetch(
            url='https://faubox.rrze.uni-erlangen.de/dl/fiWk9gBEZLd8Zkf48S7WNw/bunny_img.zip',
            dataset_name='bunny_images',
            output_path=output_path,
            overwrite=overwrite)
115 |
116 |
if __name__ == '__main__':
    # Manual smoke test: download both bunny example datasets (network I/O)
    # and print where each landed.
    downloader = Dataset()
    downloader.download_bunny_reco_dataset()

    print('Saved at {}'.format(downloader.file_path))

    downloader = Dataset()
    downloader.download_bunny_images_dataset()

    print('Saved at {}'.format(downloader.file_path))
127 |
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | from colmap_wrapper.dataloader.utils import *
10 | from colmap_wrapper.dataloader.bin import *
11 | from colmap_wrapper.dataloader.camera import *
12 | from colmap_wrapper.dataloader.loader import *
13 | from colmap_wrapper.dataloader.project import *
14 |
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/bin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 |
8 |
9 | Code for COLMAP readout borrowed from https://github.com/uzh-rpg/colmap_utils/tree/97603b0d352df4e0da87e3ce822a9704ac437933
10 | and
11 | https://github.com/colmap/colmap/blob/dev/scripts/python/
12 | """
13 |
14 | # Built-in/Generic Imports
15 | import struct
16 | import pathlib as path
17 |
18 | # Libs
19 | import numpy as np
20 |
21 | # Own
22 | from colmap_wrapper.dataloader.camera import (CAMERA_MODEL_IDS, Camera, ImageInformation, Point3D)
23 |
24 |
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.

    :param fid: Readable binary file object.
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of the unpacked values (struct.unpack always returns a tuple).
    """
    raw = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, raw)
36 |
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """Pack *data* and write it to a binary file.

    :param fid: Writable binary file object.
    :param data: Value(s) to write; multiple elements must be encapsulated
        in a list or a tuple.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q};
        must have the same length as the data list or tuple.
    :param endian_character: Any of {@, =, <, >, !}
    """
    # Use a distinct local name: the original shadowed the builtin 'bytes'.
    if isinstance(data, (list, tuple)):
        packed = struct.pack(endian_character + format_char_sequence, *data)
    else:
        packed = struct.pack(endian_character + format_char_sequence, data)
    fid.write(packed)
51 |
52 |
def read_cameras_binary(path_to_model_file):
    """Parse COLMAP's binary 'cameras.bin' file into Camera objects.

    Original C++ Code can be found here: https://github.com/colmap/colmap/blob/dev/src/base/reconstruction.cc
    void Reconstruction::WriteCamerasBinary(const std::string& path)
    void Reconstruction::ReadCamerasBinary(const std::string& path)

    :param path_to_model_file: Path to 'cameras.bin'.
    :return: Dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        # First 8 bytes hold the number of cameras (uint64).
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):  # camera_line_index
            # Fixed-size record: camera_id (int32), model_id (int32),
            # width (uint64), height (uint64) -> 24 bytes total.
            camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            # Resolve the numeric model id to its human-readable name.
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            # The next num_params doubles (8 bytes each) are this camera
            # model's intrinsic parameters.
            params = read_next_bytes(fid, num_bytes=8 * num_params,
                                     format_char_sequence="d" * num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        # Sanity check: every advertised camera record was read.
        assert len(cameras) == num_cameras
    return cameras
82 |
83 |
def read_images_binary(path_to_model_file):
    """Parse COLMAP's binary 'images.bin' file into ImageInformation objects.

    Original C++ Code can be found here: https://github.com/colmap/colmap/blob/dev/src/base/reconstruction.cc
    void Reconstruction::ReadImagesBinary(const std::string& path)
    void Reconstruction::WriteImagesBinary(const std::string& path)

    :param path_to_model_file: Path to 'images.bin'.
    :return: Dict mapping image_id -> ImageInformation.
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        # First 8 bytes hold the number of registered images (uint64).
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):  # image_index
            # Fixed-size record: (image_id, qvec[4], tvec[3], camera_id).
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            # Normalized rotation quaternion - 4 entries
            qvec = np.array(binary_image_properties[1:5])
            # Translational Part - 3 entries
            tvec = np.array(binary_image_properties[5:8])
            # Camera ID
            camera_id = binary_image_properties[8]
            # Image name is stored as a null-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            # Number of 2D image features detected
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # Flat sequence of (x, y, point3D_id) triples, one per feature.
            x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2D,
                                       format_char_sequence="ddq" * num_points2D)
            # 2D location of features in image
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            # Id of the 3D point each feature observes (-1 when unmatched).
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))

            # Reverse lookup: 3D point id -> index of the FIRST keypoint
            # observing it; later duplicates are skipped.
            pt3did_to_kpidx = {}
            for kpidx, ptid in enumerate(point3D_ids.ravel().tolist()):
                if ptid != -1:
                    if ptid in pt3did_to_kpidx:
                        # print("3D point {} already exits in {}, skip".format(
                        #     ptid, image_name))
                        continue
                    pt3did_to_kpidx[ptid] = kpidx

            images[image_id] = ImageInformation(image_id=image_id,
                                                qvec=qvec,
                                                tvec=tvec,
                                                camera_id=camera_id,
                                                image_name=image_name,
                                                xys=xys,
                                                point3D_ids=point3D_ids,
                                                point3DiD_to_kpidx=pt3did_to_kpidx)
    return images
140 |
141 |
def read_images_text(path):
    """Parse COLMAP's plain-text 'images.txt' file into ImageInformation objects.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesText(const std::string& path)
    void Reconstruction::WriteImagesText(const std::string& path)

    :param path: Path to 'images.txt'.
    :return: Dict mapping image_id -> ImageInformation.
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comment lines.
            if len(line) > 0 and line[0] != "#":
                # First line of a record: pose and name.
                elems = line.split()
                image_id = int(elems[0])
                # Rotation quaternion - 4 entries
                qvec = np.array(tuple(map(float, elems[1:5])))
                # Translation vector - 3 entries
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Second line of a record: flat (x, y, point3D_id) triples.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                # Id of the 3D point each feature observes (-1 when unmatched).
                point3D_ids = np.array(tuple(map(int, elems[2::3])))

                # Reverse lookup: 3D point id -> index of the FIRST keypoint
                # observing it; later duplicates are skipped.
                pt3did_to_kpidx = {}
                for kpidx, ptid in enumerate(point3D_ids.ravel().tolist()):
                    if ptid != -1:
                        if ptid in pt3did_to_kpidx:
                            # print("3D point {} already exits in {}, skip".format(
                            #     ptid, image_name))
                            continue
                        pt3did_to_kpidx[ptid] = kpidx

                images[image_id] = ImageInformation(image_id=image_id,
                                                    qvec=qvec,
                                                    tvec=tvec,
                                                    camera_id=camera_id,
                                                    image_name=image_name,
                                                    xys=xys,
                                                    point3D_ids=point3D_ids,
                                                    point3DiD_to_kpidx=pt3did_to_kpidx)
    return images
185 |
186 |
def read_points3d_binary(path_to_model_file):
    """Parse COLMAP's binary 'points3D.bin' sparse point cloud.

    Original C++ Code can be found here: https://github.com/colmap/colmap/blob/dev/src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DBinary(const std::string& path)
    void Reconstruction::WritePoints3DBinary(const std::string& path)

    :param path_to_model_file: Path to 'points3D.bin'.
    :return: Dict mapping point3D_id -> Point3D.
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        # Number of points in sparse model (uint64)
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_points):  # point_line_index
            # Fixed-size record: id (uint64), xyz (3 doubles), rgb (3 bytes),
            # error (double) -> 43 bytes total.
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            # Point ID
            point3D_id = binary_point_line_properties[0]
            # XYZ
            xyz = np.array(binary_point_line_properties[1:4])
            # RGB
            rgb = np.array(binary_point_line_properties[4:7])
            # Reconstruction error of the point — presumably COLMAP's mean
            # reprojection error; TODO confirm against COLMAP docs.
            error = np.array(binary_point_line_properties[7])
            # Number of features that observed this 3D Point (track length)
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            # Track: alternating (image_id, point2D_idx) int32 pairs.
            track_elems = read_next_bytes(
                fid, num_bytes=8 * track_length,
                format_char_sequence="ii" * track_length)
            # Image ID connected to this 3D Point
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            # Index of the observing keypoint within each image
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                point_id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    return points3D
223 |
224 |
def read_points3D_text(path):
    """Parse COLMAP's plain-text 'points3D.txt' sparse point cloud.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DText(const std::string& path)
    void Reconstruction::WritePoints3DText(const std::string& path)

    :param path: Path to 'points3D.txt'.
    :return: Dict mapping point3D_id -> Point3D.
    """
    points3D = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped.startswith("#"):
                continue
            elems = stripped.split()
            point3D_id = int(elems[0])
            xyz = np.array(tuple(map(float, elems[1:4])))
            rgb = np.array(tuple(map(int, elems[4:7])))
            error = float(elems[7])
            # Track: alternating (image_id, point2D_idx) pairs.
            image_ids = np.array(tuple(map(int, elems[8::2])))
            point2D_idxs = np.array(tuple(map(int, elems[9::2])))
            points3D[point3D_id] = Point3D(point_id=point3D_id, xyz=xyz, rgb=rgb,
                                           error=error, image_ids=image_ids,
                                           point2D_idxs=point2D_idxs)
    return points3D
250 |
251 |
252 | # ToDo: Image reader
253 |
254 |
def read_reconstruction_data(base_path):
    '''
    Load a COLMAP reconstruction from *base_path*.

    The folder must contain a 'sparse' subfolder with the files
    'cameras.bin', 'images.bin' and 'points3D.bin'.

    :param base_path: Path to the dataloader project folder.
    :return: cameras, images, points3D
    '''
    sparse_dir = path.Path(base_path).joinpath('sparse')

    cameras = read_cameras_binary(sparse_dir.joinpath('cameras.bin'))
    images = read_images_binary(sparse_dir.joinpath('images.bin'))
    points3D = read_points3d_binary(sparse_dir.joinpath('points3D.bin'))

    return cameras, images, points3D
275 |
276 |
def read_cameras_text(path, int_id=True):
    """Parse COLMAP's plain-text 'cameras.txt' file into Camera objects.

    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasText(const std::string& path)
    void Reconstruction::ReadCamerasText(const std::string& path)

    :param path: Path to 'cameras.txt'.
    :param int_id: Convert the camera id column to int (otherwise keep string).
    :return: Dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped.startswith("#"):
                continue
            elems = stripped.split()
            camera_id = int(elems[0]) if int_id else elems[0]
            cameras[camera_id] = Camera(id=camera_id,
                                        model=elems[1],
                                        width=int(elems[2]),
                                        height=int(elems[3]),
                                        params=np.array(tuple(map(float, elems[4:]))))
    return cameras
304 |
305 |
def read_array(path):
    """Read a COLMAP depth/normal map (.bin) into a numpy float32 array.

    The file starts with an ASCII 'width&height&channels&' header followed
    by the raw float32 payload in column-major (Fortran) order.

    :param path: Path of the binary map file.
    :return: Array of shape (height, width) or (height, width, channels).
    """
    with open(path, "rb") as fid:
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        # Rewind, then advance byte-by-byte past the three '&' delimiters.
        fid.seek(0)
        seen = 0
        while seen < 3:
            if fid.read(1) == b"&":
                seen += 1
        data = np.fromfile(fid, np.float32)
    grid = data.reshape((width, height, channels), order="F")
    # Swap to (height, width, channels) and drop singleton dimensions.
    return np.transpose(grid, (1, 0, 2)).squeeze()
321 |
322 |
def write_cameras_text(cameras, path):
    """Write Camera objects to COLMAP's plain-text 'cameras.txt' format.

    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasText(const std::string& path)
    void Reconstruction::ReadCamerasText(const std::string& path)

    :param cameras: Dict mapping camera_id -> Camera.
    :param path: Output file path.
    """
    header = "# Camera list with one line of data per camera:\n" + \
             "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n" + \
             "# Number of cameras: {}\n".format(len(cameras))
    with open(path, "w") as fid:
        fid.write(header)
        for cam in cameras.values():
            # One line per camera: id, model name, size, then all intrinsics.
            fields = [cam.id, cam.model, cam.width, cam.height, *cam.params]
            fid.write(" ".join(str(field) for field in fields) + "\n")
338 |
339 |
def write_images_text(images, path):
    """Write ImageInformation objects to COLMAP's plain-text 'images.txt' format.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesText(const std::string& path)
    void Reconstruction::WriteImagesText(const std::string& path)

    :param images: Dict mapping image_id -> image record.
    :param path: Output file path.
    """
    if images:
        mean_observations = sum(len(img.point3D_ids) for _, img in images.items()) / len(images)
    else:
        mean_observations = 0
    header = ("# Image list with two lines of data per image:\n"
              "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n"
              "# POINTS2D[] as (X, Y, POINT3D_ID)\n"
              "# Number of images: {}, mean observations per image: {}\n".format(len(images), mean_observations))

    with open(path, "w") as fid:
        fid.write(header)
        for img in images.values():
            # Line 1 of the record: pose and name.
            meta = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
            fid.write(" ".join(str(m) for m in meta) + "\n")

            # Line 2 of the record: all (x, y, point3D_id) triples.
            triples = [" ".join(map(str, [*xy, point3D_id]))
                       for xy, point3D_id in zip(img.xys, img.point3D_ids)]
            fid.write(" ".join(triples) + "\n")
366 |
def write_images_binary(images, path_to_model_file):
    """Write the image dictionary to a COLMAP images.bin file.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesBinary(const std::string& path)
    void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        # Leading uint64: number of registered images.
        write_next_bytes(fid, len(images), "Q")
        for img in images.values():
            write_next_bytes(fid, img.id, "i")
            write_next_bytes(fid, img.qvec.tolist(), "dddd")
            write_next_bytes(fid, img.tvec.tolist(), "ddd")
            write_next_bytes(fid, img.camera_id, "i")
            # Image name is stored as NUL-terminated UTF-8 characters.
            for character in img.name:
                write_next_bytes(fid, character.encode("utf-8"), "c")
            write_next_bytes(fid, b"\x00", "c")
            # Observations: count, then (x, y, point3D_id) per keypoint.
            write_next_bytes(fid, len(img.point3D_ids), "Q")
            for xy, point3D_id in zip(img.xys, img.point3D_ids):
                write_next_bytes(fid, [*xy, point3D_id], "ddq")
386 |
def write_points3D_text(points3D, path):
    """Write the 3D-point dictionary to a COLMAP points3D.txt file.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DText(const std::string& path)
    void Reconstruction::WritePoints3DText(const std::string& path)
    """
    if points3D:
        mean_track_length = sum(len(pt.image_ids) for pt in points3D.values()) / len(points3D)
    else:
        mean_track_length = 0
    header = "# 3D point list with one line of data per point:\n" + \
             "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n" + \
             "# Number of points: {}, mean track length: {}\n".format(len(points3D), mean_track_length)

    with open(path, "w") as fid:
        fid.write(header)
        for pt in points3D.values():
            # Fixed part: id, position, color, reprojection error.
            prefix = " ".join(map(str, [pt.id, *pt.xyz, *pt.rgb, pt.error]))
            # Track part: (image_id, point2D_idx) pairs, space separated.
            track = " ".join("{} {}".format(image_id, point2D)
                             for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs))
            fid.write(prefix + " " + track + "\n")
410 |
def write_points3D_binary(points3D, path_to_model_file):
    """Write the 3D-point dictionary to a COLMAP points3D.bin file.

    see: src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DBinary(const std::string& path)
    void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        # Leading uint64: number of 3D points.
        write_next_bytes(fid, len(points3D), "Q")
        for pt in points3D.values():
            write_next_bytes(fid, pt.id, "Q")
            write_next_bytes(fid, pt.xyz.tolist(), "ddd")
            write_next_bytes(fid, pt.rgb.tolist(), "BBB")
            write_next_bytes(fid, pt.error, "d")
            # Track: number of observations, then (image_id, point2D_idx) pairs.
            write_next_bytes(fid, pt.image_ids.shape[0], "Q")
            for image_id, point2D_idx in zip(pt.image_ids, pt.point2D_idxs):
                write_next_bytes(fid, [image_id, point2D_idx], "ii")
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/camera.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | # Built-in/Generic Imports
10 | import collections
11 |
12 | import numpy as np
13 | from PIL import Image
14 |
15 | from colmap_wrapper.dataloader import (qvec2rotmat, rotmat2qvec)
16 |
CameraModel = collections.namedtuple("CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"])

# Supported COLMAP camera models (mirrors COLMAP's src/base/camera_models.h).
CAMERA_MODELS = {
    CameraModel(0, "SIMPLE_PINHOLE", 3),
    CameraModel(1, "PINHOLE", 4),
    CameraModel(2, "SIMPLE_RADIAL", 4),
    CameraModel(3, "RADIAL", 5),
    CameraModel(4, "OPENCV", 8),
    CameraModel(5, "OPENCV_FISHEYE", 8),
    CameraModel(6, "FULL_OPENCV", 12),
    CameraModel(7, "FOV", 5),
    CameraModel(8, "SIMPLE_RADIAL_FISHEYE", 4),
    CameraModel(9, "RADIAL_FISHEYE", 5),
    CameraModel(10, "THIN_PRISM_FISHEYE", 12),
}
# Lookup table: numeric model id -> CameraModel description.
CAMERA_MODEL_IDS = {camera_model.model_id: camera_model for camera_model in CAMERA_MODELS}
34 |
35 |
class Point3D:
    """A single triangulated 3D point of a COLMAP sparse model."""

    def __init__(self,
                 point_id: int,
                 xyz: np.ndarray,
                 rgb: np.ndarray,
                 error: float,
                 image_ids: np.ndarray,
                 point2D_idxs: np.ndarray):
        """
        @param point_id: unique id of the point (exposed as `id`)
        @param xyz: 3D position
        @param rgb: color
        @param error: reprojection error
        @param image_ids: ids of the images observing this point
        @param point2D_idxs: keypoint index within each observing image
        """
        (self.id, self.xyz, self.rgb,
         self.error, self.image_ids, self.point2D_idxs) = (
            point_id, xyz, rgb, error, image_ids, point2D_idxs)
50 |
51 |
class ImageInformation(object):
    """Pose, intrinsics and pixel data of a single registered COLMAP image."""

    def __init__(self,
                 image_id: int,
                 qvec: np.ndarray,
                 tvec: np.ndarray,
                 camera_id: int,
                 image_name: str,
                 xys: np.ndarray,
                 point3D_ids: np.ndarray,
                 point3DiD_to_kpidx: dict):
        """
        @param image_id: number of image
        @param qvec: quaternion of the camera viewing direction
        @param tvec: translation of the image/camera position
        @param camera_id: camera id
        @param image_name: name of the image
        @param xys: 2D keypoint locations
        @param point3D_ids: 3D point id per keypoint
        @param point3DiD_to_kpidx: mapping from 3D point id to keypoint index
        """
        # Parsed arguments
        self.id = image_id
        self.qvec = qvec
        self.tvec = tvec
        self.camera_id = camera_id
        self.name = image_name
        self.xys = xys
        self.point3D_ids = point3D_ids
        self.point3DiD_to_kpidx = point3DiD_to_kpidx

        # Filled in later by the project loader.
        self.intrinsics = None
        self.extrinsics = np.eye(4)

        self.path = None
        self.depth_image_geometric_path = None
        self.depth_image_photometric_path = None

        # Lazily loaded / externally injected image data.
        self.__image = None

        self.set_extrinsics()

    @property
    def image(self):
        return self.getData(1.0)

    @image.setter
    def image(self, image: np.ndarray):
        self.__image = image

    def getData(self, downsample: float = 1.0) -> np.ndarray:
        """Return the image as a uint8 array, optionally downsampled.

        Falls back to a black 400x400 placeholder when the image file is
        missing, so downstream visualization keeps working.
        """
        if self.__image is not None:
            return self.__image
        try:
            with Image.open(self.path) as img:
                width, height = img.size
                img = img.resize((int(width * downsample), int(height * downsample)))
                return np.asarray(img).astype(np.uint8)
        except FileNotFoundError:
            return np.zeros((400, 400), dtype=np.uint8)

    @property
    def depth_image_geometric(self):
        if self.depth_image_geometric_path is None:
            return None
        # Bugfix: read_array lives in colmap_wrapper.dataloader; the package
        # has no colmap_wrapper.colmap module, so the old import failed.
        from colmap_wrapper.dataloader import read_array
        return read_array(path=self.depth_image_geometric_path)

    @property
    def depth_image_photometric(self):
        if self.depth_image_photometric_path is None:
            return None
        from colmap_wrapper.dataloader import read_array
        return read_array(path=self.depth_image_photometric_path)

    def set_extrinsics(self, T=None):
        """Set the 4x4 camera-to-world extrinsic matrix.

        @param T: optional 4x4 homogeneous transform; when None, the matrix is
                  derived from the stored qvec/tvec.
        """
        if T is None:
            Rwc = self.Rwc()
            twc = self.twc()
        else:
            Rwc = T[:3, :3]
            twc = T[:3, 3]

        self.extrinsics = np.eye(4)
        self.extrinsics[:3, :3] = Rwc
        self.extrinsics[:3, 3] = twc

    def qvec2rotmat(self):
        """Rotation matrix corresponding to the stored quaternion."""
        return qvec2rotmat(self.qvec)

    def rotmat2qvec(self):
        """Quaternion of the current extrinsic rotation."""
        return rotmat2qvec(self.extrinsics[:3, :3])

    def qtvec(self):
        """Pose as a flat list [qw, qx, qy, qz, tx, ty, tz]."""
        return self.qvec.ravel().tolist() + self.tvec.ravel().tolist()

    def Rwc(self):
        """Rotation camera -> world."""
        return self.qvec2rotmat().transpose()

    def twc(self):
        """Camera center in world coordinates."""
        return np.dot(-self.qvec2rotmat().transpose(), self.tvec)

    def Rcw(self):
        """Rotation world -> camera."""
        return self.qvec2rotmat()

    def tcw(self):
        """Translation world -> camera."""
        return self.tvec

    def Twc(self):
        """4x4 camera-to-world transform."""
        Twc = np.eye(4)
        Twc[0:3, 3] = self.twc()
        Twc[0:3, 0:3] = self.Rwc()

        return Twc

    def Tcw(self):
        """4x4 world-to-camera transform."""
        Tcw = np.eye(4)
        Tcw[0:3, 3] = self.tcw()
        Tcw[0:3, 0:3] = self.Rcw()

        return Tcw
187 |
188 |
class Intrinsics:
    """Pinhole intrinsics (fx, fy, cx, cy, k1) extracted from a COLMAP camera."""

    def __init__(self, camera):
        """
        @param camera: COLMAP camera whose params vector is [fx, fy, cx, cy, k1]
        """
        self._fx = None
        self._fy = None
        self._cx = None
        self._cy = None

        self.camera = camera
        self.load_from_colmap(camera=self.camera)

    def load_from_colmap(self, camera):
        """Unpack the normalized parameter vector of a COLMAP camera."""
        self.fx = camera.params[0]
        self.fy = camera.params[1]
        self.cx = camera.params[2]
        self.cy = camera.params[3]
        self.k1 = camera.params[4]

    @property
    def fx(self):
        """Focal length in x."""
        return self._fx

    @fx.setter
    def fx(self, value):
        self._fx = value

    @property
    def fy(self):
        """Focal length in y."""
        return self._fy

    @fy.setter
    def fy(self, value):
        self._fy = value

    @property
    def cx(self):
        """Principal point x-coordinate."""
        return self._cx

    @cx.setter
    def cx(self, value):
        self._cx = value

    @property
    def cy(self):
        """Principal point y-coordinate."""
        return self._cy

    @cy.setter
    def cy(self, value):
        self._cy = value

    @property
    def K(self):
        """3x3 calibration matrix (distortion not included)."""
        return np.asarray([[self.fx, 0, self.cx],
                           [0, self.fy, self.cy],
                           [0, 0, 1]])
245 |
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/loader.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 |
8 | Code for COLMAP readout borrowed from https://github.com/uzh-rpg/colmap_utils/tree/97603b0d352df4e0da87e3ce822a9704ac437933
9 | """
10 |
11 | # Built-in/Generic Imports
12 | from pathlib import Path
13 | from typing import Union
14 |
15 | # Libs
16 | import numpy as np
17 |
18 | # Own modules
19 | from colmap_wrapper.dataloader.project import COLMAPProject
20 | from colmap_wrapper.gps.registration import GPSRegistration
21 |
22 |
class COLMAPLoader(GPSRegistration):
    """Load one or more COLMAP reconstructions found below a project folder."""

    def __init__(self, project_path: Union[str, Path],
                 dense_pc: str = 'fused.ply',
                 bg_color: np.ndarray = np.asarray([1, 1, 1]),
                 exif_read: bool = False,
                 img_orig: Union[str, Path, None] = None,
                 output_status_function=None,
                 oriented: bool = False):
        """
        Constructor for COLMAPLoader class.

        Args:
            project_path (Union[str, Path]): Path to the COLMAP project.
            dense_pc (str): Name of the dense point cloud file.
            bg_color (np.ndarray): Background color as a NumPy array.
            exif_read (bool): Flag to indicate whether to read EXIF data.
            img_orig (Union[str, Path, None]): Path to the original image.
            output_status_function: Function for displaying status output.
            oriented (bool): Flag to indicate whether the project is oriented.

        Raises:
            ValueError: If no 'sparse' folder exists below the project path.
        """

        GPSRegistration.__init__(self)

        self.exif_read = exif_read
        self.vis_bg_color = bg_color
        self._project_path: Path = Path(project_path)
        # Original image path. Colmap does not copy exif data. Therefore we have to access original data
        self._img_orig_path: Union[Path, None] = Path(img_orig) if img_orig else None

        if '~' in str(self._project_path):
            self._project_path = self._project_path.expanduser()

        self._sparse_base_path = self._project_path.joinpath('sparse')
        if not self._sparse_base_path.exists():
            raise ValueError('Colmap project structure: sparse folder (cameras, images, points3d) can not be found')

        project_structure = {}
        self._dense_base_path = self._project_path.joinpath('dense')

        # Test if path is a file to get number of subprojects. Single Project with no numeric folder
        if not all([path.is_dir() for path in self._sparse_base_path.iterdir()]):
            project_structure.update({0: {
                "project_path": self._project_path,
                "sparse": self._sparse_base_path,
                "dense": self._dense_base_path}})
        else:
            # In case of folder with reconstruction number after sparse (multiple projects) (e.g. 0,1,2)
            # WARNING: dense folder 0 an1 are not the same as sparse 0 and 1 (ToDO: find out if this is always the case)
            for project_index, sparse_project_path in enumerate(list(self._dense_base_path.iterdir())):
                project_structure.update({project_index: {"sparse": sparse_project_path.joinpath('sparse')}})

            for project_index, dense_project_path in enumerate(list(self._dense_base_path.iterdir())):
                project_structure[project_index].update({"dense": dense_project_path})

            for project_index in project_structure.keys():
                project_structure[project_index].update({"project_path": self._project_path})

        self.project_list = []
        self.model_ids = []

        # Sub-projects are loaded sequentially; each COLMAPProject parallelizes
        # its own readout internally.
        for project_index in project_structure.keys():
            self.model_ids.append(project_index)

            project = COLMAPProject(project_path=project_structure[project_index],
                                    project_index=project_index,
                                    dense_pc=dense_pc,
                                    bg_color=bg_color,
                                    exif_read=exif_read,
                                    img_orig=self._img_orig_path,
                                    output_status_function=output_status_function,
                                    oriented=oriented)

            self.project_list.append(project)

    @property
    def projects(self):
        """
        Get the list of projects.

        Returns:
            List: List of COLMAPProject objects.
        """
        return self.project_list

    @projects.setter
    def projects(self, projects):
        self.project_list = projects

    def fuse_projects(self):
        """
        Fuse the projects.

        Raises:
            NotImplementedError: The function is not implemented yet.
        """
        # Bugfix: previously *returned* the NotImplementedError class, which
        # made the failure silent for callers.
        raise NotImplementedError

    def save_project(self, output_path):
        """
        Save the project.

        Args:
            output_path: The output path to save the project.

        Raises:
            NotImplementedError: The function is not implemented yet.
        """
        raise NotImplementedError
142 |
143 |
if __name__ == '__main__':
    # Demo / manual test entry point: load a project and visualize it.
    from colmap_wrapper.visualization import ColmapVisualization
    from colmap_wrapper import USER_NAME

    MODE = 'multi'

    if MODE == "single":
        from colmap_wrapper.data.download import Dataset

        downloader = Dataset()
        downloader.download_bunny_dataset()

        project = COLMAPLoader(project_path=downloader.file_path)

        # Bugfix: COLMAPLoader has no `project` attribute; reconstructions are
        # exposed through the `projects` list (first entry for a single model).
        colmap_project = project.projects[0]

        camera = colmap_project.cameras
        images = colmap_project.images
        sparse = colmap_project.get_sparse()
        dense = colmap_project.get_dense()

        project_vs = ColmapVisualization(colmap=colmap_project, image_resize=0.4)
        project_vs.visualization(frustum_scale=0.8, image_type='image')
    elif MODE == "multi":
        project = COLMAPLoader(project_path='/home/{}/Dropbox/07_data/For5G/22_11_14/reco'.format(USER_NAME),
                               dense_pc='fused.ply')

        for model_idx, COLMAP_MODEL in enumerate(project.projects):
            camera = COLMAP_MODEL.cameras
            images = COLMAP_MODEL.images
            sparse = COLMAP_MODEL.get_sparse()
            dense = COLMAP_MODEL.get_dense()
            project_vs = ColmapVisualization(colmap=COLMAP_MODEL, image_resize=0.4)
            project_vs.visualization(frustum_scale=0.8, image_type='image')
178 |
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/project.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | Code for COLMAP readout borrowed from https://github.com/uzh-rpg/colmap_utils/tree/97603b0d352df4e0da87e3ce822a9704ac437933
6 | """
7 | import os.path
8 | # Built-in/Generic Imports
9 | import warnings
10 | from concurrent.futures import ThreadPoolExecutor, wait
11 | from multiprocessing import cpu_count
12 | from enum import Enum
13 | from pathlib import Path
14 | from typing import Union
15 |
16 | # Libs
17 | import pycolmap
18 | import numpy as np
19 | import open3d as o3d
20 | import exiftool
21 |
22 | # Own modules
23 |
24 | from colmap_wrapper.dataloader import (Camera, Intrinsics, read_array, read_images_text, read_points3D_text,
25 | read_points3d_binary, read_images_binary, generate_colmap_sparse_pc,
26 | write_images_binary, write_points3D_binary)
27 | from colmap_wrapper.dataloader.bin import read_cameras_text
28 |
29 |
class LoadElement(Enum):
    """Stages of loading a COLMAP project, reported via status callbacks."""
    PATHS_AND_ATTRIBUTES = 0
    CAMERAS = 1
    IMAGES = 2
    SPARSE_MODEL = 3
    DENSE_MODEL = 4
    DEPTH_STRUCTURE = 5
    EXIF_DATA = 6
    # Bugfix: DEPTH_IMAGE is referenced by COLMAPProject.__add_infos but was
    # missing from the enum (note the gap at value 7), causing AttributeError.
    DEPTH_IMAGE = 7
    IMAGE_INFO = 8
39 |
40 |
class LoadElementStatus:
    """Progress message emitted while a project element is being loaded."""

    def __init__(self, element, project, finished=False, idx=None, current_id=None, max_id=None):
        """
        @param element: LoadElement stage this status refers to
        @param project: the COLMAPProject being loaded
        @param finished: False when the stage starts, True when it completes
        @param idx: optional per-image index
        @param current_id: optional progress counter
        @param max_id: optional progress total
        """
        self.element = element
        self.project = project
        self.finished = finished

        # Bugfix: always define idx/current_id/max_id. Previously they were
        # only set on some argument combinations, so readers could hit
        # AttributeError depending on how the status was constructed.
        self.idx = idx
        self.current_id = -1
        self.max_id = 1

        if max_id is not None or current_id is not None:
            self.current_id = current_id
            self.max_id = max_id

    def isStarted(self):
        """True while the element is still loading."""
        return not self.finished

    def isFinished(self):
        """True once the element finished loading."""
        return self.finished

    def getElement(self):
        return self.element
64 |
65 |
class PhotogrammetrySoftware(object):
    """Minimal base class for photogrammetry project readers."""

    def __init__(self, project_path):
        """
        @param project_path: path to the reconstruction project
        """
        self._project_path = project_path

        # Populated by subclasses during readout.
        self.sparse = None
        self.dense = None

    def __read_images(self):
        # Bugfix: raise instead of returning the exception class, so misuse
        # fails loudly rather than handing callers a class object.
        raise NotImplementedError

    def get_sparse(self):
        """Return the sparse reconstruction as an Open3D point cloud."""
        return generate_colmap_sparse_pc(self.sparse)

    def get_dense(self):
        """Return the dense point cloud (None until loaded)."""
        return self.dense
81 |
82 |
83 | class COLMAPProject(PhotogrammetrySoftware):
    def __init__(self, project_path: [dict, str],
                 project_index: int = 0,
                 dense_pc: str = 'fused.ply',
                 bg_color: np.ndarray = np.asarray([1, 1, 1]),
                 exif_read=False,
                 img_orig: Union[str, Path, None] = None,
                 output_status_function=None,
                 oriented: bool = False):
        """
        This is a simple COLMAP project wrapper to simplify the readout of a COLMAP project.
        THE COLMAP project is assumed to be in the following workspace folder structure as suggested in the COLMAP
        documentation (https://colmap.github.io/format.html):

            +── images
            │   +── image1.jpg
            │   +── image2.jpg
            │   +── ...
            +── sparse
            │   +── cameras.txt
            │   +── images.txt
            │   +── points3D.txt
            +── stereo
            │   +── consistency_graphs
            │   │   +── image1.jpg.photometric.bin
            │   │   +── ...
            │   +── depth_maps
            │   │   +── image1.jpg.photometric.bin
            │   │   +── ...
            │   +── normal_maps
            │   │   +── image1.jpg.photometric.bin
            │   │   +── ...
            │   +── patch-match.cfg
            │   +── fusion.cfg
            +── fused.ply
            +── meshed-poisson.ply
            +── meshed-delaunay.ply
            +── run-dataloader-geometric.sh
            +── run-dataloader-photometric.sh

        @param project_path: path to dataloader project; either a str/Path to the project root or a dict
                             with 'project_path', 'sparse' and 'dense' keys (as built by COLMAPLoader)
        @param project_index: index of this reconstruction within a multi-model project
        @param dense_pc: path to dense point cloud (Might be useful if pc has been renamed or deviates from fused.ply)
        @param bg_color: background color for visualization
        @param exif_read: if True, EXIF metadata is read for every image (slow for large image sets)
        @param img_orig: directory of the original images (COLMAP does not copy EXIF data)
        @param output_status_function: optional callback receiving LoadElementStatus progress updates
        @param oriented: if True, prefer the 'oriented' variants of the images/points3D files
        """

        PhotogrammetrySoftware.__init__(self, project_path=project_path)

        self.id = project_index
        self.load_depth = True
        self.output_status_function = output_status_function
        if output_status_function:
            self.output_status_function(
                LoadElementStatus(element=LoadElement.PATHS_AND_ATTRIBUTES, project=self, finished=False))

        # Flag to read exif data (takes long for large image sets)
        self.exif_read = exif_read

        # Search and Set Paths
        if isinstance(project_path, str):
            self._project_path: Path = Path(project_path)

            if '~' in str(self._project_path):
                self._project_path: Path = self._project_path.expanduser()

            self._sparse_base_path = self._project_path.joinpath('sparse')
            if not self._sparse_base_path.exists():
                raise ValueError('Colmap project structure: sparse folder (cameras, images, points3d) can not be found')
            # Multi-model projects nest the sparse model below sparse/0.
            if self._sparse_base_path.joinpath('0').exists():
                self._sparse_base_path: Path = self._sparse_base_path.joinpath('0')

            self._dense_base_path = self._project_path.joinpath('dense')
            if self._dense_base_path.joinpath('0').exists():
                self._dense_base_path: Path = self._dense_base_path.joinpath('0')

            # No dense folder: fall back to the project root.
            if not self._dense_base_path.exists():
                self._dense_base_path: Path = self._project_path
        elif isinstance(project_path, dict):
            # Pre-resolved structure handed in by COLMAPLoader.
            self._project_path: Path = project_path['project_path']
            if not project_path['dense'].exists():
                self._dense_base_path: Path = self._project_path
            else:
                self._dense_base_path: Path = project_path['dense']
            self._sparse_base_path: Path = project_path['sparse']
        else:
            raise ValueError("{}".format(self._project_path))

        # Loads undistorted images
        self._src_image_path: Path = self._dense_base_path.joinpath('images')
        if not self._src_image_path.exists():
            self._src_image_path: Path = self._project_path.joinpath('images')
        self._fused_path: Path = self._dense_base_path.joinpath(dense_pc)
        self._stereo_path: Path = self._dense_base_path.joinpath('stereo')
        self._depth_image_path: Path = self._stereo_path.joinpath('depth_maps')

        # Check if depth images are in sub folder
        if len(list(self._depth_image_path.glob('*.bin'))) == 0:
            if len(list(self._depth_image_path.glob('*'))) == 1:
                self._depth_image_path = list(self._depth_image_path.glob('*'))[0]
            else:
                # Heuristic: pick the sub-folder containing the most files.
                print('Warning: Multiple depth folders!')
                max_items = 0
                for path_i in list(self._depth_image_path.glob('*')):
                    current_item_len = len(list(path_i.glob('*')))
                    if current_item_len > max_items:
                        max_items = current_item_len
                        self._depth_image_path = path_i

        self._normal_image_path: Path = self._stereo_path.joinpath('normal_maps')

        self.__project_ini_path: Path = self._project_path / 'sparse' / str(self.id) / 'project.ini'

        self._img_orig_path: Union[Path, None] = Path(img_orig) if img_orig else None

        # Collect the sparse model files (.txt or .bin) and classify them.
        files: list = []
        types: tuple = ('*.txt', '*.bin')
        for t in types:
            files.extend(self._sparse_base_path.glob(t))

        for file_path in files:
            if 'cameras' in file_path.name:
                self._camera_path = file_path
            elif 'images' in file_path.name:
                # `oriented` selects between the plain and the 'oriented' variant.
                if oriented:
                    if 'oriented' in file_path.name:
                        self._image_path = file_path
                else:
                    if 'oriented' in file_path.name:
                        continue
                    self._image_path = file_path
            elif 'points3D' in file_path.name:
                if oriented:
                    if 'oriented' in file_path.name:
                        self._points3D_path = file_path
                else:
                    if 'oriented' in file_path.name:
                        continue
                    self._points3D_path = file_path
            elif 'transformation' in file_path.name:
                self._transformation_matrix = np.loadtxt(file_path)
            else:
                # NOTE(review): message typo 'Unkown' kept as-is (runtime string).
                raise ValueError('Unkown file in sparse folder')

        self.vis_bg_color: np.ndarray = bg_color
        self.project_ini = self.__read_project_init_file()

        if output_status_function:
            self.output_status_function(
                LoadElementStatus(element=LoadElement.PATHS_AND_ATTRIBUTES, project=self, finished=True))

        self.read()

        if output_status_function:  # Maybe not reset?
            self.output_status_function = None
239 |
240 | def read(self):
241 | """
242 | Start reading all necessary information
243 |
244 | @return:
245 | """
246 |
247 | n_cores = cpu_count()
248 | executor = ThreadPoolExecutor(max_workers=n_cores)
249 |
250 | futures: list = []
251 |
252 | futures.append(executor.submit(self.__read_cameras))
253 | futures.append(executor.submit(self.__read_images))
254 | futures.append(executor.submit(self.__read_sparse_model))
255 | futures.append(executor.submit(self.__read_dense_model))
256 | futures.append(executor.submit(self.__read_depth_structure))
257 |
258 | wait(futures)
259 | futures.clear()
260 |
261 | futures.append(executor.submit(self.__add_infos, executor))
262 | futures.append(executor.submit(self.__read_exif_data))
263 |
264 | wait(futures)
265 | executor.shutdown(wait=True)
266 |
267 | def __read_project_init_file(self):
268 | if self.__project_ini_path.exists():
269 | PROJECT_CLASS = 'Basic'
270 | project_ini = {PROJECT_CLASS: {}}
271 | with open(self.__project_ini_path.__str__(), 'r') as file:
272 | for line in file:
273 | elements = line.split('=')
274 | if len(elements) == 1:
275 | PROJECT_CLASS = elements[0].strip('\n')
276 | project_ini.update({PROJECT_CLASS: {}})
277 | continue
278 | if elements[0] == 'image_path':
279 | project_ini[PROJECT_CLASS].update({'image_path_orig': self._img_orig_path.__str__()})
280 | project_ini[PROJECT_CLASS].update({elements[0]: elements[1].strip('\n')})
281 | return project_ini
282 | else:
283 | return {}
284 |
285 | def __read_exif_data(self):
286 | if self.output_status_function:
287 | self.output_status_function(LoadElementStatus(element=LoadElement.EXIF_DATA, project=self, finished=False))
288 |
289 | if self.exif_read:
290 | if self.__project_ini_path.exists():
291 | try:
292 | for image_idx in self.images.keys():
293 | if self._image_path:
294 | self.images[image_idx].original_filename: Path = Path(
295 | self.project_ini['Basic']['image_path_orig']) / self.images[image_idx].name
296 | else:
297 | self.images[image_idx].original_filename: Path = Path(
298 | self.project_ini['Basic']['image_path']) / self.images[image_idx].name
299 | with exiftool.ExifToolHelper() as et:
300 | metadata = et.get_metadata(self.images[image_idx].original_filename.__str__())
301 | self.images[image_idx].exifdata = metadata[0]
302 | except exiftool.exceptions.ExifToolExecuteError as error:
303 | # traceback.print_exc()
304 | warnings.warn("Exif Data could not be read.")
305 |
306 | if self.output_status_function:
307 | self.output_status_function(LoadElementStatus(element=LoadElement.EXIF_DATA, project=self, finished=True))
308 |
    def __add_infos(self, executor: ThreadPoolExecutor = None):
        """
        Attach per-image metadata (image path, depth-map paths, intrinsics) and
        optionally load the depth maps, tracking the 95th-percentile depth as a
        scaler for visualization.

        Each image is handled by a nested `run` closure that is submitted to
        `executor` when given, otherwise executed inline (completion of the
        submitted tasks is awaited by the caller via executor.shutdown).

        @param executor: optional thread pool for per-image parallelism
        @return:
        """

        self.max_depth_scaler = 0
        self.max_depth_scaler_photometric = 0

        current_image = 0
        count_images = len(self.images)

        for image_idx in self.images.keys():
            # Loop variables are bound as defaults so each closure sees its own values.
            def run(image_idx=image_idx, current_image=current_image, count_images=count_images):
                if self.output_status_function:
                    self.output_status_function(
                        LoadElementStatus(element=LoadElement.IMAGE_INFO, project=self, finished=False, idx=image_idx,
                                          current_id=current_image, max_id=count_images))

                self.images[image_idx].path = self._src_image_path / self.images[image_idx].name

                # First matching depth file by image name (None if absent).
                self.images[image_idx].depth_image_geometric_path = next(
                    (p for p in self.depth_path_geometric if self.images[image_idx].name in p), None)
                self.images[image_idx].depth_image_photometric_path = next(
                    (p for p in self.depth_path_photometric if self.images[image_idx].name in p), None)

                self.images[image_idx].intrinsics = Intrinsics(camera=self.cameras[self.images[image_idx].camera_id])

                # NOTE(review): duplicate of the path assignment above.
                self.images[image_idx].path = self._src_image_path / self.images[image_idx].name

                if self.load_depth:
                    # NOTE(review): LoadElement.DEPTH_IMAGE is not defined in the
                    # LoadElement enum of this file — with a status callback active
                    # this raises AttributeError; confirm the enum member.
                    if self.output_status_function:
                        self.output_status_function(
                            LoadElementStatus(element=LoadElement.DEPTH_IMAGE, project=self, finished=False,
                                              idx=image_idx, current_id=current_image, max_id=count_images))

                    self.images[image_idx].depth_image_geometric = read_array(
                        path=next((p for p in self.depth_path_geometric if self.images[image_idx].name in p), None))

                    # print(self.images[image_idx].name)
                    # NOTE(review): debug print left in; consider removing.
                    print(next((p for p in self.depth_path_geometric if self.images[image_idx].name in p), None))
                    # print('\n')

                    # Track the largest 95th-percentile depth across all images.
                    min_depth, max_depth = np.percentile(self.images[image_idx].depth_image_geometric, [5, 95])

                    if max_depth > self.max_depth_scaler:
                        self.max_depth_scaler = max_depth

                    self.images[image_idx].depth_image_photometric = read_array(
                        path=next((p for p in self.depth_path_photometric if self.images[image_idx].name in p), None))

                    min_depth, max_depth = np.percentile(self.images[image_idx].depth_image_photometric, [5, 95])

                    if max_depth > self.max_depth_scaler_photometric:
                        self.max_depth_scaler_photometric = max_depth

                    if self.output_status_function:
                        self.output_status_function(
                            LoadElementStatus(element=LoadElement.DEPTH_IMAGE, project=self, finished=True,
                                              idx=image_idx, current_id=current_image, max_id=count_images))

                else:
                    self.images[image_idx].depth_image_geometric = None
                    # NOTE(review): assigns `depth_path_photometric`, not
                    # `depth_image_photometric` — looks like a typo; confirm.
                    self.images[image_idx].depth_path_photometric = None
                    # self.images[image_idx].normal_image = self.__read_depth_images

                if self.output_status_function:
                    self.output_status_function(
                        LoadElementStatus(element=LoadElement.IMAGE_INFO, project=self, finished=False, idx=image_idx,
                                          current_id=current_image, max_id=count_images))

                # NOTE(review): intrinsics are recomputed here a second time.
                self.images[image_idx].intrinsics = Intrinsics(camera=self.cameras[self.images[image_idx].camera_id])

                # Fixing Strange Error when cy is negative
                if self.images[image_idx].intrinsics.cx < 0:
                    pass

                if self.images[image_idx].intrinsics.cy < 0:
                    pass

                if self.output_status_function:
                    self.output_status_function(
                        LoadElementStatus(element=LoadElement.IMAGE_INFO, project=self, finished=True, idx=image_idx,
                                          current_id=current_image, max_id=count_images))

            current_image += 1
            if executor != None:
                executor.submit(run)
            else:
                run()
398 |
399 | def __read_cameras(self):
400 | """
401 | Load camera model from file. Currently only Simple Radial and 'Pinhole' are supported. If the camera settings
402 | are identical for all images only one camera is provided. Otherwise, every image has its own camera model.
403 |
404 | @return:
405 | """
406 |
407 | if self.output_status_function:
408 | self.output_status_function(LoadElementStatus(element=LoadElement.CAMERAS, project=self, finished=False))
409 |
410 | if (self._sparse_base_path / ('cameras.txt')).exists():
411 | cameras = read_cameras_text(self._sparse_base_path / 'cameras.txt')
412 |
413 | self.cameras = {}
414 | for camera_id, camera in cameras.items():
415 | if camera.model == 'SIMPLE_RADIAL':
416 | params = np.asarray([camera.params[0], # fx
417 | camera.params[0], # fy
418 | camera.params[1], # cx
419 | camera.params[2], # cy
420 | camera.params[3]]) # k1
421 | # cv2.getOptimalNewCameraMatrix(camera.calibration_matrix(), [k, 0, 0, 0], (camera.width, camera.height), )
422 |
423 | elif camera.model == 'PINHOLE':
424 | params = np.asarray([camera.params[0], # fx
425 | camera.params[1], # fy
426 | camera.params[2], # cx
427 | camera.params[3], # cy
428 | 0]) # k1
429 |
430 | else:
431 | raise NotImplementedError('Model {} is not implemented!'.format(camera.model_name))
432 |
433 | camera_params = Camera(id=camera.id,
434 | model=camera.model,
435 | width=camera.width,
436 | height=camera.height,
437 | params=params)
438 |
439 | self.cameras.update({camera_id: camera_params})
440 |
441 | else:
442 | reconstruction = pycolmap.Reconstruction(self._sparse_base_path)
443 | self.cameras = {}
444 | for camera_id, camera in reconstruction.cameras.items():
445 | if camera.model_name == 'SIMPLE_RADIAL':
446 | params = np.asarray([camera.params[0], # fx
447 | camera.params[0], # fy
448 | camera.params[1], # cx
449 | camera.params[2], # cy
450 | camera.params[3]]) # k1
451 | # cv2.getOptimalNewCameraMatrix(camera.calibration_matrix(), [k, 0, 0, 0], (camera.width, camera.height), )
452 |
453 | elif camera.model_name == 'PINHOLE':
454 | params = np.asarray([camera.params[0], # fx
455 | camera.params[1], # fy
456 | camera.params[2], # cx
457 | camera.params[3], # cy
458 | 0]) # k1
459 |
460 | else:
461 | raise NotImplementedError('Model {} is not implemented!'.format(camera.model_name))
462 |
463 | camera_params = Camera(id=camera.camera_id,
464 | model=camera.model_name,
465 | width=camera.width,
466 | height=camera.height,
467 | params=params)
468 |
469 | self.cameras.update({camera_id: camera_params})
470 |
471 | if self.output_status_function:
472 | self.output_status_function(LoadElementStatus(element=LoadElement.CAMERAS, project=self, finished=True))
473 |
474 | def __read_images(self):
475 | """
476 | Load infos about images from either image.bin or image.txt file and saves it into an Image object which contains
477 | information about camera_id, camera extrinsics, image_name, triangulated points (3D), keypoints (2D), etc.
478 |
479 | @return:
480 | """
481 |
482 | if self.output_status_function:
483 | self.output_status_function(LoadElementStatus(element=LoadElement.IMAGES, project=self, finished=False))
484 |
485 | if self._image_path.suffix == '.txt':
486 | self.images = read_images_text(self._image_path)
487 | elif self._image_path.suffix == '.bin':
488 | self.images = read_images_binary(self._image_path)
489 | else:
490 | raise FileNotFoundError('Wrong extension found, {}'.format(self._camera_path.suffix))
491 |
492 | if self.output_status_function:
493 | self.output_status_function(LoadElementStatus(element=LoadElement.IMAGES, project=self, finished=True))
494 |
495 | def __read_sparse_model(self):
496 | """
497 | Read sparse points from either points3D.bin or points3D.txt file. Every point is saved as an Point3D object
498 | containing information about error, image_ids (from which image can this point be seen?), points2D-idx
499 | (which keypoint idx is the observation of this triangulated point), rgb value and xyz position.
500 |
501 | @return:
502 | """
503 | if self.output_status_function:
504 | self.output_status_function(
505 | LoadElementStatus(element=LoadElement.SPARSE_MODEL, project=self, finished=False))
506 |
507 | if self._points3D_path.suffix == '.txt':
508 | self.sparse = read_points3D_text(self._points3D_path)
509 | elif self._points3D_path.suffix == '.bin':
510 | self.sparse = read_points3d_binary(self._points3D_path)
511 | else:
512 | raise FileNotFoundError('Wrong extension found, {}'.format(self._camera_path.suffix))
513 |
514 | if self.output_status_function:
515 | self.output_status_function(
516 | LoadElementStatus(element=LoadElement.SPARSE_MODEL, project=self, finished=True))
517 |
518 | def __read_depth_structure(self):
519 | """
520 | Loads the path for both depth map types ('geometric and photometric') of the reconstruction project.
521 |
522 | @return:
523 | """
524 | if self.output_status_function:
525 | self.output_status_function(
526 | LoadElementStatus(element=LoadElement.DEPTH_STRUCTURE, project=self, finished=False))
527 |
528 | self.depth_path_geometric = []
529 | self.depth_path_photometric = []
530 |
531 | for depth_path in list(self._depth_image_path.glob('*.bin')):
532 | if 'geometric' in depth_path.__str__():
533 | self.depth_path_geometric.append(depth_path.__str__())
534 | elif 'photometric' in depth_path.__str__():
535 | self.depth_path_photometric.append(depth_path.__str__())
536 | else:
537 | raise ValueError('Unkown depth image type: {}'.format(path))
538 |
539 | if self.output_status_function:
540 | self.output_status_function(
541 | LoadElementStatus(element=LoadElement.DEPTH_STRUCTURE, project=self, finished=True))
542 |
543 | def __read_dense_model(self):
544 | """
545 | Load dense point cloud from path.
546 |
547 | @return:
548 | """
549 | if self.output_status_function:
550 | self.output_status_function(
551 | LoadElementStatus(element=LoadElement.DENSE_MODEL, project=self, finished=False))
552 |
553 | self.dense = o3d.io.read_point_cloud(self._fused_path.__str__())
554 |
555 | if self.output_status_function:
556 | self.output_status_function(LoadElementStatus(element=LoadElement.DENSE_MODEL, project=self, finished=True))
557 |
    def transform_poses(self, T):
        """
        Apply a 4x4 homogeneous transformation ``T`` to every image pose.

        Left-multiplies each image's extrinsics by ``T`` and re-derives the
        quaternion (``qvec``) and translation (``tvec``) from the updated matrix,
        then calls ``set_extrinsics()`` to keep the cached pose consistent.

        :param T: 4x4 homogeneous transformation matrix.
        """
        from colmap_wrapper.colmap import (rotmat2qvec)
        for image_idx in self.images.keys():
            # New pose = T applied on the left of the old pose.
            self.images[image_idx].extrinsics = T @ self.images[image_idx].extrinsics
            # NOTE(review): if T carries a scale factor (e.g. from Kabsch-Umeyama),
            # the rotation block is not orthonormal and rotmat2qvec yields only an
            # approximation — confirm this is intended.
            self.images[image_idx].qvec = rotmat2qvec(self.images[image_idx].extrinsics[:3, :3].T.flatten())
            # Effectively tvec = -(t @ R^T); the two inner negations cancel each other.
            self.images[image_idx].tvec = -np.dot(-self.images[image_idx].extrinsics[:3, 3],
                                                  -self.images[image_idx].qvec2rotmat().T)
            self.images[image_idx].set_extrinsics()
566 |
567 | def transform_dense(self, T):
568 | self.dense.transform(T)
569 |
570 | def transform_sparse(self, T):
571 | for point_idx in self.sparse.keys():
572 | homogeneous_coord = np.hstack([self.sparse[point_idx].xyz, np.asarray([1])])
573 | homogeneous_coord = T @ homogeneous_coord
574 | self.sparse[point_idx].xyz = (homogeneous_coord / homogeneous_coord[3])[:3]
575 |
576 | def transform(self, T):
577 | self.transform_poses(T)
578 | self.transform_dense(T)
579 | self.transform_sparse(T)
580 |
581 | def save(self):
582 | path_pc = os.path.join(self._dense_base_path, 'fused_oriented.ply')
583 | o3d.io.write_point_cloud(path_pc, self.dense)
584 |
585 | path_sparse_points = os.path.join(self._sparse_base_path, 'points3D_oriented.bin')
586 | write_points3D_binary(self.sparse, path_sparse_points)
587 |
588 | path_image = os.path.join(self._sparse_base_path, 'images_oriented.bin')
589 | write_images_binary(self.images, path_image)
590 |
591 | def save_transform(self, T):
592 | '''
593 | Info: https://math.stackexchange.com/questions/3846913/finding-the-scale-factor-and-rotation-angle-of-a-matrix
594 | '''
595 | path_transform = os.path.join(self._sparse_base_path, 'transformation.txt')
596 | np.savetxt(path_transform, T)
597 |
598 |
if __name__ == '__main__':
    from colmap_wrapper.data.download import Dataset
    from colmap_wrapper.visualization import ColmapVisualization

    # Fetch the small demo dataset and parse the COLMAP project from it.
    downloader = Dataset()
    downloader.download_bunny_dataset()

    demo_project = COLMAPProject(project_path=downloader.file_path)

    # Access the parsed reconstruction entities.
    cameras = demo_project.cameras
    image_data = demo_project.images
    sparse_cloud = demo_project.get_sparse()
    dense_cloud = demo_project.get_dense()

    # Render the reconstruction together with the camera frustums.
    visualizer = ColmapVisualization(colmap=demo_project, image_resize=0.4)
    visualizer.visualization(frustum_scale=0.8, image_type='image')
615 |
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/dataloader/test.png
--------------------------------------------------------------------------------
/colmap_wrapper/dataloader/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | from copy import deepcopy
10 |
11 | import numpy as np
12 | import open3d as o3d
13 | import exiftool
14 | import matplotlib.pyplot as plt
15 |
def get_labeled_exif(exif, tags=None):
    """
    Map numeric EXIF tag ids to human-readable tag names.

    :param exif: mapping of numeric EXIF tag id -> value (e.g. from PIL's ``Image.getexif()``)
    :param tags: optional mapping of tag id -> tag name; defaults to ``PIL.ExifTags.TAGS``
    :return: dict mapping tag name (``None`` for unknown ids) -> value
    """
    if tags is None:
        # BUGFIX: the original referenced a global ``TAGS`` that is never imported
        # in this module (NameError on every call). Import lazily from Pillow,
        # which is already a dependency of this package (see gps/visualization.py).
        from PIL.ExifTags import TAGS as default_tags
        tags = default_tags
    return {tags.get(key): val for (key, val) in exif.items()}
21 |
22 |
def load_image_meta_data(image_path: str) -> dict:
    """
    Load EXIF/XMP metadata for a single image via exiftool.

    :param image_path: path to the image file on disk
    :return: metadata dictionary for the image (first entry of exiftool's batch result)
    """

    with exiftool.ExifToolHelper() as et:
        metadata = et.get_metadata(image_path)

    # exiftool returns one dict per input file; a single path yields one entry.
    return metadata[0]
35 |
36 |
def qvec2rotmat(qvec):
    """
    Convert a unit quaternion (w, x, y, z) into a 3x3 rotation matrix.

    :param qvec: indexable of four scalars, scalar (real) part first
    :return: 3x3 numpy rotation matrix
    """
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    row0 = [1 - 2 * y ** 2 - 2 * z ** 2,
            2 * x * y - 2 * w * z,
            2 * z * x + 2 * w * y]
    row1 = [2 * x * y + 2 * w * z,
            1 - 2 * x ** 2 - 2 * z ** 2,
            2 * y * z - 2 * w * x]
    row2 = [2 * z * x - 2 * w * y,
            2 * y * z + 2 * w * x,
            1 - 2 * x ** 2 - 2 * y ** 2]
    return np.array([row0, row1, row2])
48 |
49 |
def rotmat2qvec(R):
    """
    Convert a 3x3 rotation matrix into a quaternion (w, x, y, z).

    Uses the eigen-decomposition of the symmetric 4x4 matrix K whose dominant
    eigenvector is the sought quaternion.

    :param R: 3x3 rotation matrix
    :return: quaternion as a numpy array (w, x, y, z) with non-negative w
    """
    rxx, ryx, rzx, rxy, ryy, rzy, rxz, ryz, rzz = R.flat
    K = np.array([
        [rxx - ryy - rzz, 0, 0, 0],
        [ryx + rxy, ryy - rxx - rzz, 0, 0],
        [rzx + rxz, rzy + ryz, rzz - rxx - ryy, 0],
        [ryz - rzy, rzx - rxz, rxy - ryx, rxx + ryy + rzz]]) / 3.0
    values, vectors = np.linalg.eigh(K)
    # Eigenvector of the largest eigenvalue, reordered from (x, y, z, w) to (w, x, y, z).
    q = vectors[[3, 0, 1, 2], np.argmax(values)]
    # Canonical sign: non-negative scalar part.
    return -q if q[0] < 0 else q
62 |
63 |
def convert_colmap_extrinsics(frame):
    """
    Build a 4x4 homogeneous pose matrix from a frame's rotation and translation.

    :param frame: object exposing ``Rwc()`` (3x3 rotation) and ``twc()`` (3-vector)
    :return: tuple of (rotation, translation, 4x4 homogeneous pose matrix)
    """
    rotation = frame.Rwc()
    translation = frame.twc()

    pose = np.eye(4)
    pose[:3, :3] = rotation
    pose[:3, 3] = translation

    return rotation, translation, pose
73 |
74 |
def generate_colmap_sparse_pc(points3D: np.ndarray) -> o3d.pybind.geometry.PointCloud:
    """
    Convert a COLMAP points3D mapping into an Open3D point cloud.

    :param points3D: mapping of point id -> Point3D (with ``xyz`` and ``rgb`` attributes)
    :return: Open3D point cloud with positions and normalized [0, 1] colors
    """
    n_points = len(points3D)
    xyz = np.zeros((n_points, 3))
    rgb = np.zeros((n_points, 3))

    for row, key in enumerate(points3D):
        xyz[row] = points3D[key].xyz
        rgb[row] = points3D[key].rgb / 255.

    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)
    cloud.colors = o3d.utility.Vector3dVector(rgb)

    return cloud
88 |
89 |
def plot_disparity_map(disparity_image):
    """
    Show a disparity/depth map with percentile-clipped contrast and an empty colorbar.

    :param disparity_image: 2D numpy array of disparity/depth values
    """
    # BUGFIX: work on a copy — the original clipped the caller's array in place,
    # inconsistent with plot_alpha_blending, which deepcopies its inputs.
    disparity_image = deepcopy(disparity_image)
    fig, ax = plt.subplots(figsize=(25, 25))
    min_depth, max_depth = np.percentile(disparity_image, [5, 90])
    # Clip outliers, then push invalid (zero) pixels beyond the far plane.
    # NOTE(review): when min_depth > 0 the zeros are already clamped to min_depth
    # by the first line, so the == 0 branch never fires — confirm intended order.
    disparity_image[disparity_image < min_depth] = min_depth
    disparity_image[disparity_image > max_depth] = max_depth
    disparity_image[disparity_image == 0] = max_depth + 0.1 * max_depth
    pos = ax.imshow(disparity_image, cmap="Greys")
    plt.axis('off')
    cbar = fig.colorbar(pos, ax=ax, shrink=0.52)
    cbar.ax.tick_params(size=0)
    cbar.set_ticks([])
    plt.show()
102 |
103 |
def plot_alpha_blending(rgb_image, disparity_image):
    """
    Overlay the RGB image semi-transparently on a percentile-clipped disparity map.

    :param rgb_image: RGB image array
    :param disparity_image: 2D numpy array of disparity/depth values
    """
    # Work on copies so the caller's arrays stay untouched.
    rgb_image = deepcopy(rgb_image)
    disparity_image = deepcopy(disparity_image)
    fig, ax = plt.subplots(figsize=(15, 15))
    lower, upper = np.percentile(disparity_image, [5, 95])
    disparity_image[disparity_image < lower] = lower
    disparity_image[disparity_image > upper] = upper
    disparity_image[disparity_image == 0] = upper + 0.1 * upper
    ax.imshow(disparity_image, cmap="Greys")
    plt.axis('off')
    ax.imshow(rgb_image, alpha=0.3)
    plt.show()
117 |
--------------------------------------------------------------------------------
/colmap_wrapper/gps/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | from colmap_wrapper.gps.registration import *
10 | from colmap_wrapper.gps.utils import *
11 | from colmap_wrapper.gps.visualization import *
12 |
--------------------------------------------------------------------------------
/colmap_wrapper/gps/map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/gps/map.png
--------------------------------------------------------------------------------
/colmap_wrapper/gps/registration.py:
--------------------------------------------------------------------------------
1 | import pylab as plt
2 | from typing import Literal
3 | import random
4 |
5 | # Own Modules
6 | from colmap_wrapper.gps.utils import *
7 | from colmap_wrapper.gps.visualization import GPSVisualization
8 |
9 |
class GPSRegistration(object):
    """
    Registers multiple COLMAP reconstructions ("sides") of the same scene.

    Workflow: extract per-image GPS positions and camera extrinsics, coarsely
    align each reconstruction to its GPS track with the Kabsch-Umeyama
    algorithm, then refine the relative alignment of the two dense point
    clouds with ICP.
    """

    def __init__(self):
        # Reconstruction projects to be aligned; filled externally/by a loader.
        self.project_list: list = []

    def __gps_extract(self, debug: bool = False):
        """
        Extracts GPS data and extrinsic camera parameters from the project list.

        Args:
            debug (bool, optional): Whether to enable debug mode for intermediate visualizations. Default is False.

        Returns:
            None

        Note:
            - This function populates the `gps_data` and `extrinsic_data` attributes of the object.
        """
        gps_data = {}
        extrinsics = {}
        for model_idx, model in enumerate(self.project_list):
            gps_data.update({model_idx: []})
            extrinsics.update({model_idx: []})
            for image_idx in model.images.keys():
                # XMP GPS tags are assumed present on every image (e.g. drone imagery).
                gps_data[model_idx].append([float(model.images[image_idx].exifdata["XMP:GPSLatitude"]),
                                            float(model.images[image_idx].exifdata["XMP:GPSLongitude"]),
                                            float(model.images[image_idx].exifdata["XMP:AbsoluteAltitude"])])
                extrinsics[model_idx].append(model.images[image_idx].extrinsics)

        self.gps_data = {}

        for model_idx in gps_data.keys():
            # Convert (lat, lon, elevation) records to cartesian x/y/z.
            gps_certesian = np.asarray(
                [convert_to_cartesian(lat, long, elev) for lat, long, elev in gps_data[model_idx]])
            if debug:
                plt.scatter(gps_certesian[:, 0], gps_certesian[:, 1])

            self.gps_data.update({model_idx: gps_certesian})

        if debug:
            plt.show()

        self.extrinsic_data = {}

        for model_idx in extrinsics.keys():
            # Translational part (last column) of every 4x4 camera pose.
            trans_certesian = np.asarray(
                [[array[:3, -1][0], array[:3, -1][1], array[:3, -1][2]] for array in extrinsics[model_idx]])
            if debug:
                plt.scatter(trans_certesian[:, 0], trans_certesian[:, 1])

            self.extrinsic_data.update({model_idx: trans_certesian})

        if debug:
            plt.show()

    def gps_visualize(self, map_path: str, osm_boarders: tuple, save_as: str, debug: bool = False):
        """
        Extracts GPS data and visualizes it on an OpenStreetMap (OSM) map image.

        Args:
            map_path (str): The path to the OSM map image.
            osm_boarders (tuple): The latitude and longitude boundaries of the map (min_lat, max_lat, min_lon, max_lon).
            save_as (str): The filename to save the final visualization.
            debug (bool, optional): Whether to enable debug mode for intermediate visualizations. Default is False.

        Returns:
            None

        Note:
            - OSM maps can be downloaded from: https://www.openstreetmap.org/export#map=5/33.174/14.590
            - For more information on visualizing GPS data on OSM maps, refer to: https://towardsdatascience.com/simple-gps-data-visualization-using-python-and-open-street-maps-50f992e9b676

        """
        self.__gps_extract(debug=debug)

        vis = GPSVisualization(gps_data=self.gps_data,
                               map_path=map_path,
                               points=osm_boarders)  # Draw converted records to the map image.

        vis.create_image(color=(0, 0, 255), width=0.2)
        vis.plot_map(show=debug, save_as=save_as)

    def coarse_align(self, debug: bool = False):
        """
        Performs coarse alignment of the point clouds using GPS data and extrinsic camera parameters.

        Args:
            debug (bool, optional): Whether to enable debug visualization. Default is False.

        Returns:
            None

        Raises:
            ValueError: If the number of sides in `project_list` is greater than 2.

        """
        if self.project_list.__len__() > 2:
            # BUGFIX: was `assert ValueError(...)`, which evaluates a truthy
            # exception instance and therefore never raises.
            raise ValueError(
                'Up to know it is only possible to align two sides. Currently {} sides are present'.format(
                    self.project_list.__len__()))

        # Translate gps data to origin (offset taken from the first model).
        gps_offset = self.gps_data[0].mean(axis=0)

        for model_idx in self.gps_data:
            for gps_idx, _ in enumerate(self.gps_data[model_idx]):
                self.gps_data[model_idx][gps_idx] -= gps_offset

        transformation_list = []

        # One similarity transform (R, scale, t) per reconstruction.
        for gps_idx, extrinsics_idx in zip(self.gps_data, self.extrinsic_data):
            R, c, t = kabsch_umeyama(self.gps_data[gps_idx], self.extrinsic_data[extrinsics_idx])
            transformation_list.append([R, c, t])

        if debug:
            colors = [(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0)]
            pcd = []
            for gps_idx in self.gps_data.keys():
                # Plot GPS
                gps_pcd = points2pcd((np.asarray(self.gps_data[gps_idx])))
                gps_pcd.paint_uniform_color(random.choice(colors))
                pcd.append(gps_pcd)

            o3d.visualization.draw_geometries(pcd)

        if debug:
            colors = [(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0)]
            gps_pcd_list = []
            extrinsics_pcd_list = []
            for idx in self.gps_data.keys():
                # Plot GPS
                color = random.choice(colors)

                gps_pcd = points2pcd((np.asarray(self.gps_data[idx])))
                gps_pcd.paint_uniform_color(color)
                gps_pcd_list.append(gps_pcd)

                # Plot Camera
                extrinsic_pcd = points2pcd((np.asarray(self.extrinsic_data[idx])))
                extrinsic_pcd.paint_uniform_color(color)
                extrinsics_pcd_list.append(extrinsic_pcd)

            o3d.visualization.draw_geometries(gps_pcd_list)
            o3d.visualization.draw_geometries(extrinsics_pcd_list)

        for idx, (R, c, t) in enumerate(transformation_list):
            # Compose the 4x4 similarity transform and bake it into the project.
            T = np.eye(4)
            T[:3, :3] = c * R
            T[:3, -1] = t

            self.project_list[idx].transform(T=T)
            self.project_list[idx].save()

        if debug:
            pcd = []
            for idx in range(len(self.project_list)):
                pcd.append(self.project_list[idx].dense)
            o3d.visualization.draw_geometries(pcd)

        if debug:
            pcd = []
            for idx in range(len(self.project_list)):
                pcd.append(self.project_list[idx].dense)
                pcd.append(points2pcd((np.asarray(self.gps_data[idx]))))
                pcd.append(points2pcd((np.asarray(self.extrinsic_data[idx]))))

            o3d.visualization.draw_geometries(pcd)

    def fine_align(self, max_correspondence_distance: float = 0.02,
                   init_transformation: np.ndarray = np.eye(4),
                   estimation_method: Literal['point2point', 'point2plane'] = 'point2point',
                   debug: bool = False):
        """
        Aligns two point clouds using the Iterative Closest Point (ICP) algorithm.

        Args:
            max_correspondence_distance (float, optional): The maximum correspondence distance for ICP. Default is 0.02.
            init_transformation (numpy.ndarray, optional): The initial transformation matrix. Default is the identity matrix.
            estimation_method (Literal['point2point', 'point2plane'], optional): The estimation method for transformation.
                Choose between 'point2point' and 'point2plane'. Default is 'point2point'.
            debug (bool, optional): Whether to visualize the aligned point clouds. Default is False.

        Returns:
            None

        Raises:
            ValueError: If the number of sides in `project_list` is greater than 2.
            ValueError: If the provided estimation method is unknown.

        """
        if self.project_list.__len__() > 2:
            # BUGFIX: was `assert ValueError(...)`, which never raises.
            raise ValueError(
                'Up to know it is only possible to align two sides. Currently {} sides are present'.format(
                    self.project_list.__len__()))

        if estimation_method == 'point2point':
            method = o3d.pipelines.registration.TransformationEstimationPointToPoint()
        elif estimation_method == 'point2plane':
            method = o3d.pipelines.registration.TransformationEstimationPointToPlane()
        else:
            raise ValueError(
                "Method {} is unknown; Choose between point2point and point2plane!".format(estimation_method))

        reg_p2p = o3d.pipelines.registration.registration_icp(source=self.project_list[0].dense,
                                                              target=self.project_list[1].dense,
                                                              max_correspondence_distance=max_correspondence_distance,
                                                              init=init_transformation,
                                                              estimation_method=method)
        if debug:
            print(reg_p2p)
            print("Transformation is:")
            print(reg_p2p.transformation)

        # Only side 0's dense cloud is moved onto side 1.
        self.project_list[0].dense.transform(reg_p2p.transformation)
        if debug:
            o3d.visualization.draw_geometries([self.project_list[0].dense, self.project_list[1].dense])

    def align(self):
        """Run coarse (GPS/Kabsch-Umeyama) alignment followed by fine (ICP) refinement."""
        self.coarse_align()
        self.fine_align()
234 |
if __name__ == '__main__':
    from colmap_wrapper.dataloader.loader import COLMAPLoader
    from colmap_wrapper.visualization import ColmapVisualization
    from colmap_wrapper import USER_NAME

    # Hard-coded demo paths; flip the constant branch to switch datasets.
    # NOTE(review): these point at a developer's local machine — adjust before running.
    if True:
        path = "/home/{}/Documents/reco/23_04_24/01".format(USER_NAME)
        img_orig = "/home/{}/Documents/reco/23_04_24/01/01_Satin".format(USER_NAME)
    else:
        path = '/media/{}/Samsung_T5/For5G/reco/23_03_17/03'.format(USER_NAME)
        img_orig = '/media/{}/Samsung_T5/For5G/data/23_03_17/03'.format(USER_NAME)
    project = COLMAPLoader(project_path=path,
                           exif_read=True,
                           img_orig=img_orig,
                           dense_pc='cropped.ply')

    # project_vs = ColmapVisualization(dataloader=project, bg_color=np.asarray([0, 0, 0]))
    # project_vs.visualization(show_sparse=False, frustum_scale=0.4, image_type='image', point_size=0.001)

    # Draw the extracted GPS positions onto a pre-downloaded OSM map tile.
    project.gps_visualize(map_path='map.png',
                          osm_boarders=(49.66721, 11.32313, 49.66412, 11.32784),
                          save_as='./result_map.png')

    project.align()  # project.coarse_align() + project.fine_align()

    # project.fuse_projects()
    # project.save_project()

    # Visualize the aligned reconstruction.
    project_vs = ColmapVisualization(colmap=project, bg_color=np.asarray([0, 0, 0]))
    project_vs.visualization(show_sparse=False, frustum_scale=0.4, image_type='image', point_size=0.001)
265 |
--------------------------------------------------------------------------------
/colmap_wrapper/gps/result_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/gps/result_map.png
--------------------------------------------------------------------------------
/colmap_wrapper/gps/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/gps/test.png
--------------------------------------------------------------------------------
/colmap_wrapper/gps/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import open3d as o3d
3 |
4 |
def points2pcd(points: np.ndarray) -> o3d.geometry.PointCloud:
    """
    Wrap an Nx3 array of XYZ coordinates in an Open3D point cloud painted blue.

    Args:
        points (numpy.ndarray): Array of shape Nx3 with XYZ locations of points.

    Returns:
        open3d.geometry.PointCloud: Point cloud with the given positions and a uniform blue color.
    """
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    # One uniform blue color per point.
    blue = np.tile(np.array([0.0, 0.0, 1.0]), (points.shape[0], 1))
    pcd.colors = o3d.utility.Vector3dVector(blue)

    return pcd
26 |
27 |
def kabsch_umeyama(pointset_A, pointset_B) -> tuple:
    """
    Align two point sets with the Kabsch-Umeyama algorithm.

    Finds the rotation, isotropic scale and translation that map pointset_B onto
    pointset_A with minimal root-mean-square deviation between corresponding points.

    Args:
        pointset_A (numpy.ndarray): Array (n x m) of points in m-dim.
        pointset_B (numpy.ndarray): Array (n x m) of corresponding points in m-dim.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray]: rotation matrix (m x m),
        scaling factor (scalar) and translation vector (m,).

    Raises:
        AssertionError: If the shapes of pointset_A and pointset_B do not match.

    Reference:
        Source and Explanation: https://zpl.fi/aligning-point-patterns-with-kabsch-umeyama-algorithm/
    """
    assert pointset_A.shape == pointset_B.shape
    n, m = pointset_A.shape

    # Centroids of both point sets.
    centroid_A = np.mean(pointset_A, axis=0)
    centroid_B = np.mean(pointset_B, axis=0)

    # Variance of set A around its centroid (used for the scale estimate).
    variance_A = np.mean(np.linalg.norm(pointset_A - centroid_A, axis=1) ** 2)

    # Cross-covariance matrix between the centered sets.
    covariance = ((pointset_A - centroid_A).T @ (pointset_B - centroid_B)) / n

    # SVD: covariance = U diag(D) VT
    U, D, VT = np.linalg.svd(covariance)

    # Detect and prevent a reflection in the recovered rotation.
    sign = np.sign(np.linalg.det(U) * np.linalg.det(VT))
    S = np.diag([1] * (m - 1) + [sign])

    R = U @ S @ VT
    c = variance_A / np.trace(np.diag(D) @ S)
    t = centroid_A - c * R @ centroid_B

    return R, c, t
75 |
76 |
def convert_to_cartesian(lat: float, lon: float, elevation: float) -> tuple:
    """
    Converts latitude, longitude, and elevation to Cartesian coordinates.

    Args:
        lat (float): Latitude in degrees.
        lon (float): Longitude in degrees.
        elevation (float): Elevation in meters.

    Returns:
        Tuple[float, float, float]: Cartesian coordinates (x, y, z).

    Note:
        - Uses a spherical Earth model with the WGS84 semi-major axis plus elevation.

    Reference:
        https://itecnote.com/tecnote/python-how-to-convert-longitudelatitude-elevation-to-cartesian-coordinates/
    """
    lat_rad = np.deg2rad(lat)
    lon_rad = np.deg2rad(lon)
    # Earth radius (WGS84 semi-major axis) plus elevation above it.
    radius = 6378137.0 + elevation

    cos_lat = np.cos(lat_rad)
    return (radius * cos_lat * np.cos(lon_rad),
            radius * cos_lat * np.sin(lon_rad),
            radius * np.sin(lat_rad))
104 |
--------------------------------------------------------------------------------
/colmap_wrapper/gps/visualization.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from PIL import Image, ImageDraw
5 |
6 | # Built-in/Generic Imports
7 | from typing import Tuple
8 |
9 | # Built-in/Generic Imports
10 | # ...
11 |
12 | # Libs
13 | import numpy as np
14 | import open3d as o3d
15 | from PIL import Image, ImageFont, ImageDraw
16 | from pyquaternion import Quaternion
17 |
18 |
19 | # Own modules
20 | # ...
21 |
22 |
class GPSVisualization(object):
    """
    Class for GPS data visualization using pre-downloaded OSM map in image format.
    Source: https://github.com/tisljaricleo/GPS-visualization-Python
    """

    def __init__(self, gps_data: dict, map_path: str, points: tuple):
        """
        Initialize the GPSVisualization class.

        Args:
            gps_data (dict): Raw GPS data; one list of (lat, lon, elevation) records per model.
            map_path (str): Path to pre-downloaded OSM map in image format.
            points (tuple): Upper-left and lower-right GPS points of the map (lat1, lon1, lat2, lon2).
        """
        # Flatten all models' records into a single list.
        gps_data_osm = []
        for key in gps_data.keys():
            gps_data_osm.extend(gps_data[key])

        self.gps_data = gps_data_osm
        self.points = points
        self.map_path = map_path

        # Populated by create_image() / get_ticks().
        # BUGFIX: was the PIL `Image` module object as a placeholder.
        self.result_image = None
        self.x_ticks = []
        self.y_ticks = []

    def plot_map(self, output: str = 'save', save_as: str = 'resultMap.png', show: bool = False):
        """
        Plot and/or save the map image.

        Args:
            output (str, optional): 'save' writes the figure to `save_as`; any other value shows it. Default is 'save'.
            save_as (str, optional): Name and type of the resulting image. Default is 'resultMap.png'.
            show (bool, optional): Additionally show the plotted map even when saving. Default is False.

        Returns:
            None
        """
        self.get_ticks()
        fig, axis1 = plt.subplots(figsize=(30, 30))
        axis1.imshow(self.result_image)
        axis1.set_xlabel('Longitude')
        axis1.set_ylabel('Latitude')
        axis1.set_xticks(self.x_ticks)
        axis1.set_yticks(self.y_ticks)
        axis1.set_xticklabels(self.x_ticks)
        axis1.set_yticklabels(self.y_ticks)
        axis1.grid()
        if output == 'save':
            plt.savefig(save_as)
        # BUGFIX: the `show` parameter used to be ignored entirely.
        if show or output != 'save':
            plt.show()

    def create_image(self, color, width=1):
        """
        Create an image with the original map and GPS records.

        Args:
            color (tuple): Color of the GPS records (RGB values).
            width (int, optional): Width of the drawn GPS records. Default is 1.

        Returns:
            None
        """
        self.result_image = Image.open(self.map_path, 'r')
        img_points = []
        # Records are (lat, lon, elevation); elevation is unused for the 2D map.
        for lat, long, elevation in self.gps_data:
            x1, y1 = self.scale_to_img((lat, long), (self.result_image.size[0], self.result_image.size[1]))
            img_points.append((x1, y1))
        draw = ImageDraw.Draw(self.result_image)
        for point in img_points:
            draw.ellipse((point[0] - width, point[1] - width, point[0] + width, point[1] + width), fill=color)

    def scale_to_img(self, lat_lon: tuple, h_w: tuple) -> tuple:
        """
        Convert latitude and longitude to image pixels.

        Args:
            lat_lon (tuple): GPS record to draw (lat1, lon1).
            h_w (tuple): Size of the map image (width, height).

        Returns:
            tuple: x and y coordinates to draw on the map image.
        """
        # Linear rescale between coordinate ranges:
        # https://gamedev.stackexchange.com/questions/33441/how-to-convert-a-number-from-one-min-max-set-to-another-min-max-set/33445
        old = (self.points[2], self.points[0])
        new = (0, h_w[1])
        y = ((lat_lon[0] - old[0]) * (new[1] - new[0]) / (old[1] - old[0])) + new[0]
        old = (self.points[1], self.points[3])
        new = (0, h_w[0])
        x = ((lat_lon[1] - old[0]) * (new[1] - new[0]) / (old[1] - old[0])) + new[0]
        # y must be reversed because the image origin is the upper-left corner,
        # while the coordinate system origin is the lower-left corner.
        return int(x), h_w[1] - int(y)

    def get_ticks(self):
        """
        Generate custom ticks based on the GPS coordinates of the map for the matplotlib output.

        Returns:
            None
        """
        # BUGFIX: store lists instead of one-shot `map` iterators — plot_map()
        # consumes the ticks twice (positions and labels) and the original map
        # object was exhausted after the first use, producing empty labels.
        self.x_ticks = [round(x, 4) for x in np.linspace(self.points[1], self.points[3], num=7)]
        y_ticks = [round(y, 4) for y in np.linspace(self.points[2], self.points[0], num=8)]
        # Reversed because the image origin is the upper-left corner.
        self.y_ticks = sorted(y_ticks, reverse=True)
139 |
--------------------------------------------------------------------------------
/colmap_wrapper/reconstruction/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Lukas Meyer
3 | Licensed under the MIT License.
4 | See LICENSE file for more information.
5 | """
6 |
7 | from colmap_wrapper.reconstruction.recunstruction import COLMAPReconstruction
8 | from colmap_wrapper.reconstruction.camera_config import *
9 |
--------------------------------------------------------------------------------
/colmap_wrapper/reconstruction/camera_config.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Lukas Meyer
3 | Licensed under the MIT License.
4 | See LICENSE file for more information.
5 | """
6 |
7 | # Built-in/Generic Imports
8 | # ...
9 |
10 | # Libs
11 | import pycolmap
12 |
13 |
14 | # Own modules
15 | # ...
16 |
17 |
class CameraConfig(object):
    """Base class for camera presets used by the reconstruction pipeline.

    Subclasses are expected to assign all four attributes in their __init__.
    """

    def __init__(self):
        # NOTE: these are bare annotations only -- they declare the expected
        # attribute types but do not assign any values.
        self.image_size: tuple  # (width, height) in pixels
        self.model: str  # COLMAP camera model name, e.g. 'SIMPLE_PINHOLE'
        self.camera_params: list  # intrinsic parameters in COLMAP order
        self.camera: pycolmap.Camera  # instantiated pycolmap camera
24 |
25 |
class UnknownCamera(CameraConfig):
    """Fallback preset: simple pinhole model without prior intrinsics."""

    def __init__(self):
        super().__init__()
        self.image_size = (3200, 3200)
        self.model = 'SIMPLE_PINHOLE'
        self.camera_params = []

        # Empty params: COLMAP estimates the intrinsics itself.
        width, height = self.image_size
        self.camera = pycolmap.Camera(model=self.model,
                                      width=width,
                                      height=height,
                                      params=self.camera_params)
39 |
40 |
class P1Camera(CameraConfig):
    """Preset for a P1 camera (8192x5460), pinhole model, no prior intrinsics."""

    def __init__(self):
        super().__init__()
        self.image_size = (8192, 5460)
        self.model = 'SIMPLE_PINHOLE'
        self.camera_params = []

        # Empty params: COLMAP estimates the intrinsics itself.
        width, height = self.image_size
        self.camera = pycolmap.Camera(model=self.model,
                                      width=width,
                                      height=height,
                                      params=self.camera_params)
54 |
55 |
class DSLRCamera(CameraConfig):
    """Preset for a calibrated DSLR (6000x4000) using the OPENCV camera model.

    Intrinsics order for the OPENCV model: fx, fy, cx, cy, k1, k2, p1, p2.
    """

    def __init__(self):
        CameraConfig.__init__(self)

        self.image_size = (6000, 4000)
        self.model = 'OPENCV'
        self.camera_params = [4518.9, 4511.7, 3032.2, 2020.9, -0.1623, 0.0902, 0, 0]

        self.camera = pycolmap.Camera(
            # Consistency fix: reference self.model instead of repeating the
            # hard-coded 'OPENCV' literal (matches the sibling presets).
            model=self.model,
            width=self.image_size[0],
            height=self.image_size[1],
            params=self.camera_params,
        )
--------------------------------------------------------------------------------
/colmap_wrapper/reconstruction/recunstruction.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2023 Lukas Meyer
3 | Licensed under the MIT License.
4 | See LICENSE file for more information.
5 | """
6 |
# Built-in/Generic Imports
import json
import os
from copy import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Literal, Union

# Libs
import pycolmap


# Own modules
from colmap_wrapper.reconstruction.camera_config import *
20 |
21 |
def is_jsonable(x):
    """Return True if *x* can be serialized to JSON, else False."""
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
28 |
29 |
@dataclass
class COLMAPReconstruction(object):
    """Wrapper around the pycolmap SfM + MVS pipeline.

    Supports either a single image folder (single project) or a root folder of
    sub-folders (one project per sub-folder). Every pipeline stage persists its
    options as JSON so an unchanged stage can be skipped on re-runs.

    NOTE(review): 'feature_mathing' is a typo for 'feature_matching', but the
    name is part of the public keyword interface and is therefore kept.
    """

    # Images input folder
    image_path: str

    # Output folder for reconstruction
    output_path: str

    # Camera preset class (instantiated in __init__)
    camera_model: Literal[DSLRCamera, P1Camera, UnknownCamera]

    feature_mathing: Literal["exhaustive", "spatial"]

    # Max image size for patch match stereo
    patch_match_max_image_size: int = 4000

    # Max image size for stereo fusion
    stereo_fusion_max_image_size: int = 4000

    def __init__(self, images: str,
                 output: str,
                 feature_mathing: Literal["exhaustive", "spatial"],
                 patch_match_max_image_size: int,
                 stereo_fusion_max_image_size: int,
                 camera_model: Literal[DSLRCamera, P1Camera, UnknownCamera]):
        """
        Initializes a COLMAPReconstruction object.

        Args:
            images (str): Path to the input images folder (or a root folder of
                per-project sub-folders).
            output (str): Path to the output folder for the reconstruction.
            feature_mathing (str): The type of feature matching to be used ("exhaustive" or "spatial").
            patch_match_max_image_size (int): Maximum image size for patch match stereo.
            stereo_fusion_max_image_size (int): Maximum image size for stereo fusion.
            camera_model: The camera preset class (not instance) to be used.

        Raises:
            ValueError: If feature_mathing is neither "exhaustive" nor "spatial".
        """
        self.images_folder = Path(images).expanduser().resolve()

        # Does the input folder (recursively) contain any image files?
        contains_images = any(p.suffix in {".jpg", ".jpeg", ".png", ".ppm"}
                              for p in self.images_folder.glob("**/*"))

        self.projects = {}
        if not contains_images:
            # No images below the root: every direct sub-folder becomes an
            # individual reconstruction project (multi-project mode).
            output_root = Path(output).expanduser().resolve()
            for project_idx, folder in enumerate(self.images_folder.glob("*")):
                self._register_project(project_idx, Path(folder), output_root / str(project_idx))
        else:
            # Single-project mode: the root folder itself holds the images.
            self._register_project(0, self.images_folder, Path(output).expanduser().resolve())

        # Feature extraction / matching options
        self.sift_extraction_options = pycolmap.SiftExtractionOptions()
        # self.sift_extraction_options.max_image_size = -1
        # self.sift_extraction_options.max_num_features = 14000
        self.sift_matching_options = pycolmap.SiftMatchingOptions()

        # Feature matching strategy; validated here so an invalid value fails
        # fast instead of crashing later with an undefined matching_options.
        self.feature_mathing = feature_mathing
        if feature_mathing == "exhaustive":
            self.matching_options = pycolmap.ExhaustiveMatchingOptions()
        elif feature_mathing == "spatial":
            self.matching_options = pycolmap.SpatialMatchingOptions()
            self.matching_options.ignore_z = False
        else:
            raise ValueError('Feature matcher is unknown: {}'.format(feature_mathing))

        # SfM Options
        self.incremental_mapping_options = pycolmap.IncrementalMapperOptions()

        # MVS Options
        self.patch_match_options = pycolmap.PatchMatchOptions()
        self.patch_match_options.window_radius = 8
        self.patch_match_options.num_iterations = 7
        self.patch_match_options.max_image_size = patch_match_max_image_size
        self.stereo_fusion_options = pycolmap.StereoFusionOptions()
        self.stereo_fusion_options.max_image_size = stereo_fusion_max_image_size

        # Instantiate the camera preset; all images share one camera (SINGLE)
        self.camera_config = camera_model()
        self.camera = self.camera_config.camera
        self.camera_mode = pycolmap.CameraMode('SINGLE')

        if pycolmap.has_cuda:
            print("COLMAP Reconstruction uses CUDA.")
        else:
            print("COLMAP Reconstruction uses NO CUDA! Using CPU instead.")

    def _register_project(self, project_idx: int, images_path: Path, reco_path: Path):
        """Record the folder layout of one project and create its output dirs."""
        option_path = reco_path / "options"
        self.projects[project_idx] = {
            'images': images_path,
            'output': reco_path,
            'sparse': reco_path / "sparse",
            'mvs': reco_path / "dense",
            'database': reco_path / "database.db",
            'option': option_path,
        }
        reco_path.mkdir(parents=True, exist_ok=True)
        option_path.mkdir(exist_ok=True)

    @staticmethod
    def save_options(output_file: Union[str, Path], options: dict):
        """
        Saves the options to a JSON file.

        Args:
            output_file (str or Path): Path to the output JSON file.
            options (dict): Dictionary containing the options to be saved.

        Raises:
            TypeError: If the options are not JSON serializable.
        """
        if not is_jsonable(options):
            raise TypeError("Option not JSON serializable: {}".format(options))
        with open(output_file, 'w') as file:
            json.dump(options, file)

    @staticmethod
    def load_options(input_file: Union[str, Path]):
        """
        Loads the options from a JSON file.

        Args:
            input_file (str or Path): Path to the input JSON file.

        Returns:
            dict or False: Loaded options as a dictionary, or False if the file does not exist.
        """
        # Robustness fix: accept plain strings as the annotation promises
        # (str has no .exists(), so the original crashed on str input).
        input_file = Path(input_file)

        if not input_file.exists():
            return False
        with open(input_file, 'r') as file:
            return json.load(file)

    def check_for_existing(self, input_file: Union[str, Path], current_options: dict):
        """
        Checks if the options exist or if the existing options differ from the current options.

        Args:
            input_file (str or Path): Path to the input JSON file.
            current_options (dict): Dictionary containing the current options.

        Returns:
            bool: True if the stage must (re-)run (no saved options, or they
            differ from current_options), False otherwise.
        """
        loaded_options = self.load_options(input_file=input_file)
        # Missing file (False) or any mismatch both mean "run the stage".
        return not loaded_options or loaded_options != current_options

    def extract_features(self):
        """
        Extracts SIFT features from the input images using COLMAP, unless
        features with identical options were already extracted.
        """
        for project_idx in self.projects.keys():
            options = copy(self.sift_extraction_options.todict())
            # Enum -> name so the options dict becomes JSON serializable
            options['normalization'] = options['normalization'].name

            ret = self.check_for_existing(self.projects[project_idx]['option'] / "feature_extraction_options.json",
                                          current_options=options)

            if ret:
                # Options do not exist or existing options differ from current
                pycolmap.extract_features(self.projects[project_idx]['database'],
                                          self.projects[project_idx]['images'],
                                          camera_mode=self.camera_mode,
                                          device=pycolmap.Device.auto,
                                          sift_options=self.sift_extraction_options)

                self.save_options(self.projects[project_idx]['option'] / "feature_extraction_options.json",
                                  options)
            else:
                print('Feature extraction skipped. Options have not changed. Access database data...')

    def feature_matcher(self):
        """
        Performs feature matching using COLMAP.

        Raises:
            ValueError: If the configured matching strategy is unknown.
        """
        for project_idx in self.projects.keys():
            options = copy({**self.matching_options.todict(), **self.sift_matching_options.todict()})
            ret = self.check_for_existing(self.projects[project_idx]['option'] / "feature_matching_options.json",
                                          current_options=options)

            if ret:
                if self.feature_mathing == 'exhaustive':
                    matcher = pycolmap.match_exhaustive
                elif self.feature_mathing == 'spatial':
                    matcher = pycolmap.match_spatial
                else:
                    raise ValueError('Feature matcher is unknown: {}'.format(self.feature_mathing))

                # Options do not exist or existing options differ from current
                matcher(database_path=self.projects[project_idx]['database'],
                        device=pycolmap.Device.auto,
                        sift_options=self.sift_matching_options,
                        matching_options=self.matching_options)
                self.save_options(self.projects[project_idx]['option'] / "feature_matching_options.json", options)

            else:
                print('Exhaustive feature matching skipped. Options have not changed. Access database data...')

    def incremental_sfm(self):
        """
        Performs incremental structure from motion (SfM) using COLMAP.
        """
        for project_idx in self.projects.keys():
            options = copy(self.incremental_mapping_options.todict())
            options["image_names"] = []
            ret = self.check_for_existing(self.projects[project_idx]['option'] / "incremental_sfm_options.json",
                                          current_options=options)
            if ret:
                # Bug fix: pass the project's own image folder. The previous
                # code passed the shared root folder (self.images_folder),
                # which is wrong in multi-project mode.
                maps = pycolmap.incremental_mapping(database_path=self.projects[project_idx]['database'],
                                                    image_path=self.projects[project_idx]['images'],
                                                    output_path=self.projects[project_idx]['sparse'],
                                                    options=self.incremental_mapping_options)
                self.save_options(self.projects[project_idx]['option'] / "incremental_sfm_options.json", options)
                # Persist the first (largest) reconstructed model
                maps[0].write(self.projects[project_idx]['sparse'])

            else:
                print('Incremental mapping skipped. Options have not changed. Access sparse file data...')

    def undistort_images(self):
        """
        Undistorts the input images using COLMAP.
        """
        for project_idx in self.projects.keys():
            if not (self.projects[project_idx]['mvs'] / "images").exists():
                pycolmap.undistort_images(self.projects[project_idx]['mvs'],
                                          self.projects[project_idx]['sparse'],
                                          self.projects[project_idx]['images'])
            else:
                print('Images already undistorted! Skipping...')

    def patch_match_stereo(self):
        """
        Computes per-image depth maps via patch match stereo using COLMAP.
        """
        for project_idx in self.projects.keys():
            options = copy(self.patch_match_options.todict())
            ret = self.check_for_existing(self.projects[project_idx]['option'] / "patch_match_stereo_options.json",
                                          current_options=options)

            if ret:
                pycolmap.patch_match_stereo(self.projects[project_idx]['mvs'], options=self.patch_match_options)
                self.save_options(self.projects[project_idx]['option'] / "patch_match_stereo_options.json", options)
            else:
                print('Patch match stereo images already processed. Skipping...')

    def stereo_fusion(self):
        """
        Performs stereo fusion using COLMAP, writing fused.ply per project.
        """
        for project_idx in self.projects.keys():
            # os.cpu_count() may return None on exotic platforms -> 'or 1'
            self.stereo_fusion_options.num_threads = min(16, os.cpu_count() or 1)
            options = copy(self.stereo_fusion_options.todict())
            # Numpy arrays are not JSON serializable -> stringify the bbox
            options['bounding_box'] = str([list(array) for array in options['bounding_box']])
            ret = self.check_for_existing(self.projects[project_idx]['option'] / "stereo_fusion_options.json",
                                          current_options=options)

            dense_model_path = self.projects[project_idx]['mvs'] / "fused.ply"
            if ret:
                pycolmap.stereo_fusion(output_path=dense_model_path,
                                       workspace_path=self.projects[project_idx]['mvs'],
                                       workspace_format='COLMAP',
                                       pmvs_option_name='option-all',
                                       input_type='geometric', options=self.stereo_fusion_options)
                self.save_options(self.projects[project_idx]['option'] / "stereo_fusion_options.json", options)
            else:
                print('Stereo images already fused to model. Skipping...')

    def run(self):
        """
        Runs the complete reconstruction process using COLMAP.
        """
        # Sparse Reconstruction / SFM - Structure from Motion
        self.extract_features()
        self.feature_matcher()
        self.incremental_sfm()

        # Dense Reconstruction / MVS - Multi View Stereo
        self.undistort_images()
        self.patch_match_stereo()
        self.stereo_fusion()
352 |
353 |
if __name__ == '__main__':
    from colmap_wrapper.data.download import *
    from colmap_wrapper.visualization import ColmapVisualization
    from colmap_wrapper.dataloader import COLMAPLoader

    # Fetch the example bunny image dataset
    dataset = Dataset()
    dataset.download_bunny_images_dataset()

    # Run the full SfM + MVS pipeline on the downloaded images
    reco = COLMAPReconstruction(images=dataset.file_path,
                                output='./test_reco',
                                feature_mathing='exhaustive',
                                patch_match_max_image_size=400,
                                stereo_fusion_max_image_size=400,
                                camera_model=UnknownCamera)
    reco.run()

    # Load the finished reconstruction and render it
    loaded_project = COLMAPLoader(project_path=reco.projects[0]['output'])
    loaded_vs = ColmapVisualization(colmap=loaded_project, image_resize=0.4)
    loaded_vs.visualization(frustum_scale=0.8, image_type='image', point_size=1.5)
374 |
--------------------------------------------------------------------------------
/colmap_wrapper/reconstruction/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/reconstruction/test.png
--------------------------------------------------------------------------------
/colmap_wrapper/test/result_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/test/result_map.png
--------------------------------------------------------------------------------
/colmap_wrapper/test/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/test/test.png
--------------------------------------------------------------------------------
/colmap_wrapper/test/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 | import numpy as np
9 |
10 | # Own Modules
11 | from colmap_wrapper.data.download import Dataset
12 | from colmap_wrapper.dataloader import COLMAPLoader
13 | from colmap_wrapper.visualization import ColmapVisualization
14 |
if __name__ == '__main__':
    # Fetch the example bunny reconstruction dataset
    dataset = Dataset()
    dataset.download_bunny_reco_dataset()

    # Load the reconstruction from disk
    loader = COLMAPLoader(project_path=dataset.file_path)
    reco = loader.project

    # Access the individual reconstruction artefacts
    camera = reco.cameras
    images = reco.images
    sparse = reco.get_sparse()
    dense = reco.get_dense()

    # project_vs = ColmapVisualization(dataloader=reco, bg_color=np.asarray([0, 0, 0]))
    # project_vs.visualization(frustum_scale=0.4, image_type='image', point_size=0.001)

    print('Finished')
32 |
--------------------------------------------------------------------------------
/colmap_wrapper/test/test2sides.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 | import numpy as np
9 |
10 | # Own Modules
11 | from colmap_wrapper.data.download import Dataset
12 | from colmap_wrapper.colmap.colmap import COLMAP
13 | from colmap_wrapper.visualization import ColmapVisualization
14 | from colmap_wrapper import USER_NAME
15 |
if __name__ == '__main__':
    # Load a two-sided reconstruction from the external drive
    project = COLMAP(project_path='/media/{}/Samsung_T5/For5G/reco/23_03_17/03'.format(USER_NAME), dense_pc='fused.ply')

    reco = project.project

    # Access the individual reconstruction artefacts
    camera = reco.cameras
    images = reco.images
    sparse = reco.get_sparse()
    dense = reco.get_dense()

    # Render every side of the reconstruction separately
    for model in project.projects:
        model_vs = ColmapVisualization(colmap=model, bg_color=np.asarray([0, 0, 0]))
        model_vs.visualization(frustum_scale=0.4, image_type='image', point_size=0.001)

    print('Finished')
31 |
--------------------------------------------------------------------------------
/colmap_wrapper/test/testGPS.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 | import copy
9 |
10 | import numpy as np
11 | import pylab as plt
12 | import open3d as o3d
13 |
14 | # Own Modules
15 | from colmap_wrapper.gps.visualization import GPSVisualization
16 | from colmap_wrapper.colmap.colmap import COLMAP
17 | from colmap_wrapper.visualization import ColmapVisualization
18 | from colmap_wrapper import USER_NAME
19 |
20 |
def points2pcd(points: np.ndarray):
    '''
    Convert a numpy array to an open3d point cloud. Just for convenience to avoid converting it every single time.
    Assigns blue color uniformly to the point cloud.

    :param points: Nx3 array with xyz location of points
    :return: a blue open3d.geometry.PointCloud()
    '''
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    # One blue RGB triple per point
    blue = np.tile((0.0, 0.0, 1.0), (points.shape[0], 1))
    pcd.colors = o3d.utility.Vector3dVector(blue)

    return pcd
36 |
37 |
def kabsch_umeyama(pointset_A, pointset_B):
    """
    Kabsch-Umeyama algorithm is a method for aligning and comparing the similarity between two sets of points.
    It finds the optimal translation, rotation and scaling by minimizing the root-mean-square deviation (RMSD)
    of the point pairs, i.e. A is approximated by c * R @ B + t.

    Source and Explanation: https://zpl.fi/aligning-point-patterns-with-kabsch-umeyama-algorithm/

    @param pointset_A: array of a set of points in n-dim
    @param pointset_B: array of a set of points in n-dim
    @return: Rotation Matrix (3x3), scaling (scalar) translation vector (3x1)
    """
    assert pointset_A.shape == pointset_B.shape
    num_points, dim = pointset_A.shape

    # Centroids of both point sets and the variance of set A
    centroid_A = pointset_A.mean(axis=0)
    centroid_B = pointset_B.mean(axis=0)
    variance_A = np.mean(np.linalg.norm(pointset_A - centroid_A, axis=1) ** 2)

    # Cross-covariance matrix of the centred point sets
    cov = ((pointset_A - centroid_A).T @ (pointset_B - centroid_B)) / num_points

    # SVD of the covariance: cov = U @ diag(D) @ VT
    U, D, VT = np.linalg.svd(cov)

    # Correct for a possible reflection (det < 0)
    sign = np.sign(np.linalg.det(U) * np.linalg.det(VT))
    S = np.diag([1.0] * (dim - 1) + [sign])

    # Optimal rotation, scale and translation
    R = U @ S @ VT
    c = variance_A / np.trace(np.diag(D) @ S)
    t = centroid_A - c * R @ centroid_B

    return R, c, t
75 |
76 |
def get_cartesian(lat=None, lon=None, elevation=None):
    """
    Convert latitude/longitude/elevation (degrees, spherical earth model)
    to cartesian x, y, z coordinates.

    https://itecnote.com/tecnote/python-how-to-convert-longitudelatitude-elevation-to-cartesian-coordinates/
    """
    lat_rad = np.deg2rad(lat)
    lon_rad = np.deg2rad(lon)
    radius = 6378137.0 + elevation  # earth radius plus elevation
    cos_lat = np.cos(lat_rad)
    return (radius * cos_lat * np.cos(lon_rad),
            radius * cos_lat * np.sin(lon_rad),
            radius * np.sin(lat_rad))
87 |
88 |
def visualize_gps_osm(project, map_path, osm_boarders, save_as: str, show: bool = False):
    """
    Collect per-image GPS EXIF data and camera extrinsics of all sub-projects,
    show scatter plots of both, and draw the GPS records onto an OSM map tile.

    Args:
        project: COLMAP wrapper project; each entry in project.projects exposes
            images with .exifdata (XMP GPS tags) and .extrinsics (4x4 matrix).
        map_path: Path to the OSM map image.
        osm_boarders: GPS borders of the map image (lat/lon corner tuple).
        save_as (str): Path under which the annotated map image is saved.
        show (bool): Whether plot_map displays the resulting map.

    Returns:
        tuple: ([gps cartesian per side], [camera translations per side])

    NOTE(review): indexes side 0 and side 1 explicitly, so this assumes
    exactly two sub-projects - confirm before using with other counts.
    """
    gps_data = {}
    extrinsics = {}
    for model_idx, model in enumerate(project.projects):
        gps_data.update({model_idx: []})
        extrinsics.update({model_idx: []})
        for image_idx in model.images.keys():
            # XMP GPS tags are stored as strings -> convert to float
            gps_data[model_idx].append([float(model.images[image_idx].exifdata["XMP:GPSLatitude"]),
                                        float(model.images[image_idx].exifdata["XMP:GPSLongitude"]),
                                        float(model.images[image_idx].exifdata["XMP:AbsoluteAltitude"])])
            extrinsics[model_idx].append(model.images[image_idx].extrinsics)

    # Show gps --> x and y
    gps_certesian_0 = np.asarray([get_cartesian(lat, long, elev) for lat, long, elev in gps_data[0]])
    plt.scatter(gps_certesian_0[:, 0], gps_certesian_0[:, 1])

    gps_certesian_1 = np.asarray([get_cartesian(lat, long, elev) for lat, long, elev in gps_data[1]])
    plt.scatter(gps_certesian_1[:, 0], gps_certesian_1[:, 1])
    plt.show()

    # Show extrinsics (translational part)
    trans_certesian_0 = np.asarray([[array[:3, -1][0], array[:3, -1][1], array[:3, -1][2]] for array in extrinsics[0]])
    plt.scatter(trans_certesian_0[:, 0], trans_certesian_0[:, 1])

    trans_certesian_1 = np.asarray([[array[:3, -1][0], array[:3, -1][1], array[:3, -1][2]] for array in extrinsics[1]])
    plt.scatter(trans_certesian_1[:, 0], trans_certesian_1[:, 1])
    plt.show()

    # Flatten all sides into one list for the OSM overlay
    gps_data_osm = []
    for key in gps_data.keys():
        gps_data_osm.extend(gps_data[key])

    vis = GPSVisualization(gps_data_osm, map_path=map_path,
                           points=osm_boarders)  # Draw converted records to the map image.

    vis.create_image(color=(0, 0, 255), width=0.2)
    # Bug fix: forward the caller's 'show' flag (was hard-coded to True,
    # silently ignoring the parameter).
    vis.plot_map(show=show, save_as=save_as)

    return [gps_certesian_0, gps_certesian_1], [trans_certesian_0, trans_certesian_1]
128 |
129 |
if __name__ == '__main__':
    # Toggle between a local test dataset and the external-drive dataset.
    if False:
        path = "/home/{}/Documents/reco/23_04_14/01".format(USER_NAME)
        img_orig = None
    else:
        path = '/media/{}/Samsung_T5/For5G/reco/23_03_17/03'.format(USER_NAME)
        img_orig = '/media/{}/Samsung_T5/For5G/data/23_03_17/03'.format(USER_NAME)
    # Load the COLMAP project including EXIF data (needed for the GPS tags)
    project = COLMAP(project_path=path,
                     exif_read=True,
                     img_orig=img_orig,
                     dense_pc='cropped.ply')

    colmap_project = project.project

    camera = colmap_project.cameras
    images = colmap_project.images
    sparse = colmap_project.get_sparse()
    dense = colmap_project.get_dense()

    # Render each side of the two-sided reconstruction separately
    for COLMAP_MODEL in project.projects:
        project_vs = ColmapVisualization(colmap=COLMAP_MODEL, bg_color=np.asarray([0, 0, 0]))
        project_vs.visualization(frustum_scale=0.4, image_type='image', point_size=0.001)

    # Plot the GPS positions of both sides onto the OSM map tile
    gps, extrinsic = visualize_gps_osm(project=project,
                                       map_path='../gps/map.png',
                                       osm_boarders=(49.66721, 11.32313, 49.66412, 11.32784),
                                       save_as='./result_map.png',
                                       show=True)

    # Center both GPS point sets on side 0's centroid (shared origin).
    # NOTE(review): gps_1 is also shifted by gps[0]'s mean - confirm that
    # using side 0's centroid for both sides is intentional.
    gps_0 = gps[0] - np.mean(gps[0], axis=0)
    gps_1 = gps[1] - np.mean(gps[0], axis=0)

    # Estimate per side the similarity transform (rotation R, scale c,
    # translation t) aligning the camera extrinsics onto the GPS coordinates
    R_0, c_0, t_0 = kabsch_umeyama(gps_0, extrinsic[0])
    R_1, c_1, t_1 = kabsch_umeyama(gps_1, extrinsic[1])

    # Plot GPS
    gps_pcd_0 = points2pcd((np.asarray(gps_0)))
    gps_pcd_0.paint_uniform_color((1, 0, 0))
    gps_pcd_1 = points2pcd((np.asarray(gps_1)))
    gps_pcd_1.paint_uniform_color((0, 0, 1))
    o3d.visualization.draw_geometries([gps_pcd_0, gps_pcd_1])

    # Plot Camera
    extrinsic_pcd_0 = points2pcd((np.asarray(extrinsic[0])))
    extrinsic_pcd_0.paint_uniform_color((0, 1, 0))
    extrinsic_pcd_1 = points2pcd((np.asarray(extrinsic[1])))
    extrinsic_pcd_1.paint_uniform_color((0, 1, 1))
    o3d.visualization.draw_geometries([extrinsic_pcd_0, extrinsic_pcd_1])

    extrinsic_pcd_oriented_0 = copy.deepcopy(extrinsic_pcd_0)
    extrinsic_pcd_oriented_1 = copy.deepcopy(extrinsic_pcd_1)

    # Build the 4x4 homogeneous similarity transform for side 0
    I_0 = np.eye(4)
    I_0[:3, :3] = c_0 * R_0
    I_0[:3, -1] = t_0

    extrinsic_pcd_oriented_0 = extrinsic_pcd_oriented_0.transform(I_0)

    # extrinsic_pcd_oriented_0 = extrinsic_pcd_oriented_0.rotate(R_0)
    # extrinsic_pcd_oriented_0 = extrinsic_pcd_oriented_0.scale(c_0, center=(0, 0, 0))
    # extrinsic_pcd_oriented_0 = extrinsic_pcd_oriented_0.translate(t_0)

    # Build the 4x4 homogeneous similarity transform for side 1
    I_1 = np.eye(4)
    I_1[:3, :3] = c_1 * R_1
    I_1[:3, -1] = t_1

    extrinsic_pcd_oriented_1 = extrinsic_pcd_oriented_1.transform(I_1)

    # extrinsic_pcd_oriented_1 = extrinsic_pcd_oriented_1.rotate(R_1)
    # extrinsic_pcd_oriented_1 = extrinsic_pcd_oriented_1.scale(c_1, center=(0, 0, 0))
    # extrinsic_pcd_oriented_1 = extrinsic_pcd_oriented_1.translate(t_1)

    # Aligned gps and poses
    o3d.visualization.draw_geometries([extrinsic_pcd_oriented_0, extrinsic_pcd_oriented_1, gps_pcd_0, gps_pcd_1])

    # Align dense pcd
    side_0 = copy.deepcopy(project.projects[0].dense)
    side_1 = copy.deepcopy(project.projects[1].dense)

    # o3d.visualization.draw_geometries([side_0, side_1, extrinsic_pcd_0, extrinsic_pcd_1])

    side_0 = side_0.transform(I_0)
    # side_0 = side_0.rotate(R_0)
    # side_0 = side_0.scale(c_0, center=(0, 0, 0))
    # side_0 = side_0.translate(t_0)

    side_1 = side_1.transform(I_1)
    # side_1 = side_1.rotate(R_1)
    # side_1 = side_1.scale(c_1, center=(0, 0, 0))
    # side_1 = side_1.translate(t_1)
    o3d.visualization.draw_geometries([side_0, side_1])

    # project.projects[1].dense = project.projects[1].dense.scale(c_1)

    # Show everything aligned: dense clouds, oriented poses and GPS points
    o3d.visualization.draw_geometries(
        [side_0, side_1, extrinsic_pcd_oriented_0, extrinsic_pcd_oriented_1, gps_pcd_0, gps_pcd_1])

    # Persist the aligned dense point clouds
    o3d.io.write_point_cloud('side_0.ply', side_0)
    o3d.io.write_point_cloud('side_1.ply', side_1)

    for COLMAP_MODEL in project.projects:
        project_vs = ColmapVisualization(colmap=COLMAP_MODEL, bg_color=np.asarray([0, 0, 0]))
        project_vs.visualization(frustum_scale=0.4, image_type='image', point_size=0.001)

    print('Finished')
235 |
--------------------------------------------------------------------------------
/colmap_wrapper/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from colmap_wrapper.visualization.visualization import *
5 | from colmap_wrapper.visualization.visualization_photogrammetry_software import *
--------------------------------------------------------------------------------
/colmap_wrapper/visualization/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meyerls/colmap-wrapper/b86b87736f98e21d8f2e3f936d7b01edc0d998b3/colmap_wrapper/visualization/test.png
--------------------------------------------------------------------------------
/colmap_wrapper/visualization/visualization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | Copyright (c) 2022 Lukas Meyer
6 | Licensed under the MIT License.
7 | See LICENSE file for more information.
8 | """
9 |
10 | # Built-in/Generic Imports
11 | from typing import Tuple
12 |
13 | # Libs
14 | import numpy as np
15 | import open3d as o3d
16 | from PIL import Image, ImageFont, ImageDraw
17 | from pyquaternion import Quaternion
18 |
19 | # Own modules
20 | # ...
21 |
22 |
23 | '''
24 | # Plot/show text of distance in 3 Reco. Currently not working as text is rotated wrongly. tbd!
25 |
26 | pos_text = (self.aruco_corners_3d[0] + (
27 | self.aruco_corners_3d[1] - self.aruco_corners_3d[0]) / 2) * self.scale_factor
28 | pcd_tree = o3d.geometry.KDTreeFlann(self.dense_scaled)
29 | [k, idx, _] = pcd_tree.search_knn_vector_3d(pos_text, 100)
30 |
31 | dir_vec = []
32 | for i in idx:
33 | dir_vec.append(self.dense_scaled.normals[i])
34 | dir_vec = np.asarray(dir_vec).mean(axis=0)
35 |
36 | dist = np.linalg.norm(
37 | self.aruco_corners_3d[0] * self.scale_factor - self.aruco_corners_3d[1] * self.scale_factor)
38 | pcd_text = text_3d(text='{:.4f} cm'.format(dist*100),
39 | pos=pos_text,
40 | direction=dir_vec)
41 | geometries.append(pcd_text)
42 |
43 | '''
44 |
45 |
def text_3d(text, pos, direction=None, density=10, degree=0.0, font="arial.ttf", font_size=16):
    """
    Source: https://github.com/isl-org/Open3D/issues/2

    Generate a 3D text point cloud used for visualization.
    :param text: content of the text
    :param pos: 3D xyz position of the text upper left corner
    :param direction: 3D normalized direction of where the text faces
    :param density: pixel density of the rendered glyphs
    :param degree: in plane rotation of text
    :param font: Name of the font - change it according to your system
    :param font_size: radius of the font
    :return: o3d.geometry.PointCloud object
    """
    if direction is None:
        direction = (0., 0., 1.)

    font_obj = ImageFont.truetype(font, font_size * density)
    # Compatibility fix: ImageFont.getsize() was removed in Pillow 10.
    # Fall back to getbbox(), whose (right, bottom) corner gives the size.
    try:
        font_dim = font_obj.getsize(text)
    except AttributeError:
        left, top, right, bottom = font_obj.getbbox(text)
        font_dim = (right, bottom)

    # Render the text black-on-white, then keep every dark pixel as a point
    img = Image.new('RGB', font_dim, color=(255, 255, 255))
    draw = ImageDraw.Draw(img)
    draw.text((0, 0), text, font=font_obj, fill=(0, 0, 0))
    img = np.asarray(img)
    img_mask = img[:, :, 0] < 128
    indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T

    pcd = o3d.geometry.PointCloud()
    pcd.colors = o3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)
    pcd.points = o3d.utility.Vector3dVector(indices / 1000.0 / density)

    # Rotate the flat text to face 'direction'; when direction is
    # (anti)parallel to +z the cross product degenerates -> use z axis.
    raxis = np.cross([0.0, 0.0, 1.0], direction)
    if np.linalg.norm(raxis) < 1e-6:
        raxis = (0.0, 0.0, 1.0)
    trans = (Quaternion(axis=raxis, radians=np.arccos(direction[2])) *
             Quaternion(axis=direction, degrees=degree)).transformation_matrix
    trans[0:3, 3] = np.asarray(pos)
    pcd.transform(trans)
    return pcd
84 |
85 |
def create_sphere_mesh(t: np.ndarray, color: list, radius: float) -> list:
    """
    Create one sphere mesh per 3D coordinate, translated to that coordinate
    and painted with a uniform color.

    @param t: 3D coordinate(s). Either a single (3,) point or an (N, 3) array.
    @param color: rgb color(s) ranging between 0 and 1; one color per point.
    @param radius: radius of the sphere(s)
    :return: list of o3d.geometry.TriangleMesh spheres
    """
    # Promote a single point (and its single color) to batched form so the
    # loop below handles both cases uniformly.
    if t.ndim == 1:
        t = np.expand_dims(t, axis=0)
        color = [color]

    sphere_list = []

    for point, rgb in zip(t, color):
        sphere = o3d.geometry.TriangleMesh.create_sphere(radius=radius)
        sphere.translate(point)
        sphere.paint_uniform_color(np.asarray(rgb))
        sphere.compute_triangle_normals()

        sphere_list.append(sphere)

    return sphere_list
111 |
112 |
def generate_line_set(points: list, lines: list, color: list) -> o3d.pybind.geometry.LineSet:
    '''
    Build an Open3D line set from the given points and connections, with a
    uniform color for all segments.

    :param points: 3D end points of the lines
    :param lines: list of index pairs into `points`
    :param color: rgb color ranging between 0 and 1.
    :return: the colored o3d LineSet
    '''
    line_set = o3d.geometry.LineSet(
        points=o3d.utility.Vector3dVector(points),
        lines=o3d.utility.Vector2iVector(lines),
    )
    # One identical color entry per line segment.
    line_set.colors = o3d.utility.Vector3dVector([color] * len(lines))

    return line_set
130 |
131 |
def draw_camera_plane(extrinsics, intrinsics, scale):
    '''
    Draw camera/image plane inside the camera frustum.

    :param extrinsics: 4x4 camera pose matrix (rotation R, translation t)
    :param intrinsics: 3x3 camera matrix K
    :param scale: scale factor for the plane size
    :return: (o3d LineSet outlining the plane, list of its 4 corner points)
    '''

    # Split the pose into rotation and translation.
    R = extrinsics[:3, :3]
    t = extrinsics[:3, 3]

    # Focal lengths and principal point from K.
    fx = intrinsics[0, 0]
    fy = intrinsics[1, 1]
    cx = intrinsics[0, 2]
    cy = intrinsics[1, 2]

    # Normalize the corner coordinates by the largest intrinsic value.
    max_norm = max(fx, fy, cx, cy)

    # Four corners of the image plane at depth fx, rotated into world space
    # and offset by the camera center t.
    corner_signs = ((1, 1), (1, -1), (-1, -1), (-1, 1))
    points_camera_plane = [
        t + (np.asarray([sx * cx, sy * cy, fx]) * scale) / max_norm @ R.T
        for sx, sy in corner_signs
    ]

    # Close the rectangle: corner i connects to corner (i + 1) mod 4.
    lines = [[0, 1], [1, 2], [2, 3], [3, 0]]
    line_set = generate_line_set(points=points_camera_plane,
                                 lines=lines,
                                 color=[1, 0, 0])

    return line_set, points_camera_plane
166 |
167 |
def draw_camera_viewport(extrinsics: np.ndarray, intrinsics: np.ndarray, image=None, scale=1) \
        -> Tuple[
    o3d.pybind.geometry.LineSet,
    o3d.pybind.geometry.TriangleMesh,
    o3d.pybind.geometry.TriangleMesh]:
    '''
    Build the Open3D geometry for one camera frustum: a wireframe pyramid,
    a sphere at the camera center, and (if an image is given) the image
    plane as a textured mesh.

    :param extrinsics: 4x4 camera pose matrix (rotation R, translation t)
    :param intrinsics: 3x3 camera matrix K
    :param image: texture for the image plane as a numpy array; if None an
                  empty mesh is returned instead
    :param scale: scale factor for the frustum size
    :return: (frustum LineSet, list of camera-center sphere mesh(es),
              image-plane TriangleMesh)
    '''

    # Extrinsic parameters
    R, t = extrinsics[:3, :3], extrinsics[:3, 3]

    # intrinsic points
    fx, fy, cx, cy = intrinsics[0, 0], intrinsics[1, 1], intrinsics[0, 2], intrinsics[1, 2]

    # Normalize to 1
    max_norm = max(fx, fy, cx, cy)

    # Apex of the pyramid (camera center) plus the 4 image-plane corners at
    # depth fx, rotated into world space.
    points = [
        t,
        t + (np.asarray([cx, cy, fx]) * scale) / max_norm @ R.T,
        t + (np.asarray([cx, -cy, fx]) * scale) / max_norm @ R.T,
        t + (np.asarray([-cx, -cy, fx]) * scale) / max_norm @ R.T,
        t + (np.asarray([-cx, cy, fx]) * scale) / max_norm @ R.T,
    ]

    # 4 edges from the apex to each corner, plus the rectangle of corners.
    lines = [[0, 1], [0, 2], [0, 3], [0, 4], [1, 2], [2, 3], [3, 4], [4, 1]]
    line_set = generate_line_set(points=points, lines=lines, color=[1, 0, 0])

    # Small red sphere marking the camera center.
    sphere = create_sphere_mesh(t=t, color=[1, 0, 0], radius=0.01)

    # Fill image plane/mesh with image as texture
    if isinstance(image, np.ndarray):

        # Create Point Cloud and assign a normal vector pointing in the opposite direction of the viewing normal
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(np.asarray(points[1:]))
        normal_vec = - (np.asarray([0, 0, fx]) @ R.T)
        pcd.normals = o3d.utility.Vector3dVector(np.tile(normal_vec, (pcd.points.__len__(), 1)))

        # Create image plane with image as texture
        plane = o3d.geometry.TriangleMesh()
        plane.vertices = pcd.points
        # Two triangles spanning the 4 corners: (0,1,3) and (1,2,3).
        plane.triangles = o3d.utility.Vector3iVector(np.asarray([[0, 1, 3],
                                                                 [1, 2, 3]]))
        plane.compute_vertex_normals()
        # Per-triangle-vertex UVs: 3 rows per triangle, 6 rows for 2 triangles.
        # NOTE(review): mapping looks hand-tuned to the corner order above —
        # verify orientation visually before changing.
        v_uv = np.asarray([[1, 1],
                           [1, 0],
                           [0, 1],
                           [1, 0],
                           [0, 0],
                           [0, 1]])
        plane.triangle_uvs = o3d.utility.Vector2dVector(v_uv)
        # Both triangles sample from texture index 0.
        plane.triangle_material_ids = o3d.utility.IntVector([0] * 2)
        plane.textures = [o3d.geometry.Image(image)]
    else:
        # No image supplied: return an empty placeholder mesh.
        plane = o3d.geometry.TriangleMesh()
        print('Blank')

    return line_set, sphere, plane
233 |
--------------------------------------------------------------------------------
/colmap_wrapper/visualization/visualization_photogrammetry_software.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
9 | # Built-in/Generic Imports
10 | import open3d as o3d
11 | import numpy as np
12 | from tqdm import tqdm
13 | from typing import Union
14 |
15 | # Libs
16 | from colmap_wrapper.dataloader import COLMAPLoader, COLMAPProject
17 | from colmap_wrapper.dataloader.project import PhotogrammetrySoftware
18 | from colmap_wrapper.visualization import draw_camera_viewport
19 |
20 |
class PhotogrammetrySoftwareVisualization(object):
    """Base visualization for photogrammetry projects (sparse/dense clouds)."""

    def __init__(self, photogrammetry_software: PhotogrammetrySoftware, image_resize: float = 0.3):
        """
        @param photogrammetry_software: a COLMAPLoader holding one or more
               projects, or a single COLMAPProject
        @param image_resize: resize factor for images shown in camera frustums
        """
        # Normalize the input to a list of projects so all methods iterate
        # uniformly over self.photogrammetry_software.
        if isinstance(photogrammetry_software, COLMAPLoader):
            self.photogrammetry_software = photogrammetry_software.project_list
        elif isinstance(photogrammetry_software, COLMAPProject):
            self.photogrammetry_software = [photogrammetry_software]
        else:
            # Previously an unsupported type silently left the attribute unset,
            # causing a confusing AttributeError later; fail fast instead.
            raise TypeError(
                'Unsupported photogrammetry software type: {}'.format(type(photogrammetry_software)))
        self.geometries = []

        self.image_resize: float = image_resize

    def show_sparse(self):
        """Open an Open3D window per project showing its sparse point cloud."""
        for project in self.photogrammetry_software:
            o3d.visualization.draw_geometries([project.get_sparse()])

    def show_dense(self):
        """Open an Open3D window per project showing its dense point cloud."""
        for project in self.photogrammetry_software:
            o3d.visualization.draw_geometries([project.get_dense()])
38 |
39 |
class ColmapVisualization(PhotogrammetrySoftwareVisualization):
    """Open3D visualization of one or more COLMAP reconstructions.

    Accumulates dense/sparse point clouds and camera frustums (optionally
    textured with the source image or a colorized depth map) in
    ``self.geometries`` and renders them in a single viewer window.
    """

    def __init__(self, colmap: Union[COLMAPLoader, COLMAPProject], bg_color: np.ndarray = np.asarray([1, 1, 1]),
                 image_resize: float = 0.3):
        """
        @param colmap: loaded COLMAP project(s)
        @param bg_color: viewer background color, rgb in [0, 1] (default white)
        @param image_resize: resize factor for images rendered into frustums
        """
        super().__init__(colmap, image_resize=image_resize)

        # Stored until the viewer window is created in start_visualizer().
        self.vis_bg_color = bg_color

    def add_colmap_dense2geometrie(self):
        """Append each project's dense point cloud to ``self.geometries``.

        Returns False (and stops) as soon as a project has an empty dense
        cloud; note that clouds of earlier projects may already have been
        appended at that point.
        """
        for project in self.photogrammetry_software:
            if np.asarray(project.get_dense().points).shape[0] == 0:
                return False

            self.geometries.append(project.get_dense())

        return True

    def add_colmap_sparse2geometrie(self):
        """Append each project's sparse point cloud to ``self.geometries``.

        Returns False (and stops) as soon as a project has an empty sparse
        cloud; clouds of earlier projects may already have been appended.
        """
        for project in self.photogrammetry_software:
            if np.asarray(project.get_sparse().points).shape[0] == 0:
                return False

            self.geometries.append(project.get_sparse())
        return True

    def add_colmap_frustums2geometrie(self, frustum_scale: float = 1., image_type: str = 'image'):
        """
        Build a camera frustum (wireframe, center sphere and textured image
        plane) for every registered image of every project and append the
        resulting geometries to ``self.geometries``.

        @param image_type: texture source: 'image', 'depth_geo' or 'depth_photo'
        @type frustum_scale: object
        @param frustum_scale: scale factor for the frustum size
        """
        # aa = []
        # for image_idx in tqdm(self.photogrammetry_software.images.keys()):
        #     aa.append(self.photogrammetry_software.images[image_idx].extrinsics[:3, 3])
        #
        # a = o3d.geometry.PointCloud()
        # a.points = o3d.utility.Vector3dVector(np.vstack(aa))
        # coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=np.asarray([0., 0., 0.]))
        # o3d.visualization.draw_geometries([coordinate_frame, a])

        for project in self.photogrammetry_software:
            geometries = []
            for image_idx in tqdm(project.images.keys()):

                if image_type == 'image':
                    # Original photo, resized to keep texture memory small.
                    image = project.images[image_idx].getData(self.image_resize)
                elif image_type == 'depth_geo':
                    import cv2
                    # Geometric depth map, clipped to the 5th..95th percentile
                    # to suppress outliers, then rendered with a heat colormap.
                    image = project.images[image_idx].depth_image_geometric
                    min_depth, max_depth = np.percentile(image, [5, 95])
                    image[image < min_depth] = min_depth
                    image[image > max_depth] = max_depth
                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
                    image = (image / project.max_depth_scaler * 255).astype(
                        np.uint8)  # TODO max_depth_scaler
                    image = cv2.applyColorMap(image, cv2.COLORMAP_HOT)
                elif image_type == 'depth_photo':
                    import cv2
                    # Photometric depth map, clipped like 'depth_geo' above.
                    # NOTE(review): unlike 'depth_geo' no colormap is applied
                    # here - confirm whether that is intentional.
                    image = project.images[image_idx].depth_image_photometric
                    min_depth, max_depth = np.percentile(
                        image, [5, 95])
                    image[image < min_depth] = min_depth
                    image[image > max_depth] = max_depth
                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
                    image = (image / project.max_depth_scaler_photometric * 255).astype(
                        np.uint8)  # TODO max_depth_scaler_photometric
                else:
                    raise ValueError('Unknown image type')

                # Frustum wireframe, camera-center sphere(s) and image plane.
                line_set, sphere, mesh = draw_camera_viewport(
                    extrinsics=project.images[image_idx].extrinsics,
                    intrinsics=project.images[image_idx].intrinsics.K,
                    image=image,
                    scale=frustum_scale)

                geometries.append(mesh)
                geometries.append(line_set)
                geometries.extend(sphere)  # sphere is a list of meshes

            self.geometries.extend(geometries)

    def visualization(self,
                      show_dense: bool = True,
                      show_sparse: bool = True,
                      show_frustums: bool = True,
                      frustum_scale: float = 1.,
                      point_size: float = 1.,
                      image_type: str = 'image',
                      title: str = "Open3D Visualizer",
                      window_size: tuple = (1920, 1080)):
        """
        Collect the selected geometries and open the viewer window.

        @param show_dense: add the dense point cloud(s)
        @param show_sparse: add the sparse point cloud(s)
        @param show_frustums: add the camera frustums
        @param frustum_scale: scale factor for the frustum size
        @param point_size: point size used by the viewer
        @param image_type: one of ['image', 'depth_geo', 'depth_photo']
        @param title: window title
        @param window_size: window size in pixels as (width, height)
        """
        image_types = ['image', 'depth_geo', 'depth_photo']

        # Validate early so no geometry work is wasted on a bad argument.
        if image_type not in image_types:
            raise TypeError('image type is {}. Only {} is allowed'.format(image_type, image_types))

        if show_dense:
            self.add_colmap_dense2geometrie()
        if show_sparse:
            self.add_colmap_sparse2geometrie()
        if show_frustums:
            self.add_colmap_frustums2geometrie(frustum_scale=frustum_scale, image_type=image_type)
        self.start_visualizer(point_size=point_size, title=title, size=window_size)

    def start_visualizer(self,
                         point_size: float,
                         title: str = "Open3D Visualizer",
                         size: tuple = (1920, 1080)):
        """
        Open an Open3D window, add all collected geometries, apply the render
        settings, save a screenshot and block until the window is closed.

        @param point_size: point size used for point-cloud rendering
        @param title: window title
        @param size: window size in pixels as (width, height)
        """
        viewer = o3d.visualization.Visualizer()
        viewer.create_window(window_name=title, width=size[0], height=size[1])

        for geometry in self.geometries:
            viewer.add_geometry(geometry)
        opt = viewer.get_render_option()
        ctr = viewer.get_view_control()
        # Hard-coded initial camera pose; presumably tuned for a specific
        # dataset - TODO confirm / expose as parameters.
        ctr.set_front([-0.25053090455707444, -0.86588036871950691, 0.43299590405451305])
        ctr.set_lookat([6.298907877205747, 1.3968597508640934, 1.9543917296138904])
        ctr.set_up([0.06321823212762033, -0.460937346978886, -0.88517807094771861])
        ctr.set_zoom(0.02)
        # opt.show_coordinate_frame = True
        opt.point_size = point_size
        opt.light_on = False
        opt.line_width = 0.01
        opt.background_color = self.vis_bg_color

        # Writes a screenshot to the working directory before the
        # interactive loop starts.
        viewer.capture_screen_image(filename='./test.png', do_render=True)
        viewer.run()
        viewer.destroy_window()
172 |
173 |
if __name__ == '__main__':
    # Demo: load a local COLMAP reconstruction and show it with textured
    # camera frustums. The hard-coded path only exists on the author's
    # machine - adjust project_path to run this locally.
    from colmap_wrapper import USER_NAME

    project = COLMAPLoader(project_path='/home/{}/Dropbox/07_data/misc/bunny_data/reco'.format(USER_NAME),
                           dense_pc='fused.ply')

    project_vs = ColmapVisualization(colmap=project, image_resize=0.4)
    project_vs.visualization(frustum_scale=0.8, image_type='image')
182 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: colmap_wrapper
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python==3.8
6 | - numpy
7 | - pip
8 |
9 |
--------------------------------------------------------------------------------
/img/hilpoltstein_osm.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ed8de105c30f3699923222fd515c4ae23ff8dbd2ed2016611acecc3ff503810f
3 | size 752065
4 |
--------------------------------------------------------------------------------
/img/img.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:29e8b07e4081c0c4196ee059a6a7988e5aee0e38f8ba7c560b84c53d61f2b019
3 | size 258839
4 |
--------------------------------------------------------------------------------
/img/img_1.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ee4c5e89bab3ce44d043eb7f7cdf603f2c7d341c5131615b01d396384f27cfc6
3 | size 326432
4 |
--------------------------------------------------------------------------------
/img/img_2.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e8d06b7e658bfa8d9d4628d396f0a2e999f142a36eafccfd5ed242158e9925a2
3 | size 237584
4 |
--------------------------------------------------------------------------------
/img/map.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:98a0356363375c83db5030deb3f0f2f5620c7f326c6fff79b8963c981278684a
3 | size 246238
4 |
--------------------------------------------------------------------------------
/img/result_map.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d1c9dd8c803e5caefc2b83df047ce683b4d85e6d32c838e1ac007d1df295facf
3 | size 905839
4 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42"]
3 | build-backend = "setuptools.build_meta"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | open3d
3 | opencv-contrib-python
4 | pyquaternion==0.9.9
5 | matplotlib
6 | tqdm==4.64.0
7 | wget==3.2
8 | pycolmap
9 | pyexiftool
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (c) 2022 Lukas Meyer
5 | Licensed under the MIT License.
6 | See LICENSE file for more information.
7 | """
8 |
# Built-in/Generic Imports
import setuptools
from os import path

# Use the repository Readme as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'Readme.md'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='colmap_wrapper',
    version='1.1.5',
    description='COLMAP Wrapper',
    license="MIT",
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Lukas Meyer',
    author_email='lukas.meyer@fau.de',
    url="https://github.com/meyerls/colmap-wrapper",
    packages=['colmap_wrapper.dataloader',
              'colmap_wrapper.visualization',
              'colmap_wrapper.data',
              'colmap_wrapper.gps',
              'colmap_wrapper.reconstruction'],
    # Keep in sync with requirements.txt. 'opencv-contrib-python' was missing
    # here although the visualization code imports cv2 for depth rendering.
    install_requires=["numpy",
                      "pyexiftool",
                      "open3d",
                      "opencv-contrib-python",
                      "pyquaternion",
                      "matplotlib",
                      "tqdm",
                      "wget",
                      "pycolmap"
                      ],  # external packages as dependencies
    classifiers=[
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
49 |
--------------------------------------------------------------------------------