├── .gitignore
├── .gitmodules
├── .idea
│   ├── .gitignore
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── libraries
│   │   └── R_User_Library.xml
│   ├── markdown-navigator
│   ├── markdown-navigator.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── photo4D.iml
│   └── vcs.xml
├── LICENSE
├── MANIFEST.in
├── README.md
├── build
│   └── lib
│       └── photo4d
│           ├── Class_pcl_processing.py
│           ├── Class_photo4D.py
│           ├── Detect_Sift.py
│           ├── Image_utils.py
│           ├── Process.py
│           ├── Utils.py
│           ├── XML_utils.py
│           ├── __init__.py
│           ├── __version__.py
│           └── pdal_python_filter.py
├── dist
│   ├── photo4d-0.2.2-py2.py3-none-any.whl
│   └── photo4d-0.2.2.tar.gz
├── photo4d.egg-info
│   ├── PKG-INFO
│   ├── SOURCES.txt
│   ├── dependency_links.txt
│   ├── requires.txt
│   └── top_level.txt
├── photo4d
│   ├── Class_pcl_processing.py
│   ├── Class_photo4D.py
│   ├── Detect_Sift.py
│   ├── Image_utils.py
│   ├── MicMac-LocalChantierDescripteur.xml
│   ├── Process.py
│   ├── Utils.py
│   ├── XML_utils.py
│   ├── __init__.py
│   ├── __version__.py
│   └── pdal_python_filter.py
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | *.pyc
3 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "Pyxif"]
2 | path = Pyxif
3 | url = git@github.com:zenwerk/Pyxif.git
4 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Default ignored files
3 | /workspace.xml
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/libraries/R_User_Library.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/markdown-navigator:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/markdown-navigator.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/photo4D.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 ArcticSnow
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md LICENSE
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Photo4D: open-source time-lapse photogrammetry
2 |
3 | Contributors in alphabetical order:
4 | - Simon Filhol (simon.filhol@geo.uio.no)
5 | - Luc Girod (luc.girod@geo.uio.no)
6 | - Alexis Perret (aperret2010@hotmail.fr)
7 | - Guillaume Sutter (sutterguigui@gmail.com)
8 |
9 | ## Description
10 |
11 | This project consists of an automated program to generate point clouds from time-lapse sets of images taken by independent cameras. The software:
12 | 1. sorts images by timestamp,
13 | 2. assesses image quality based on luminance and blurriness,
14 | 3. automatically identifies GCPs through the stacks of images,
15 | 4. runs MicMac to compute point clouds, and
16 | 5. converts point clouds to rasters (not yet implemented).
17 |
18 | The project is built on open-source libraries, allowing public release.
19 |
20 | ## Reference
21 |
22 | Filhol, S., Perret, A., Girod, L., Sutter, G., Schuler, T. V., and Burkhart, J. F. (2019), Time-lapse Photogrammetry of Distributed Snow Depth During Snowmelt. Water Resour. Res., 55. https://doi.org/10.1029/2018WR024530
23 |
24 | URL: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2018WR024530
25 |
26 | Sample dataset: [DOI: 10.5281/zenodo.1745680](https://doi.org/10.5281/zenodo.1745680)
27 |
28 | ## Installation
29 | 1. Install the latest version of [MicMac](https://micmac.ensg.eu/index.php/Install)
30 |
31 | 2. Install Python >= 3.6 and, with Anaconda, create a virtual environment with the following packages (see the example setup after this list):
32 | - opencv
33 | - pandas
34 | - matplotlib
35 | - lxml
36 | - pillow
37 | - [pyxif](https://github.com/zenwerk/Pyxif) (needs to be downloaded from https://github.com/zenwerk/Pyxif):
38 | ```sh
39 | wget https://github.com/zenwerk/Pyxif/archive/master.zip
40 | unzip master.zip
41 | cd Pyxif-master
42 | mv LICENCE.txt LICENSE.txt # As there is a typo in the License filename
43 | python setup.py install
44 | ```
45 | - [PDAL](https://pdal.io/)
46 | - json (part of the Python standard library)
47 |
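One possible way to create such an environment with conda, as a sketch (the conda-forge channel distributing PDAL and its Python bindings is an assumption to verify for your system):

```sh
# Hypothetical environment setup; adjust package names/versions as needed
conda create -n photo4d python=3.6 opencv pandas matplotlib lxml pillow
conda activate photo4d
conda install -c conda-forge pdal python-pdal
```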
48 | 3. The package is available via PyPI:
49 |
50 | ```sh
51 | pip install photo4d
52 | ```
53 |
54 | ## Usage
55 |
56 | ### 1. Prepare your environment
57 | - Create a Python >= 3.6 virtual environment in which you install the required libraries (see above).
58 | - Create a project folder containing a folder called Images, which itself contains one folder per
59 | camera. For instance, folder /Cam1 contains all the images from Camera 1:
60 |
61 |
62 | ```bash
63 | ├── Project
64 | └── Images
65 | ├── Cam1
66 | ├── Cam2
67 | ├── Cam3
68 | └── Cam...
69 | ```
70 |
71 |
72 | ### 2. Use the Photo4d class to process the images through MicMac
73 |
74 | Set the paths correctly in the file photo4d/Class_photo4D.py, and follow these steps:
75 |
76 | ```python
77 |
78 | ############################################################
79 | ## Part 1
80 |
81 | import photo4d as p4d
82 |
83 | # Create a new photo4d object by indicating the Project path
84 | myproj = p4d.Photo4d(project_path="point to project folder /Project")
85 |
86 | # Algorithm to sort images in triplets, and create the reference table listing, for each set: date, validity flag, image names
87 | myproj.sort_picture()
88 |
89 | # Algorithm to check picture quality (exposure and blurriness)
90 | myproj.check_picture_quality()
91 |
92 | ############################################################
93 | ## Part 2: Estimate camera orientation
94 |
95 | # Compute camera orientation using the timeSIFT method:
96 | myproj.timeSIFT_orientation()
97 |
98 | # Convert a text file containing the GCP coordinates to the proper format (.xml) for Micmac
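# (Illustration with hypothetical values: an "N_X_Y_Z" text file holds one
# space-delimited "name X Y Z" point per line, e.g.
# GCP1 416234.5 6716012.3 1205.2)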
99 | myproj.prepare_gcp_files(path_to_GCP_file, file_format="N_X_Y_Z")
100 |
101 | # Select a set to input GCPs
102 | myproj.set_selected_set("DSC02728.JPG")
103 |
104 | # Input GCPs in 3 steps
105 | # First select 3 to 5 GCPs to pre-orient the images
106 | myproj.pick_initial_gcps()
107 |
108 | # Apply transformation based on the few GCPs previously picked
109 | myproj.compute_transform()
110 |
111 | # Pick additional GCPs, whose image positions are now pre-estimated
112 | myproj.pick_all_gcps()
113 |
114 | ############################################################
115 | ## Part 2, optional: pick GCPs on an extra image set
116 | ## If you need to pick GCPs on another set of images, change the selected set (this can be repeated n times):
117 | #myproj.compute_transform()
118 | #myproj.set_selected_set("DSC02871.JPG")
119 | #myproj.pick_all_gcps()
120 |
121 | # Compute final transform using all picked GCPs
122 | myproj.compute_transform(doCampari=True)
123 |
124 | ## FUNCTION TO CHANGE FOR TIMESIFT
125 | # myproj.create_mask() #To be finished
126 |
127 | ############################################################
128 | ## Part 3: Compute point clouds
129 |
130 | # Compute the point cloud, correlation matrix, and depth matrix for each set of images
131 | myproj.process_all_timesteps()
132 |
133 | # Clean (remove) the temporary working directory
134 | myproj.clean_up_tmp()
135 |
136 | ```
137 |
138 | ### 3. Process the point clouds with [PDAL](https://pdal.io/)
139 |
140 | **Currently Under Development**
141 |
142 | [PDAL](https://pdal.io/) is a point cloud processing library with Python bindings. It offers an extensive set of algorithms, and here we wrap a general method to filter the point clouds derived in the previous step and extract Digital Elevation Models (DEMs) from them.
143 |
144 | MicMac produces point clouds in the `.ply` format. The functions in the Python class `pcl_process` can convert, filter and crop the `.ply` point clouds and save them as `.las` files. Then the function `convert_all_pcl2dem()` will convert all point clouds stored in `my_pcl.las_pcl_flist` to DEMs.
145 |
146 | With the function `my_pcl.custom_pipeline()`, it is possible to build a custom processing pipeline following the PDAL JSON syntax. This pipeline can then be executed by running the function `my_pcl.apply_custom_pipeline()` (see the sketch after the code block below).
147 |
148 | See the source file [Class_pcl_processing.py](./photo4d/Class_pcl_processing.py) for more details.
149 |
150 | ```python
151 |
152 | # Create a pcl_process object, indicating the path to the photo4d project
153 | my_pcl = p4d.pcl_process(project_path="path_to_project_folder")
154 |
155 | my_pcl.resolution = 1 # set the resolution of the final DEMs
156 |
157 | # Set the bounding box of the Region of Interest (ROI)
158 | my_pcl.crop_xmin = 416100
159 | my_pcl.crop_xmax = 416900
160 | my_pcl.crop_ymin = 6715900
161 | my_pcl.crop_ymax = 6716700
162 | my_pcl.nodata = -9999
163 |
164 | # add the paths of the .ply point cloud files to the python class
165 | my_pcl.add_ply_pcl()
166 |
167 | # filter the point clouds with the pdal routine, and save the resulting point clouds as .las files
168 | my_pcl.filter_all_pcl()
169 |
170 | # add path of the .las files
171 | my_pcl.add_las_pcl()
172 |
173 | # convert the .las point clouds to DEMs (GeoTiff)
174 | my_pcl.convert_all_pcl2dem()
175 |
176 | # Extract Value orthophoto from RGB
177 | my_pcl.extract_all_ortho_value()
178 |
179 | ```
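For example, here is a minimal sketch of a custom pipeline (the input file name, the SMRF ground filter and the output name are illustrative assumptions, not part of photo4d):

```python
# Hypothetical custom PDAL pipeline: classify ground points with SMRF,
# keep only the ground returns, and write them to a new .las file.
custom = my_pcl.custom_pipeline({
    "pipeline": [
        "input.las",                          # assumed input file
        {"type": "filters.smrf"},             # PDAL's SMRF ground classification
        {"type": "filters.range",
         "limits": "Classification[2:2]"},    # keep ground points (class 2) only
        {"type": "writers.las",
         "filename": "input_ground.las"}      # assumed output file name
    ]
})
my_pcl.apply_custom_pipeline(custom)
```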
180 |
181 | After this section you have clean point clouds, as well as DEMs in GeoTiff format, ready to use!
182 |
183 |
184 | ## Resources
185 |
186 | - Micmac: http://micmac.ensg.eu/index.php/Accueil
187 | - Image processing libraries: skimage, OpenCV, Pillow
188 | - Python package to read EXIF data: [pyxif](https://github.com/zenwerk/Pyxif)
189 |
190 | ## Development
191 |
192 | Message us to be added as a contributor; you can then modify the code to your own convenience with the following steps.
193 |
194 | To work on a development version and keep using the latest changes, install it with the following:
195 |
196 | ```shell
197 | git clone git@github.com:ArcticSnow/photo4D.git
198 | pip install -e [path2folder/photo4D]
199 | ```
200 |
201 | and to upload the latest changes to PyPI.org, simply:
202 |
203 | 1. change the version number in the file ```photo4d/__version__.py```
204 | 2. run from a terminal from the photo4D folder, given your $HOME/.pypirc is correctly set:
205 |
206 | ```shell
207 | python setup.py upload
208 | ```
209 |
210 |
--------------------------------------------------------------------------------
/build/lib/photo4d/Class_pcl_processing.py:
--------------------------------------------------------------------------------
1 | '''
2 | Class and functions to process the point clouds
3 |
4 |
5 |
6 | '''
7 |
8 | import pdal, json, glob, os
9 |
10 |
11 | class pcl_process(object):
12 |
13 |
14 | def __init__(self, project_path, ext='ply'):
15 |
16 | if not os.path.exists(project_path):
17 | print("ERROR The path " + project_path + " doesn't exists")
18 | return
19 | else:
20 | self.project_path = project_path
21 | os.chdir(self.project_path)
22 |
23 | # parameters for point cloud filtering
24 | self.x_offset = 410000 # these values are default for Finse, NORWAY
25 | self.y_offset = 6710000 # these values are default for Finse, NORWAY
26 |
27 | # Cropping area
28 | self.crop_xmin = 416100
29 | self.crop_xmax = 416900
30 | self.crop_ymin = 6715900
31 | self.crop_ymax = 6716700
32 |
33 | # Raster bounding box, default is same as cropping box.
34 | self.raster_xmin = self.crop_xmin
35 | self.raster_xmax = self.crop_xmax
36 | self.raster_ymin = self.crop_ymin
37 | self.raster_ymax = self.crop_ymax
38 |
39 | # parameters for conversion to GeoTiff
40 | self.resolution = 1
41 | self.radius = self.resolution * 1.4
42 | self.nodata = -9999
43 | self.gdaldriver = "GTiff"
44 | self.output_type = ["min", "max", "mean", "idw", "count", "stdev"]
45 |
46 |
47 | def add_ply_pcl(self):
48 | os.chdir(self.project_path)
49 | self.ply_pcl_flist = glob.glob("*.ply")
50 | print("=======================\n PLY point clouds added: ")
51 | for file in self.ply_pcl_flist:
52 | print(file)
53 | print(".......................")
54 | print(str(self.ply_pcl_flist.__len__()) + " point clouds added")
55 | print("=======================")
56 |
57 |
58 | def add_las_pcl(self):
59 | os.chdir(self.project_path)
60 | self.las_pcl_flist = glob.glob("*.las")
61 | print("=======================\n LAS point clouds added: ")
62 | for file in self.las_pcl_flist:
63 | print(file)
64 | print(".......................")
65 | print(str(self.las_pcl_flist.__len__()) + " point clouds added")
66 | print("=======================")
67 |
68 |
69 | def pipeline_realization(self, pip_json, print_result=True):
70 | try:
71 | # ===============================================
72 | # Pipeline execution
73 | pipeline = pdal.Pipeline(pip_json)
74 | pipeline.validate() # check if our JSON and options were good
75 |
76 | pipeline.execute()
77 |
78 | if print_result:
79 | arrays = pipeline.arrays
80 | metadata = pipeline.metadata
81 | log = pipeline.log
82 | print("\n================")
83 | print("Arrays:")
84 | print(arrays)
85 | print("\n================")
86 | print("Metadata:")
87 | print(metadata)
88 | print("\n================")
89 | print("Log:")
90 | print(log)
91 |
92 | print("pdal pipeline finished")
93 | return True
94 | except Exception as e:
95 | print(" Error !! " + str(e))
96 | return False
97 |
98 |
99 | def filter_pcl(self, file_input, file_output, zmin=-200, zmax=4000, print_result=True):
100 | '''
101 | Function to filter a point cloud: crop to the ROI, keep Z within [zmin, zmax] (the defaults are placeholders, adjust them to your site), remove statistical outliers, and save the output to .las format
102 | '''
103 | pip_filter_json = json.dumps(
104 | {
105 | "pipeline":
106 | [
107 | file_input,
108 | {
109 | "type":"filters.python",
110 | "script":"pdal_python_filter.py",
111 | "function":"add_XY_UTM",
112 | "pdalargs":{"x_offset":self.x_offset,"y_offset":self.y_offset}
113 | },
114 | {
115 | "type":"filters.crop",
116 | "bounds":str(([self.crop_xmin, self.crop_xmax], [self.crop_ymin, self.crop_ymax]))
117 | },
118 | {
119 | "type": "filters.range",
120 | "limits": "Z[" + str(zmin) + ":" + str(zmax) + "]"
121 | },
122 | {
123 | "type":"filters.lof",
124 | "minpts":20
125 | },
126 | {
127 | "type":"filters.range",
128 | "limits":"LocalOutlierFactor[:1.2]"
129 | },
130 | {
131 | "type": "filters.range",
132 | "limits": "Classification![7:12]"
133 | },
134 | {
135 | "type":"writers.las",
136 | "filename":file_ouput,
137 | "scale_x":1,
138 | "scale_y":1,
139 | "scale_z":1
140 |
141 | }
142 | ]
143 | }
144 | )
145 | self.pipeline_realization(pip_filter_json, print_result=print_result)
146 |
147 |
148 | def filter_all_pcl(self, print_result=True):
149 | '''
150 | Function to process all pcl with filter_pcl() function
151 | '''
152 | print("=======================")
153 | for file in self.ply_pcl_flist:
154 | self.filter_pcl(file, file[:-4] + '_clean.las', print_result=print_result)
155 | print(".......................")
156 | print("All PLY files filtered")
157 | print("=======================")
158 |
159 |
160 | def convert_pcl2dem(self, input_file, output_file, print_result=True):
161 | '''
162 | Function to convert .las point cloud to a raster (.tif)
163 | '''
164 | pip_dem = json.dumps(
165 | {
166 | "pipeline":[
167 | {"type": "readers.las",
168 | "filename": file_input
169 | },
170 | {
171 | "filename": file_ouput # file.split('.')[0] + '_' + str(resolution) + 'm.tif',
172 | "gdaldriver":"GTiff",
173 | "output_type":"all",
174 | "resolution":self.resolution,
175 | "radius": self.radius,
176 | "bounds": str(([self.raster_xmin, self.raster_xmax], [self.raster_ymin, self.raster_ymax])),
177 | "type": "writers.gdal",
178 | "nodata":self.nodata
179 | }
180 | ]
181 | })
182 | self.pipeline_realization(pip_dem, print_result=print_result)
183 |
184 |
185 | def convert_all_pcl2dem(self, print_result=True):
186 | '''
187 | Function to process all pcl with the convert_pcl2dem() function
188 | '''
189 | print("=======================")
190 | for file in self.las_pcl_flist:
191 | self.convert_pcl2dem(file, file[:-4] + '_' + str(self.resolution) + 'm.tif', print_result=print_result)
192 | print(".......................")
193 | print("All LAS converted to DEMs")
194 | print("=======================")
195 |
196 | def extract_ortho_value(self, input_file, output_file, print_result=True):
197 | '''
198 | Function to derive a monochrome Value orthophoto (.tif) from the RGB of a .las point cloud
199 | '''
200 | pip_ortho = json.dumps(
201 | {
202 | "pipeline":[
203 | {"type": "readers.las",
204 | "filename": input_file
205 | },
206 | {
207 | "type": "filters.python",
208 | "script": "pdal_python_filter.py",
209 | "function": "rgb2value",
210 | "add_dimension": "Value",
211 | "module": "anything"
212 | },
213 | {
214 | "filename": output_file#file.split('.')[0] + '_' + str(resolution) + 'm_value.tif',
215 | "gdaldriver":"GTiff",
216 | "output_type":"mean",
217 | "dimension" : "Value",
218 | "resolution":self.resolution,
219 | "radius": self.radius,
220 | "bounds": str(([self.raster_xmin, self.raster_xmax], [self.raster_ymin, self.raster_ymax])),
221 | "type": "writers.gdal",
222 | "nodata":self.nodata
223 | }
224 | ]
225 | })
226 | self.pipeline_realization(pip_ortho, print_result=print_result)
227 |
228 | def extract_all_ortho_value(self, print_result=True):
229 | '''
230 | Function to process all pcl and derive an orthophoto containing the Value (computed from RGB to HSV)
231 | '''
232 |
233 | print("=======================")
234 | for file in self.las_pcl_flist:
235 | self.extract_ortho_value(file, file[:-4] + '_' + str(self.resolution) + 'm_value.tif', print_result=print_result)
236 | print(".......................")
237 | print("All LAS converted to Value orthophoto (monochrome)")
238 | print("=======================")
239 |
240 |
241 | def custom_pipeline(self, json_pipeline):
242 | '''
243 | Function to enter a custom-made pdal pipeline. Input should be a dict following the PDAL JSON pipeline syntax
244 | '''
245 | return json.dumps(json_pipeline)
246 |
247 |
248 | def apply_custom_pipeline(self, pipeline, file_list=None, print_result=True):
249 | """
250 | Function to apply a custom pipeline to a list of point cloud files (defaults to self.las_pcl_flist)
251 | """
252 | if file_list is None: file_list = self.las_pcl_flist
253 | print("=======================")
254 | for file in file_list: # note: the pipeline JSON itself must reference the file names
255 | self.pipeline_realization(pipeline, print_result)
255 | print(".......................")
256 | print("Custom pipeline applied to all files")
257 | print("=======================")
258 |
259 |
260 |
261 | if __name__ == "__main__":
262 |
263 | # Create a pcl_process object, indicating the path to the photo4d project
264 | my_pcl = pcl_process(project_path="path_to_project_folder")
265 |
266 | my_pcl.resolution = 1 # set the resolution of the final DEMs
267 |
268 | # Set the bounding box of the Region of Interest (ROI)
269 | my_pcl.crop_xmin = 416100
270 | my_pcl.crop_xmax = 416900
271 | my_pcl.crop_ymin = 6715900
272 | my_pcl.crop_ymax = 6716700
273 | my_pcl.nodata = -9999
274 |
275 | # add the paths of the .ply point cloud files to the python class
276 | my_pcl.add_ply_pcl()
277 |
278 | # filter the point clouds with the pdal routine, and save the resulting point clouds as .las files
279 | my_pcl.filter_all_pcl()
280 |
281 | # add path of the .las files
282 | my_pcl.add_las_pcl()
283 |
284 | # convert the .las point clouds to DEMs (GeoTiff)
285 | my_pcl.convert_all_pcl2dem()
286 |
287 | # Extract Value orthophoto from RGB
288 | my_pcl.extract_all_ortho_value()
289 |
290 | ###########
291 | # Custom processing pdal pipeline
292 |
293 |
294 |
295 |
--------------------------------------------------------------------------------
/build/lib/photo4d/Class_photo4D.py:
--------------------------------------------------------------------------------
1 | '''
2 | photo4d main class: drives the time-lapse photogrammetry pipeline (Part I)
3 | '''
7 |
8 | # import public library
9 | import os
10 | from os.path import join as opj
11 | import numpy as np
12 | from typing import Union
13 | from shutil import copyfile, rmtree, copytree
14 | from distutils.dir_util import copy_tree
15 |
16 | # Import project libary
17 | import photo4d.Process as proc
18 | import photo4d.Utils as utils
19 | import photo4d.Detect_Sift as ds
20 | import photo4d.Image_utils as iu
21 |
22 |
23 | class Photo4d(object):
24 | # Class constants
25 | # folders
26 | IMAGE_FOLDER = 'Images'
27 | ORI_FOLDER = "Ori-Ini"
28 | ORI_FINAL = "Ori-Bascule"
29 | MASK_FOLDER = 'Masks'
30 | GCP_FOLDER = 'GCP'
31 | RESULT_FOLDER = "Results"
32 | # file names
33 | GCP_COORD_FILE_INIT = 'GCPs_coordinates.xml'
34 | GCP_COORD_FILE_FINAL = 'GCPs_pick-S3D.xml'
35 | DF_DETECT_FILE = 'df_detect.csv'
36 | SET_FILE = 'set_definition.txt'
37 | GCP_PRECISION=0.2 # GCP precision in m
38 | GCP_POINTING_PRECISION=10 # Pointing precision of GCPs in images (pixels)
39 | GCP_PICK_FILE = 'GCPs_pick.xml'
40 | GCP_PICK_FILE_2D = 'GCPs_pick-S2D.xml'
41 | GCP_DETECT_FILE = 'GCPs_detect-S2D.xml'
42 | GCP_NAME_FILE = 'GCPs_names.txt'
43 | shift=[410000, 6710000, 0]
44 | useMask=False
45 | # Parameters
46 | distortion_model="Figee"
47 |
48 |
49 | def __init__(self, project_path, ext='JPG'):
50 | if not os.path.exists(project_path):
51 | print("ERROR The path " + project_path + " doesn't exists")
52 | return
53 |
54 | # add main folder
55 | self.project_path = os.path.abspath(project_path)
56 | print("Creation of object photo4d on the folder " + self.project_path)
57 |
58 | # add camera folders
59 | if os.path.exists(opj(self.project_path, Photo4d.IMAGE_FOLDER)):
60 | self.cam_folders = [opj(self.project_path, Photo4d.IMAGE_FOLDER, cam) for cam in
61 | os.listdir(opj(self.project_path, Photo4d.IMAGE_FOLDER))]
62 | self.nb_folders = len(self.cam_folders)
63 | print("Added {} camera folders : \n {}".format(self.nb_folders, '\n '.join(self.cam_folders)))
64 | else:
65 | print('You must create a folder "' + Photo4d.IMAGE_FOLDER + '/" containing your camera folders')
66 | return
67 |
68 | # =========================================================================
69 | # add picture sets
70 | picture_set_def = opj(self.project_path, Photo4d.SET_FILE)
71 | if os.path.exists(picture_set_def):
72 | self.sorted_pictures = utils.pictures_array_from_file(picture_set_def)
73 | print("Added picture sets from " + picture_set_def)
74 | else:
75 | self.sorted_pictures = None
76 | # set default selected set to the last one
77 | self.selected_picture_set = -1
78 |
79 | # =========================================================================
80 | # add initial orientation
81 | if os.path.exists(opj(self.project_path, Photo4d.ORI_FOLDER)):
82 | print("Added initial orientation")
83 | self.in_ori = opj(self.project_path, Photo4d.ORI_FOLDER)
84 | else:
85 | self.in_ori = None
86 |
87 | # =========================================================================
88 | # add image masks
89 | if os.path.exists(opj(self.project_path, Photo4d.MASK_FOLDER)):
90 | self.masks = opj(self.project_path, Photo4d.MASK_FOLDER)
91 | print("Masks created from ") # todo add the set of masks (and ori)
92 | else:
93 | self.masks = None
94 |
95 | # add GCP initial files
96 | # =========================================================================
97 | if os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)):
98 | self.gcp_coord_file = opj(self.project_path,Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)
99 | print("Added gcp coordinates file")
100 | else:
101 | self.gcp_coord_file = None
102 | if os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)):
103 | self.gcp_names = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)
104 | else:
105 | self.gcp_names = None
106 |
107 | # extension of the images
108 | self.ext = ext
109 |
110 | # condition on picture dates, to process only a few sets
111 | self.cond = None
112 |
113 | # Create temp folder
114 | self.tmp_path = opj(self.project_path, "tmp")
115 | if not os.path.exists(self.tmp_path):
116 | os.makedirs(self.tmp_path)
117 |
118 |
119 |
120 | def __str__(self):
121 | string = "\n=======================================================================\n" \
122 | "Project Photo4d located at " + self.project_path + \
123 | "\n======================================================================="
124 | string += "\n Contains {} camera folders : \n {}".format(self.nb_folders, '\n '.join(self.cam_folders))
125 | if self.sorted_pictures is None:
126 | string += "\n Pictures unsorted"
127 | else:
128 | string += "\n Pictures sorted in {} sets ".format(len(self.sorted_pictures))
129 | string += "\n The current selected set is {}".format(self.sorted_pictures[self.selected_picture_set][1:])
130 |
131 | string += "\n=======================================================================\n"
132 | if self.in_ori is not None:
133 | string += " Initial orientation computed"
134 | string += "\n=======================================================================\n"
135 |
136 | if self.masks is not None:
137 | string += " Masks done "
138 | string += "\n=======================================================================\n"
139 |
140 | if self.gcp_coord_file is not None:
141 | string += " Absolute coordinates of GCPs are given"
142 | if getattr(self, "dict_image_gcp", None) is not None:
143 | string += "\n GCPs image coordinates are computed "
144 | string += "\n=======================================================================\n"
145 |
146 | return string
147 |
148 | def sort_picture(self, time_interval=600):
149 | self.sorted_pictures = iu.sort_pictures(self.cam_folders, opj(self.project_path, Photo4d.SET_FILE),
150 | time_interval=time_interval,
151 | ext=self.ext)
152 | return self.sorted_pictures
153 |
154 |
155 | def check_picture_quality(self, luminosity_thresh=1, blur_thresh=6):
156 | '''
157 | Function to check that pictures are not too dark and/or too blurry (e.g. fog)
158 | '''
159 | if self.sorted_pictures is None:
160 | print("ERROR You must launch the sort_pictures() method before check_pictures()")
161 | return
162 | self.sorted_pictures = iu.check_picture_quality(self.cam_folders, opj(self.project_path, Photo4d.SET_FILE),
163 | self.sorted_pictures,
164 | lum_inf=luminosity_thresh,
165 | blur_inf=blur_thresh)
166 | return self.sorted_pictures
167 |
168 |
169 | def timeSIFT_orientation(self, resolution=5000, distortion_mode='Fraser', display=False, clahe=False,
170 | tileGridSize_clahe=8):
171 | '''
172 | Function to compute the camera orientations over the whole image stack (timeSIFT) using the MicMac commands Tapioca and Tapas
173 | '''
174 | # change from working dir to tmp dir
175 | os.chdir(self.tmp_path)
176 |
177 | # copy the images of all valid sets into the temporary folder
178 |
179 |
180 | for s in range(len(self.sorted_pictures)):
181 | if self.sorted_pictures[s, 1]:
182 | selected_line = self.sorted_pictures[s]
183 |
184 | for i in range(len(self.cam_folders)):
185 | in_path = opj(self.cam_folders[i], selected_line[i + 2])
186 | out_path = opj(self.tmp_path, selected_line[i + 2])
187 | if clahe:
188 | iu.process_clahe(in_path, tileGridSize_clahe, out_path=out_path)
189 | else:
190 | copyfile(in_path, out_path)
191 |
192 | # Execute mm3d command for orientation
193 | success, error = utils.exec_mm3d("mm3d Tapioca All {} {}".format(".*" + self.ext, resolution), display=display)
194 | success, error = utils.exec_mm3d(
195 | "mm3d Tapas {} {} Out={}".format(distortion_mode, ".*" + self.ext, Photo4d.ORI_FOLDER[4:]), display=display)
196 |
197 | ori_path = opj(self.project_path, Photo4d.ORI_FOLDER)
198 | if success == 0:
199 | # copy orientation file
200 | if os.path.exists(ori_path): rmtree(ori_path)
201 | copytree(opj(self.tmp_path, Photo4d.ORI_FOLDER), ori_path)
202 | self.in_ori = ori_path
203 | else:
204 | print("ERROR Orientation failed\nerror : " + str(error))
205 |
206 | os.chdir(self.project_path)
207 |
208 |
209 | def create_mask_masterIm(self, del_pictures=True, master_folder_id=0):
210 | '''
211 | Create a mask on the image of the master_folder_id for the selected set
212 | Note : Only the mask of the central (MASTER) image is necessary
213 | '''
214 |
215 | if not os.path.exists(self.tmp_path): os.makedirs(self.tmp_path)
216 | # select the set of good pictures to estimate initial orientation
217 | selected_line = self.sorted_pictures[self.selected_picture_set]
218 | in_path = opj(self.cam_folders[master_folder_id], selected_line[master_folder_id + 2])
219 | out_path = opj(self.tmp_path, selected_line[master_folder_id + 2])
220 | copyfile(in_path, out_path)
221 | ds.exec_mm3d('mm3d SaisieMasqQT {} Name=Mask.tif'.format(out_path))
222 | self.useMask=True
223 |
224 | def prepare_gcp_files(self, gcp_coords_file, file_format='N_X_Y_Z', display=True):
225 | '''
226 | Function to prepare GCP coordinate from a textfile to Micmac xml format. Make sure your text file format is correct
227 | '''
228 |
229 | if not os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER)):
230 | os.makedirs(opj(self.project_path, Photo4d.GCP_FOLDER))
231 |
232 | # copy coordinates file into the project
233 | path2txt = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)[:-4] + ".txt"
234 | copyfile(gcp_coords_file, path2txt)
235 |
236 | success, error = utils.exec_mm3d('mm3d GCPConvert #F={} {}'.format(file_format, path2txt),
237 | display=display)
238 | if success == 0:
239 | self.gcp_coord_file = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)
240 | gcp_table = np.loadtxt(path2txt, dtype=str)
241 |
242 | try:
243 | gcp_name = gcp_table[:, file_format.split('_').index("N")]
244 | np.savetxt(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE), gcp_name, fmt='%s',
245 | newline=os.linesep)
246 | self.gcp_names = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)
247 | except ValueError: # todo add a coherent except
248 | print("ERROR prepare_GCP_files(): Check file format and file delimiter. Delimiter is any space")
249 | else:
250 | print("ERROR prepare_GCP_files(): Check file format and file delimiter. Delimiter is any space")
251 | return 0
252 |
253 | def pick_initial_gcps(self):
254 | '''
255 | Function to pick GCP locations on the reference set of images with no a priori.
256 |
257 | Pick a few GCPs (3 to 5) so that MicMac can compute a rough estimate of the camera orientation. Then use pick_all_gcps() to pick all GCPs of known location
258 | '''
259 | os.chdir(self.tmp_path)
260 |
261 | if self.gcp_coord_file is None or self.gcp_names is None:
262 | print("ERROR prepare_gcp_files must be applied first")
263 | return
263 | gcp_path = opj(self.project_path, Photo4d.GCP_FOLDER)
264 | copy_tree(opj(gcp_path), opj(self.tmp_path))
265 | # select the set of image on which to pick GCPs manually
266 | selected_line = self.sorted_pictures[self.selected_picture_set]
267 | file_set = "("
268 | for i in range(len(self.cam_folders)):
269 | file_set += selected_line[i + 2] + "|"
270 | file_set = file_set[:-1] + ")"
271 |
272 | commandSaisieAppuisInitQt='mm3d SaisieAppuisInitQt "{}" Ini {} {}'.format(file_set, self.GCP_NAME_FILE,
273 | self.GCP_PICK_FILE)
274 | print(commandSaisieAppuisInitQt)
275 | utils.exec_mm3d(commandSaisieAppuisInitQt)
276 |
277 | # Go back from tmp dir to project dir
278 | os.chdir(self.project_path)
279 |
280 |
281 | def pick_all_gcps(self, resolution=5000):
282 | '''
283 | Function to pick GCP locations on the reference set of images, with a predicted position.
284 |
285 | Pick all GCPs of known location.
286 | '''
287 |
288 | os.chdir(self.tmp_path)
289 |
290 | # select the set of image on which to pick GCPs manually
291 | selected_line = self.sorted_pictures[self.selected_picture_set]
292 | file_set = "("
293 | for i in range(len(self.cam_folders)):
294 | file_set += selected_line[i + 2] + "|"
295 | file_set = file_set[:-1] + ")"
296 |
297 | command='mm3d SaisieAppuisPredicQt "{}" Bascule-Ini {} {}'.format(file_set,
298 | self.GCP_COORD_FILE_INIT,
299 | self.GCP_PICK_FILE)
300 | print(command)
301 | utils.exec_mm3d(command)
302 |
303 | # Go back from tmp dir to project dir
304 | os.chdir(self.project_path)
305 |
306 | def compute_transform(self, doCampari=False):
307 | '''
308 | Function to apply the transformation computed from the GCPs to all images.
309 |
310 | Set doCampari=True once all points are input and you are ready to carry on.
311 | '''
312 |
313 | os.chdir(self.tmp_path)
314 |
315 | # select all the images
316 | file_set = ".*" + self.ext
317 |
318 | commandBasc = 'mm3d GCPBascule {} Ini Bascule-Ini {} {}'.format(file_set,
319 | self.GCP_COORD_FILE_INIT,
320 | self.GCP_PICK_FILE_2D)
321 | print(commandBasc)
322 | utils.exec_mm3d(commandBasc)
323 |
324 | if(doCampari):
325 | command = 'mm3d Campari {} Bascule-Ini Bascule GCP=[{},{},{},{}] AllFree=1'.format(file_set, self.GCP_COORD_FILE_INIT, self.GCP_PRECISION, self.GCP_PICK_FILE_2D, self.GCP_POINTING_PRECISION)
326 | print(command)
327 | success, error = utils.exec_mm3d(command)
328 | if success == 0:
329 | # copy orientation file
330 | ori_path = opj(self.project_path,self.ORI_FINAL)
331 | if os.path.exists(ori_path): rmtree(ori_path)
332 | copytree(opj(self.tmp_path, Photo4d.ORI_FINAL), ori_path)
333 | else:
334 | print("ERROR Orientation failed\nerror : " + str(error))
335 |
336 | # Go back from tmp dir to project dir
337 | os.chdir(self.project_path)
338 |
339 |
340 |
341 |
342 | def pick_ManualTiePoints(self):
343 | '''
344 | Function to pick additional points that can be set as 'GCPs'. These get coordinate estimates based on the camera orientation, and are used in other sets of images for triangulation.
345 | This way, we artificially increase the number of GCPs, and use the selected set of reference images as the absolute reference against which the other 3D models are oriented.
346 |
347 | Pick as many points as possible that are landmarks across all the sets of images.
348 | '''
349 |
350 | os.chdir(self.tmp_path)
351 |
352 | # select the set of image on which to pick GCPs manually
353 | selected_line = self.sorted_pictures[self.selected_picture_set]
354 | file_set = "("
355 | for i in range(len(self.cam_folders)):
356 | file_set += selected_line[i + 2] + "|"
357 | file_set = file_set[:-1] + ")"
358 |
359 | command='mm3d SaisieAppuisPredicQt "{}" Ori-Bascule {} {}'.format(file_set,
360 | self.GCP_COORD_FILE_INIT,
361 | self.GCP_PICK_FILE)
362 | print(command)
363 | utils.exec_mm3d(command)
364 | self.gcp_coord_file = opj(self.project_path,Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_FINAL)
365 |
366 | # Go back from tmp dir to project dir
367 | os.chdir(self.project_path)
368 |
369 |
370 | def process_all_timesteps(self, master_folder_id=0, clahe=False, tileGridSize_clahe=8,
371 | zoomF=1, Ori='Bascule', DefCor=0.0, shift=None, keep_rasters=True, display=False):
372 | if self.sorted_pictures is None:
373 | print("ERROR You must apply sort_pictures() before doing anything else")
374 | return
375 |
376 | proc.process_all_timesteps(self.tmp_path, self.sorted_pictures, opj(self.project_path, Photo4d.RESULT_FOLDER),
377 | clahe=clahe, tileGridSize_clahe=tileGridSize_clahe, zoomF=zoomF,
378 | master_folder_id=master_folder_id, Ori=Ori, useMask=self.useMask, DefCor=DefCor,
379 | shift=shift, keep_rasters=keep_rasters, display_micmac=display)
380 |
381 |
382 |
383 | def set_selected_set(self, img_or_index: Union[int, str]):
384 | if self.sorted_pictures is None:
385 | print("ERROR You must apply sort_pictures before trying to chose a set")
386 | return
387 | else:
388 | if type(img_or_index) == int:
389 | self.selected_picture_set = img_or_index
390 | print(
391 | "\n The current selected set is now {}".format(self.sorted_pictures[self.selected_picture_set][2:]))
392 | elif type(img_or_index) == str:
393 | found, i = False, 0
394 | while (not found) and (i < len(self.sorted_pictures)):
395 | if img_or_index in self.sorted_pictures[i]:
396 | found = True
397 | self.selected_picture_set = i
398 | print("\n The current selected set is now {}".format(
399 | self.sorted_pictures[self.selected_picture_set][2:]))
400 | i += 1
401 | if not found:
402 | print('image {} not in sorted_pictures'.format(img_or_index))
403 |
404 |
405 |
406 | def clean_up_tmp(self):
407 | '''
408 | Function to delete the working folder.
409 | '''
410 | try:
411 | rmtree(self.tmp_path)
412 | except FileNotFoundError:
413 | pass
414 | except PermissionError:
415 | print("Permission Denied, cannot delete " + self.tmp_path)
416 | except OSError:
417 | pass
418 |
419 |
420 |
421 | if __name__ == "__main__":
422 |
423 | ## Initialize the project
424 | myproj = Photo4d(project_path=r"C:\Users\lucg\Desktop\Test_V1_2019")
425 | # myproj.sort_picture()
426 | # myproj.check_picture_quality()
427 | # myproj.prepare_gcp_files(r"C:\Users\lucg\Desktop\Test_V1_2019\GCPs_coordinates_manual.txt",file_format="N_X_Y_Z")
428 |
429 | ## Create a mask on one of the master images to limit the area where correlation is attempted
430 | # myproj.create_mask_masterIm(1)
431 |
432 | ## Compute tie points throughout the stack
433 | # myproj.timeSIFT_orientation()
434 | ## TODO : mask tie points
435 |
436 | ## Deal with GCPs
437 | ## Select a set to input GCPs
438 | # myproj.set_selected_set("DSC02728.JPG")
439 | ## Input GCPs in 3 steps
440 | # myproj.pick_initial_gcps()
441 | # myproj.compute_transform()
442 | # myproj.pick_all_gcps()
443 | ## Optionally, change the selected set to add GCP input to more images (can be repeated n times):
444 | #myproj.compute_transform()
445 | #myproj.set_selected_set("DSC02871.JPG")
446 | #myproj.pick_all_gcps()
447 | #myproj.compute_transform(doCampari=True)
448 |
449 | ## Do the dense matching
450 | # myproj.process_all_timesteps()
451 |
452 | ## Cleanup
453 | # myproj.clean_up_tmp()
--------------------------------------------------------------------------------
/build/lib/photo4d/Detect_Sift.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import os
3 |
4 | #from photo4d.Process import pictures_array_from_file
5 | from photo4d.Image_utils import load_date
6 | import photo4d.XML_utils as uxml
7 | from photo4d.Utils import exec_mm3d
8 |
9 | from shutil import rmtree
10 | import pandas as pd
11 | import numpy as np
12 |
13 |
14 | """
15 | Compute GCP positions using a MicMac command : Tapioca
16 | """
17 |
18 |
19 | def cut_image(image_path, pos, kernel_size, output_name="", output_folder="/"):
20 | """
21 | extract a portion of an image, centered on pos, of a given size (works for .JPG only)
22 | :param image_path: path of the input image
23 | :param pos: tuple, position of the center of the output image, given as (row, col), i.e. (ypos, xpos)
24 | :param kernel_size: tuple, size of the output image, in pixels
25 | :param output_name: full name for output file (ex: "output.JPG")
26 | :param output_folder: folder for saving output, must already exist
27 | """
28 | if output_name == "": output_name = image_path.split('/')[-1].split(".")[-2] + "_cut" + ".JPG"
29 | if not os.path.exists(output_folder): os.makedirs(output_folder)
30 | img = cv.imread(image_path)
31 | pos = int(pos[0]), int(pos[1])
32 | ysize, xsize = img.shape[0], img.shape[1]
33 | if (0 <= pos[0] <= ysize) and (0 <= pos[1] <= xsize):
34 | xmin, xmax = pos[1] - kernel_size[1] // 2, (pos[1] + kernel_size[1] // 2)
35 | ymin, ymax = pos[0] - kernel_size[0] // 2, pos[0] + kernel_size[0] // 2
36 |
37 | output = img[ymin:ymax, xmin:xmax]
38 |
39 | cv.imwrite(output_folder + output_name, output)
40 | else:
41 | print("\033[0;31Position {} not in the picture {}, with size {}\image ignored\033[0m".format(pos, image_path,
42 | img.shape))
43 |
44 |
45 | def detect_from_s2d_xml(s2d_xml_path, folder_list, pictures_array, samples_folder_list=None,
46 | kernel_size=(200, 200), display_micmac=False):
47 | """
48 | write extracts files from an xml with image position, and launch detection of the points for all files in folder
49 | :param s2d_xml_path: file created by the function SaisieAppuisInitQT, in MicMac
50 | :param folder_list: list of folders containing pictures. One folder is for one camera
51 | :param samples_folder_list: list of folder where to save samples, in the same order as folder_list
52 | by default, create "Samples/" folder in each camera folder
53 | :param pictures_array: array containing names of pictures to process
54 | each row is considered as a set, and pictures names must be in the same order as secondary_folder_list
55 | the first item of a row is a boolean, indicating if the set is valid or not
56 | :param kernel_size: size of the portion of picture to cut for detection (in pixels)
57 | :param display_micmac: to activate or stop printing MicMac log
58 | :return:
59 | Data Frame where each row store initial and detected image position of the tie points :
60 | [image name, gcp name, TP coord X in image_ini, TP Y ini,index of folder/camera, date,
61 | TP coord X in image detect, TP Y detect, coord X GCP ini, Y GCP ini]
62 | """
63 | # do some checks, and set default values for sample_folder_list
64 | if s2d_xml_path[-4:] != ".xml": raise IOError("The parameter S2D_xml_path must be an .xml file")
65 | nb_folders = len(folder_list)
66 | for i in range(nb_folders):
67 | folder = folder_list[i]
68 | if not os.path.exists(folder):
69 | raise IOError("Invalid path " + folder + " in folder_list")
70 | if folder[-1] != "/": folder_list[i] += "/"
71 |
72 | if samples_folder_list is None:
73 | samples_folder_list = []
74 | for i in range(nb_folders):
75 | samples_folder_list.append(folder_list[i] + "Samples/")
76 | elif nb_folders != len(samples_folder_list):
77 | print("WARNING the parameter samples_folder_list must have the same number of folders as folder_list")
78 | return
79 | for i in range(nb_folders):
80 | samples_folder = samples_folder_list[i]
81 | if not os.path.exists(samples_folder):
82 | try:
83 | os.makedirs(samples_folder)
84 | except IOError:
85 | print("WARNING Invalid path " + samples_folder + " in samples_folder_list")
86 | return
87 | if samples_folder[-1] != "/": samples_folder_list[i] += "/"
88 |
89 | # ==================================================================================================================
90 | # collect data from xml
91 | dict_img = uxml.read_S2D_xmlfile(s2d_xml_path)
92 | panda_result = [] # store all the results
93 | # iterate over pictures
94 | for image in dict_img.keys():
95 |
96 | dict_measures = dict_img[image]
97 |
98 | # try to find the image in all folders (assuming all pictures have different names)
99 | # if the image is found, do the detection in this folder
100 | found = False
101 | i = 0
102 | while not found and i < nb_folders:
103 | if image in os.listdir(folder_list[i]):
104 | found = True
105 | # if the image is found, launch the detection
106 | # ==============================================
107 | print("\nDetection launched for picture {} as reference...".format(image))
108 | for gcp in dict_measures.keys():
109 | print("\n Detection of point {} in folder {}/{}".format(gcp, i + 1, nb_folders))
110 | pos_ini = dict_measures[gcp]
111 | date = load_date(folder_list[i] + image)
112 | # add a line for the master image, with the gcp position, because micmac won't launch
113 | # the detection on this one, but the point coordinates are still useful
114 | panda_result.append(
115 | [image, gcp, kernel_size[0]/2, kernel_size[1]/2, i, date,
116 | kernel_size[0] / 2, kernel_size[1] / 2, pos_ini[0], pos_ini[1], image])
117 |
118 | # creation of extract for each picture of the folder, around the point initial position
119 | print(" Create extract for detection :\n")
120 | images_list = [] # store pictures to use for detection
121 | for line in pictures_array:
122 | if line[0]:
123 | print(" - " + line[i + 1] + "... ", end="")
124 |
125 | cut_image(folder_list[i] + line[i + 1], (pos_ini[1], pos_ini[0]), kernel_size=kernel_size,
126 | output_folder=samples_folder_list[i] + gcp + "/", output_name=line[i + 1])
127 | images_list.append(line[i + 1])
128 | print("done")
129 |
130 | # launch Tapioca on the selected files
131 | # ==============================================
132 | # create file telling MicMac which files to process
133 | uxml.write_couples_file(samples_folder_list[i] + gcp + "/" + "couples.xml", image, images_list)
134 | os.chdir(samples_folder_list[i] + gcp + "/")
135 | print("\n Launching MicMac...")
136 | command = "mm3d Tapioca File couples.xml -1 ExpTxt=1"
137 | success, error = exec_mm3d(command, display_micmac)
138 |
139 | # read results and append it to result
140 | # ==============================================
141 | print(success)
142 | if success == 0:
143 | print(" Tapioca executed with success, reading results")
144 | # read output txt files
145 | for picture_recap in os.listdir("Homol/Pastis" + image):
146 | if picture_recap.split(".")[-1] == "txt":
147 | tie_points = uxml.get_tiepoints_from_txt("Homol/Pastis" + image + "/" + picture_recap)
148 | date = load_date(folder_list[i] + picture_recap[:-4])
149 |
150 | for tie_point in tie_points:
151 | # append each tie point coordinates to the result
152 | # [image name, gcp name, TP coord X in image_ini, TP Y ini,index of folder/camera,
153 | # date, TP coord X in image detect, TP Y detect, coord X GCP ini, Y GCP ini]
154 | panda_result.append(
155 | [".".join(picture_recap.split(".")[:-1]), gcp, tie_point[0], tie_point[1], i,
156 | date,
157 | tie_point[2], tie_point[3], pos_ini[0], pos_ini[1], image])
158 | try:
159 | rmtree("Pastis")
160 | rmtree("Tmp-MM-Dir")
161 | except PermissionError:
162 | print(" couldn't erase temporary files due to permission error")
163 | else:
164 | print(" WARNING Fail in Tapioca : " + str(error))
165 |
166 | else:
167 | i += 1
168 | if not found:
169 | print("\033[0;31Picture {} cannot be find in folder_list\033[0m".format(image))
170 |
171 | return pd.DataFrame(panda_result,
172 | columns=['Image', 'GCP', 'Xini', 'Yini', 'folder_index', 'date', 'Xdetect', 'Ydetect',
173 | 'Xgcp_ini', 'Ygcp_ini', 'Image_ini'])
174 |
175 |
176 | def extract_values(df, magnitude_max=50, nb_values=5, max_dist=50, kernel_size=(200, 200), method="Median"):
177 | """
178 | extract detected positions from the DataFrame containing tie points coordinates
179 | feel free to add new methods
180 | :param df: DataFrame like the one from detect_from_s2d()
181 | :param magnitude_max: max value in pixels for the magnitude of the vector (from ini to detect)
182 | :param nb_values: max values to be used for the method
183 | the values used are the closest from the GCP initial position
184 | :param max_dist: max value in pixel for the distance from the GCP to the vector origin
185 | :param kernel_size: size of the extracts used for detection (to determine coordinates of gcp in the extracts)
186 | :param method: method to use for computing positions
187 | :return: tuple with 2 elements:
188 | - a dictionary containing the computed position of GCPs in each picture, readable for the others functions,
189 | indexed first by picture names and then by GCP names
190 | - a panda DataFrame containing the computed position of GCPs in each picture
191 | columns :
192 | ['Image', 'GCP', 'Xpos', 'Ypos', 'nb_tiepoints', 'date','nb_close_tiepoints']
193 | """
194 |
195 | # compute new positions of GCP according to the shift of each tie point
196 | df['Xshift'] = df.Xgcp_ini + df.Xdetect - df.Xini
197 | df['Yshift'] = df.Ygcp_ini + df.Ydetect - df.Yini
198 |
199 | # compute vector module
200 | df['magnitude'] = np.sqrt((df.Xini - df.Xdetect) ** 2 + (df.Yini - df.Ydetect) ** 2)
201 |
202 | # compute vector direction
203 | df['direction'] = np.arctan2((df.Xini - df.Xdetect), (df.Yini - df.Ydetect)) * 180 / np.pi + 180
204 |
205 | # compute the distance from the gcp to the tie point in the initial image (gcp is in the center of the extracts)
206 | pos_center = kernel_size[0] / 2, kernel_size[1] / 2
207 | df['dist'] = np.sqrt((df.Xini - pos_center[0]) ** 2 + (df.Yini - pos_center[1]) ** 2)
208 |
209 | # filter outliers having an incoherent magnitude
210 | df_filtered = df.loc[df.magnitude <= magnitude_max]
211 |
212 | dic_image_gcp = {}
213 | result = []
214 | # iterate over images
215 | for image, group in df_filtered.groupby(['Image']):
216 | dic_gcp = {}
217 | for gcp, group_gcp in group.groupby(['GCP']):
218 | nb_tiepoints = group_gcp.shape[0]
219 | group_gcp_filtered = group_gcp.loc[group_gcp.dist <= max_dist]
220 | nb_close_tiepoints = group_gcp_filtered.shape[0]
221 | group2 = group_gcp_filtered.nsmallest(nb_values, 'dist')
222 | if group_gcp_filtered.shape[0] != 0: # if there are no values left in the DataFrame, the point is ignored
223 | if method == "Median":
224 | measure = group2.Xshift.median(), group2.Yshift.median()
225 | elif method == "Mean":
226 | measure = group2.Xshift.mean(), group2.Yshift.mean()
227 | elif method == 'Min':
228 | measure = group2.Xshift.min(), group2.Yshift.min()
229 | else:
230 | print('Method must be one of these values:\n"Median"\n"Min"\n"Mean"')
231 | return
232 | date = group2.date.min()
233 | dic_gcp[gcp] = measure
234 |
235 |
236 | result.append([image, gcp, measure[0], measure[1], nb_tiepoints, date, nb_close_tiepoints])
237 | if dic_gcp != {}: dic_image_gcp[image] = dic_gcp
238 |
239 |
240 | return dic_image_gcp, pd.DataFrame(result, columns=['Image', 'GCP', 'Xpos', 'Ypos', 'nb_tiepoints', 'date',
241 | 'nb_close_tiepoints'])
242 |
243 |
244 | if __name__ == "__main__":
245 | df = detect_from_s2d_xml(
246 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/GCP/GCPs_pick-S2D.xml",
247 | ["C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_east",
248 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_mid",
249 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_west"],
250 | # pictures_array=pictures_array_from_file(
251 | # "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/set_definition.txt"),
252 | display_micmac=False
253 | )
254 | #print(df)
255 | #df.to_csv("C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/test_beau.csv", sep=",")
256 | # df = pd.read_csv("C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/test_sift_camtot_new_gcp.csv")
257 | # result = extract_values(df, threshold=50, nb_values=5, max_dist=200, method="Median")
258 | # print(result[0])
259 |
--------------------------------------------------------------------------------
/build/lib/photo4d/Image_utils.py:
--------------------------------------------------------------------------------
1 | # coding : utf8
2 | """
3 |
4 | To keep metadata when we transform the picture, we use the module pyxif (MIT License), available at :
5 | https://github.com/zenwerk/Pyxif
6 | """
7 |
8 |
9 | from datetime import datetime
10 | import pyxif, os
11 | import cv2 as cv
12 | import numpy as np
13 |
14 |
15 |
16 | def sort_pictures(folder_path_list, output_path, ext="jpg", time_interval=600):
17 | """
18 | Regroup pictures from different folders if they are taken within time_interval seconds of each other.
19 | Result is stored in an array/file,
20 | :param folder_path_list: list of the path to folders containing pictures, each folder is for one camera
21 | :param output_path: path of the output .txt file containing picture sets
22 | :param ext: extension of the pictures
23 | :param time_interval: interval in seconds, which corresponds to the maximum time elapsed between the shots
24 | :return: array with the sorted pictures. For each set a boolean is added, always True, but can be modified later
25 | """
26 | print("\n Collecting files\n..........................................")
27 | # create a list containing image names and dates for each folder
28 | image_lists = []
29 | for folder_path in folder_path_list:
30 | image_date_list = []
31 | flist = os.listdir(folder_path)
32 | for filename in flist:
33 | try:
34 | if filename.split(".")[-1].lower() == ext.lower():
35 | image_date_list.append((filename, load_date(os.path.join(folder_path,filename))))
36 | except IndexError:
37 | pass
38 | image_lists.append(image_date_list)
39 | if len(image_lists) < 2:
40 | print("WARNING not enough folders\nTwo or more folders are needed to sort files")
41 | return None
42 | elif [] in image_lists:
43 | print("WARNING No image found in one or many folder(s)")
44 | return None
45 |
46 | sorted_pictures = []
47 | print(" Checking dates\n..........................................")
48 | with open(output_path, 'w') as f:
49 | f.write("# Pictures taken within {} of interval\n".format(time_interval))
50 |
51 | good, bad = 0, 0 # counters for correct and wrong sets
52 | # loop on the image of the first folder
53 | for image_ref in image_lists[0]:
54 | date_ref = image_ref[1]
55 | pic_group = np.empty(len(image_lists) + 2, dtype=object)
56 | pic_group[0] = date_ref.strftime("%Y-%m-%dT%H-%M-%S")
57 | pic_group[1] = False # pic_group[1] is a boolean, True if a picture is found in every folder
58 | pic_group[2] = image_ref[0]
59 | # for_file = [image_ref[0]] # list of the images taken within the interval
60 |
61 | # check for pictures taken whithin the interval
62 | for j in range(1, len(image_lists)):
63 | folder = image_lists[j] # list of the (filename, date) tuples of one folder
64 | i, found = 0, False
65 | while not found and i < len(folder):
66 | date_var = folder[i][1]
67 | diff = abs(date_ref - date_var)
68 | if diff.days * 86400 + diff.seconds < time_interval: # if the two pictures are taken within the interval
69 | found = True
70 | pic_group[j + 2] = folder[i][0]
71 | i += 1
72 |
73 | if None not in pic_group:
74 | good += 1
75 | pic_group[1] = True
76 | print(" Pictures found in every folder corresponding to the timeInterval " + pic_group[0] + "\n")
77 | else:
78 | bad += 1
79 | print(" Missing picture(s) corresponding to the timeInterval " + pic_group[0] + "\n")
80 |
81 | sorted_pictures.append(pic_group)
82 | with open(output_path, 'a') as f:
83 | f.write(pic_group[0] + "," + str(pic_group[1]) + "," + ",".join(str(x) for x in pic_group[2:]) + "\n")
84 |
85 |     end_str = "# {} good sets of pictures found, {} incomplete sets, out of a total of {} sets".format(good, bad, good + bad)
86 | print(end_str)
87 | with open(output_path, 'a') as f:
88 | f.write(end_str)
89 | return np.array(sorted_pictures)
90 |
91 |
92 |
93 | def check_picture_quality(folder_list, output_path, pictures_array, lum_inf, blur_inf):
94 | """
95 | This function is supposed to be called after sort_pictures, as it uses the kind of array created by sort_pictures,
96 | which could be either collected from the return value of the function, or the file "linked_files.txt" created in
97 | the main folder
98 | It will filter pictures in which brightness is inferior to lum_inf and the "blur" (variance of Laplacian) is
99 | inferior to blur_min
100 | :param folder_list:
101 | :param output_path:
102 | :param pictures_array:
103 | :param lum_inf:
104 | :param blur_min:
105 | :return: same array, but some booleans will be set to False
106 | """
107 | print("\n Checking pictures\n..........................................")
108 |
109 | with open(output_path, 'w') as f:
110 | f.write(
111 | "# Pictures filtered with a minimum value of {} for brightness, {} for the variance of Laplacian\n".format(
112 | lum_inf, blur_inf))
113 |
114 | good, bad = 0, 0
115 | I, J = pictures_array.shape
116 | for i in range(I):
117 | if pictures_array[i, 1]:
118 | min_lum = 9999
119 | min_blur = 9999
120 | for j in range(2, J):
121 | path = os.path.join(folder_list[j - 2],pictures_array[i, j])
122 | lum = load_bright(path)
123 |
124 | if lum < min_lum:
125 | min_lum = lum
126 | blur = blurr(path, 3)
127 | if blur < min_blur:
128 | min_blur = blur
129 |
130 | if min_lum < lum_inf or min_blur < blur_inf:
131 | pictures_array[i, 1] = False
132 | bad += 1
133 | else:
134 | good += 1
135 |
136 | with open(output_path, 'a') as f:
137 | for line in pictures_array:
138 | f.write(str(line[0]) + "," + str(line[1]) + "," + ",".join(str(line[2:])) + "\n")
139 | end_line = " {} good set of pictures found, {} rejected sets, on a total of {} sets".format(good, bad, good + bad)
140 | f.write("#" + end_line)
141 | print(end_line)
142 | return pictures_array
143 |
144 |
145 |
146 | def load_date(filename):
147 | """
148 |     Load the date of the shot, according to the image metadata
149 | :param filename: name/path of the file
150 | :return: datetime format
151 | """
152 | try:
153 | zeroth_dict, exif_dict, gps_dict = pyxif.load(filename)
154 | date,time=exif_dict[pyxif.PhotoGroup.DateTimeOriginal][1].split(" ")
155 | year, month,day = date.split(":")
156 | hour,minute,sec = time.split(":")
157 | dateimage= datetime(int(year), int(month), int(day), int(hour), int(minute) ,int(sec))
158 | return dateimage
159 | except KeyError:
160 | print("WARNING No date for file " + filename)
161 | return None
162 | except FileNotFoundError:
163 | print("WARNING Could not find file " + filename )
164 | return None
165 |
166 |
167 | def load_bright(filename):
168 | """
169 |     Load the luminosity of the shot scene, according to the image metadata
170 | :param filename: name/path of the file
171 | :return: float, level of brightness
172 |
173 | TODO: Add a method to estimate BrightnessValue of image if the field is not available from picture EXIF.
174 | """
175 | try:
176 | zeroth_dict, exif_dict, gps_dict = pyxif.load(filename)
177 | num,denom=exif_dict[pyxif.PhotoGroup.BrightnessValue][1]
178 | brightness=num/denom
179 | return brightness
180 | except KeyError:
181 | print("WARNING No brightness data for file " + filename)
182 | print("Check if your exif data contains a 'BrightnessValue' tag ")
183 | return None
184 | except FileNotFoundError:
185 | print("WARNING Could not find file " + filename )
186 | return None
187 |
188 |
189 | def calc_lum(filename):
190 | image_bgr = cv.imread(filename)
191 | image_lab = cv.cvtColor(image_bgr, cv.COLOR_BGR2LAB)
192 | average_lum = cv.mean(cv.split(image_lab)[0])
193 | return average_lum
194 |
195 |
196 |
197 | def process_clahe_folder(in_folder, tileGridSize, grey=False, out_folder="", clip_limit=2,new_name_end="_Clahe"):
198 | """
199 |     Apply CLAHE to all jpeg files in a given folder.
200 |     It is not possible to overwrite files, because the initial files are needed to copy-paste metadata.
201 |
202 | :param in_folder: input folder path
203 | :param tileGridSize: size of the "blocks" to apply local histogram equalization
204 | :param grey: if True, the image will be converted to grayscale
205 | :param clip_limit: contrast limit, used to avoid too much noise
206 | :param out_folder: output folder path
207 | :param new_name_end: string put at the end of output files, without the extension
208 | :return:
209 | """
210 |
211 |     # Process all the jpeg pictures of the input folder
212 | flist = np.sort(os.listdir(in_folder))
213 |
214 | for f in flist:
215 | try:
216 | if f.split(".")[-1].lower() in ["jpg","jpeg"]:
217 |                 in_path = os.path.join(in_folder, f)
218 |                 if out_folder == "": out_folder = in_folder
219 |                 out_path = os.path.join(out_folder, os.path.splitext(f)[0] + new_name_end + ".JPG")
220 |
221 | process_clahe(in_path, tileGridSize, grey=grey, out_path=out_path, clip_limit=clip_limit)
222 | except IndexError:
223 | pass
224 |
225 |
226 | def process_clahe(in_path, tileGridSize, grey=False, out_path="", clip_limit=2):
227 | """
228 | Appy CLAHE (contrast limited adaptive histogram equalization) method on an image
229 | for more information about CLAHE, see https://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html
230 |
231 | Overwriting image will raise an error, as the initial image is needed to copy-past metadata
232 | :param in_path: input image
233 | :param tileGridSize: size of the "blocks" to apply local histogram equalization
234 | :param grey: if True, the image will be converted to grayscale
235 | :param out_path: output path, the folders must exists and the image extension must be valid
236 | by default, output will be saved as input_path/input_name_clahe.JPG
237 | :param clip_limit: contrast limit, used to avoid too much noise
238 | """
239 | if out_path == "":
240 | out_path = ".".join(in_path.split(".")[:-1]) + "_clahe.JPG"
241 |
242 | # read input
243 | print("Processing CLAHE method on " + in_path.split("/")[-1])
244 | img = cv.imread(in_path)
245 |
246 | # convert color to gray
247 | if grey: img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
248 |
249 | # apply a median filter before clahe
250 | img = cv.medianBlur(img, 3)
251 |
252 | # create clahe object
253 | clahe = cv.createCLAHE(clipLimit=clip_limit, tileGridSize=(tileGridSize, tileGridSize)) # CLAHE
254 |
255 |     # apply CLAHE to each image channel, and then recreate the full image (only useful if grey==False)
256 | channels_ini = cv.split(img)
257 | channels_final = []
258 | for channel in channels_ini:
259 | # Apply CLAHE
260 | channels_final.append(clahe.apply(channel))
261 | img_final = cv.merge(channels_final)
262 |
263 | # save image and write metadata from initial file
264 | cv.imwrite(out_path, img_final)
265 | pyxif.transplant(in_path, out_path)
266 |
267 | def blurr(filename, ksize=3):
268 |     image_bgr = cv.imread(filename)  # todo: the conversion to grayscale should be done at this line
269 |     # image_gray = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY)
270 |     return np.log(cv.Laplacian(image_bgr, cv.CV_64F, ksize=ksize).var())
--------------------------------------------------------------------------------
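The sharpness test used by `check_picture_quality` above is the log-variance of the Laplacian computed in `blurr`. A minimal, self-contained sketch of that metric (the file name and the threshold below are placeholders, not values from the source):

```python
import cv2 as cv
import numpy as np

def sharpness(path, ksize=3):
    # Same metric as blurr(): log of the variance of the Laplacian response.
    # Converting to grayscale first, as the TODO in blurr() suggests.
    img = cv.imread(path)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    return np.log(cv.Laplacian(gray, cv.CV_64F, ksize=ksize).var())

# Illustrative use: a set is rejected when the least sharp image of the
# set falls below blur_inf (the 4.0 threshold here is arbitrary).
if sharpness("example.JPG") < 4.0:
    print("image too blurry, set rejected")
```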
/build/lib/photo4d/Process.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | # Generic imports
3 | import os
4 | from shutil import rmtree, move
5 | # Photo4D imports
6 | from photo4d.Utils import exec_mm3d
7 | import re
8 | from shutil import copyfile
9 |
10 | def process_one_timestep(work_folder, pictures_array, timestep, output_folder,
11 | clahe=False, tileGridSize_clahe=8, zoomF=1,
12 | master_folder_id=0, Ori='Bascule', useMask=False, DefCor=0.0,
13 | shift=None, keep_rasters=True, display_micmac=False):
14 | os.chdir(work_folder)
15 | I, J = pictures_array.shape
16 |
17 | if pictures_array[timestep, 1]: #just skip if the set is invalid somehow
18 | # Setup parameters
19 | selected_line=pictures_array[timestep]
20 | img_set="("
21 | for j in range(2,J):
22 | img_set += selected_line[j] + "|"
23 | img_set = img_set[:-1] + ")"
24 |
25 | master_img=selected_line[master_folder_id + 2]
26 | date_str=selected_line[0]
27 | ply_name= date_str + '.ply'
28 |
29 | # Don't run again if already existing
30 | if os.path.exists(os.path.join(work_folder,ply_name)):
31 | move(os.path.join(work_folder,ply_name), os.path.join(output_folder,ply_name))
32 | if not os.path.exists(os.path.join(output_folder,ply_name)):
33 | # Define Malt command and run it
34 |             if(useMask):
35 |                 # the mask is associated to the master image via the <master>_Masq.tif/.xml naming convention
36 |                 copyfile(os.path.join(work_folder, 'Mask.tif'), os.path.join(work_folder, master_img[:-4] + '_Masq.tif'))
37 |                 copyfile(os.path.join(work_folder, 'Mask.xml'), os.path.join(work_folder, master_img[:-4] + '_Masq.xml'))
38 |             # the Malt command line is identical with or without a mask
39 |             command = 'mm3d Malt GeomImage {} {} Master={} DefCor={} ZoomF={}'.format(img_set, Ori,
40 |                                                                                       master_img, DefCor, zoomF)
41 | 
42 | print(command)
43 | success, error = exec_mm3d(command, display_micmac)
44 |
45 |             if not (success == 0):
46 |                 print('Something went wrong: ' + str(error))
47 | else:
48 | # Find the last depth map and correlation file
49 | # Get a list of all the files in the Malt output folder
50 |                 files = []
51 |                 # os.walk yields (dirpath, dirnames, filenames) tuples; keep the filenames list
52 |                 for _root, _dirs, filenames in os.walk(os.path.join(work_folder, 'MM-Malt-Img-' + master_img[:-4])):
53 |                     files = filenames
54 | nuage_re = re.compile(r'NuageImProf_STD-MALT_Etape_\d{1}.xml')
55 | correlation_re = re.compile(r'Correl_STD-MALT_Num_\d{1}.tif')
56 | depth_re = re.compile(r'Z_Num\d{1}_DeZoom' + str(zoomF) +'_STD-MALT.tif')
57 | nuage_files = [ x for x in files if nuage_re.match(x)]
58 | correlation_files = [ x for x in files if correlation_re.match(x)]
59 | depth_files = [ x for x in files if depth_re.match(x)]
60 | sorted_nuage_files = sorted(nuage_files,reverse=True)
61 | sorted_correlation_files = sorted(correlation_files,reverse=True)
62 | sorted_depth_files = sorted(depth_files,reverse=True)
63 | last_nuage=sorted_nuage_files[0]
64 | last_cor=sorted_correlation_files[0]
65 | last_depth=sorted_depth_files[0]
66 |
67 | # Create the point cloud
68 | if shift is None:
69 | command = 'mm3d Nuage2Ply MM-Malt-Img-{}/{} Attr={} Out={}'.format(
70 | '.'.join(master_img.split('.')[:-1]), last_nuage, master_img, ply_name)
71 | else:
72 | command = 'mm3d Nuage2Ply MM-Malt-Img-{}/{} Attr={} Out={} Offs={}'.format(
73 | '.'.join(master_img.split('.')[:-1]), last_nuage, master_img, ply_name, str(shift).replace(" ", ""))
74 |
75 | print(command)
76 | success, error = exec_mm3d(command, True)
77 |
78 | # Copy result to result folder
79 | # .ply point cloud
80 | move(os.path.join(work_folder,ply_name), os.path.join(output_folder,ply_name))
81 | # If we want to keep the correlation map and the depth map
82 | if(keep_rasters):
83 |                     move(os.path.join(work_folder, 'MM-Malt-Img-' + master_img[:-4], last_cor), os.path.join(output_folder, date_str + '_Correlation.tif'))
84 |                     move(os.path.join(work_folder, 'MM-Malt-Img-' + master_img[:-4], last_depth), os.path.join(output_folder, date_str + '_DepthMap.tif'))
85 |
86 | # Clean-up
87 |
88 | try:
89 | rmtree(os.path.join(work_folder,'MM-Malt-Img-' + master_img[:-4]))
90 | except FileNotFoundError:
91 | pass
92 | except PermissionError:
93 | print("Permission Denied, cannot delete " + os.path.join(work_folder,'MM-Malt-Img-' + master_img))
94 | except OSError:
95 | pass
96 | try:
97 | rmtree(os.path.join(work_folder,"Pyram"))
98 | except PermissionError:
99 | print("Permission Denied, cannot delete Pyram folder")
100 | except OSError:
101 | pass
102 | # Go back to project folder
103 | os.chdir('../')
104 |
105 |
106 | def process_all_timesteps(work_folder, pictures_array, output_folder,
107 | clahe=False, tileGridSize_clahe=8, zoomF=1,
108 | master_folder_id=0, Ori='Bascule', useMask=False, DefCor=0.0,
109 | shift=None, keep_rasters=True, display_micmac=False):
110 | """
111 | Run MicMac (mm3d Malt) on all valid picture sets
112 |     It is advised to give only absolute paths as parameters.
113 |
114 |
115 | :param work_folder: folder where the images and orientations are
116 | :param pictures_array: array with set definitions (also, validity of sets and timestamp)
117 | :param output_folder: directory for saving results
118 | :param clahe: if True, apply a "contrast limited adaptive histogram equalization" on the pictures before processing
119 |
120 |     MicMac parameters (see the official MicMac documentation and wiki for more details):
121 | :param zoomF: final zoom in the pyramidal correlation scheme
122 | :param master_folder_id: id of the folder containing the master images (central image of sets)
123 | :param Ori: Orientation to use for the correlation (Def='Bascule', the output of 'Class_photo4D.compute_transform(True)')
124 | :param DefCor: correlation threshold to reject area in the correlation process ([0-1] def=0)
125 | :param shift: shift for saving ply (if numbers are too big for 32 bit ply) [shiftE, shiftN, shiftZ]
126 | :param keep_rasters: keep the depth map and last correlation map
127 |     :param display_micmac: show the MicMac console output, only useful to follow the correlation status of individual sets
128 | :return:
129 | """
130 | # ==================================================================================================================
131 | # checking path and parameters :
132 |     nb_folders = pictures_array.shape[1] - 2  # number of camera folders: columns minus the date and validity fields
133 |
134 | if type(master_folder_id) != int or not (0 <= master_folder_id < nb_folders):
135 | print("Invalid value {} for parameter master folder, value set to 0".format(master_folder_id))
136 | print("must be one index of the array secondary_folder_list")
137 | master_folder_id = 0
138 |
139 | # make output folder if not already present
140 | if not os.path.exists(output_folder): os.makedirs(output_folder)
141 |
142 |     # Process each picture set (one per timestep)
143 | I, J = pictures_array.shape
144 | for timestep in range(I):
145 | process_one_timestep(work_folder, pictures_array, timestep, output_folder,
146 | clahe=clahe, tileGridSize_clahe=tileGridSize_clahe, zoomF=zoomF,
147 | master_folder_id=master_folder_id, Ori=Ori, useMask=useMask, DefCor=DefCor,
148 | shift=shift, keep_rasters=keep_rasters, display_micmac=display_micmac)
149 |
150 |
--------------------------------------------------------------------------------
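To make the string assembly in `process_one_timestep` concrete, here is a small sketch of how the image-set pattern and the Malt command line are built from one hypothetical row of `pictures_array` (image names are placeholders):

```python
# Hypothetical picture-set row: date string, validity flag, one image per camera.
row = ["2019-05-01T12-00-00", True, "cam1_a.JPG", "cam2_a.JPG", "cam3_a.JPG"]

img_set = "(" + "|".join(row[2:]) + ")"  # regex alternation over the set's images
master_img = row[2]                      # with master_folder_id = 0
command = 'mm3d Malt GeomImage {} {} Master={} DefCor={} ZoomF={}'.format(
    img_set, 'Bascule', master_img, 0.0, 1)
print(command)
# mm3d Malt GeomImage (cam1_a.JPG|cam2_a.JPG|cam3_a.JPG) Bascule Master=cam1_a.JPG DefCor=0.0 ZoomF=1
```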
/build/lib/photo4d/Utils.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | """
3 | Some useful functions
4 | """
5 | from subprocess import Popen, PIPE, STDOUT
6 | import sys
7 | import numpy as np
8 |
9 |
10 |
11 | def exec_mm3d(command, display=True):
12 | """
13 | launch a MicMac command.
14 | As MicMac handle errors and wait for user's input, we needed a way to carry on when a command fails
15 | This function will kill the process if an error happens, allowing further process to be done
16 | :param command: MicMac command line, string beginning with mm3d
17 | to see every command allowed, see https://micmac.ensg.eu/index.php/Accueil, or Github
18 | :param display: display or not the logs from MicMac, boolean
19 | :return:
20 | """
21 |     if command[:5] != "mm3d ":
22 |         print("WARNING The command must begin with mm3d\n")
23 |         return 1, "command does not begin with mm3d"
24 |
25 | process = Popen(command.split(" "), stdout=PIPE, stderr=STDOUT)
26 | for line_bin in iter(process.stdout.readline, b''):
27 | try:
28 | line = line_bin.decode(sys.stdout.encoding)
29 | if display:
30 | sys.stdout.write(line)
31 | # if waiting for input, which means an error was generated
32 | if '(press enter)' in line:
33 | print("Error in MicMac process, abort process")
34 | process.kill()
35 | return 1, None
36 | elif 'Warn tape enter to continue' in line:
37 | print("Value error in Tapas, abort process")
38 | process.kill()
39 | return 1,'Value error in Tapas'
40 | except UnicodeDecodeError:
41 | sys.stdout.write('---cannot decode this line---')
42 |
43 | # if no error occurs
44 | return 0, None
45 |
46 |
47 | def pictures_array_from_file(filepath):
48 | """
49 | Could be way more efficient ! And does not handle any error for the moment
50 | :param filepath:
51 | :return:
52 | """
53 | print("\nRetrieving data from file " + filepath + "\n.......................................")
54 | all_lines = []
55 | with open(filepath, 'r') as file:
56 | for line in file.readlines():
57 | if line[0] != "#" :
58 | list_temp = line.split(',')
59 | length = len(list_temp)
60 |                 array_line = np.empty(length, dtype=object)
61 |                 # columns written by sort_pictures: date, validity boolean, image names
62 |                 array_line[0] = list_temp[0].strip()
63 |                 # parse the validity flag into a real boolean (Process.py checks column 1)
64 |                 array_line[1] = list_temp[1].strip().lower() == "true"
65 |                 for i in range(2, length):
66 |                     array_line[i] = list_temp[i].rstrip('\n')
67 | all_lines.append(array_line)
68 | print("Done")
69 | return np.array(all_lines)
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
--------------------------------------------------------------------------------
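A small round-trip sketch of the set-definition format consumed by `pictures_array_from_file` (file and image names are placeholders; the column layout, date then validity flag then image names, is the one written by `sort_pictures`):

```python
from photo4d.Utils import pictures_array_from_file

# Write a toy set file: '#' lines are comments and are skipped on reading.
with open("set_definition.txt", "w") as f:
    f.write("# Pictures taken within 600 seconds of interval\n")
    f.write("2019-05-01T12-00-00,True,cam1_a.JPG,cam2_a.JPG\n")
    f.write("2019-05-01T13-00-00,False,cam1_b.JPG,cam2_b.JPG\n")

arr = pictures_array_from_file("set_definition.txt")
print(arr.shape)   # (2, 4): two sets, each with date + flag + two image names
print(arr[0, 1])   # True: the first set is valid
```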
/build/lib/photo4d/XML_utils.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from lxml import etree
4 | import os
5 | import pandas as pd
6 | import numpy as np
7 | import cv2 as cv
8 |
9 |
10 | # todo: finish translating the comments into English
11 | def extract_res(ori_folder_path, xmlfile='Residus.xml', do_txt=False, output_folder_path="",
12 | output_name="residuals_last_iter.txt",
13 | sep=',', ):
14 | """
15 | Read the residual file from MicMac and create a txt file (with separator, allows .csv) with only residuals of the
16 | last iteration : mean residual and residual for each picture.
17 |
18 |     :param ori_folder_path: folder containing the xml residuals file from MicMac; it is an orientation folder (name beginning with "Ori-")
19 | :param xmlfile: name of xml file, always 'Residus.xml' from MicMac
20 | ======
21 |     these parameters are only useful if do_txt==True; in this case a txt file will be written
22 | :param do_txt: if False, no file will be written
23 | :param output_folder_path: folder where to save the txt file, by default same as ori_folder_path
24 | :param output_name: name of the output file, default "residuals_last_iter.txt"
25 | :param sep: separator of output file, default ','
26 | ======
27 |     :return: 1 if failed, dictionary of residuals if no errors were detected
28 |         the dictionary has nb_iters, AverageResidual and the names of the pictures as keys
29 |         the element corresponding to one picture is the list of values ('Residual', 'PercOk', 'NbPts', 'NbPtsMul')
30 | (for more information about these values, see MicMac documentation)
31 | """
32 |
33 | file_content = ""
34 |     res_dict = {}  # renamed to avoid shadowing the built-in dict
35 |
36 | elements = ('Name', 'Residual', 'PercOk', 'NbPts', 'NbPtsMul')
37 | try:
38 | # Parsing of xml
39 | tree = etree.parse(ori_folder_path + xmlfile)
40 |
41 | # Getting number of iterations
42 | nb_iters = tree.xpath("/XmlSauvExportAperoGlob/Iters/NumIter")[-1].text
43 | file_content += 'nb_iters' + sep + nb_iters + '\n'
44 |         res_dict['nb_iters'] = int(nb_iters)
45 |
46 |         # Get the mean residual of the last iteration
47 | av_resid = tree.xpath("/XmlSauvExportAperoGlob/Iters[NumIter={}][NumEtape=3]/\
48 | AverageResidual".format(nb_iters))[0].text
49 | file_content += 'AverageResidual' + sep + av_resid + '\n'
50 |         res_dict['AverageResidual'] = float(av_resid)
51 |
52 |         # Get the data of each image for the last iteration
53 | file_content += ('\nName{}Residual{}PercOk{}NbPts{}NbPtsMul\n'.format(sep, sep, sep, sep))
54 | for img in tree.xpath("/XmlSauvExportAperoGlob/Iters[NumIter={}]\
55 | [NumEtape=3]/OneIm".format(nb_iters)):
56 | obj = ''
57 | for e in elements:
58 | obj += img.find(e).text + sep
59 | file_content += obj + '\n'
60 | image_name = obj.split(sep)[0]
61 |             res_dict[image_name] = obj.split(sep)[1:-1]
62 | except OSError:
63 | print("WARNING Can't open the file " + ori_folder_path + xmlfile)
64 | return 1
65 | except etree.XMLSyntaxError:
66 | print("WARNING The xml is not correct")
67 | return 1
68 |
69 | # write the txt file
70 | if do_txt:
71 | if output_folder_path == "":
72 | output_folder_path = ori_folder_path
73 |
74 | # Creation of the txt file
75 | try:
76 | with open(output_folder_path + output_name, "w") as file:
77 | file.write(file_content)
78 | except IOError:
79 | print("Cannot write file")
80 |
81 |     return res_dict
82 |
83 |
84 | def read_S2D_xmlfile(file_path):
85 | """
86 | read a file containing pixel coordinates of points in images from the SaisieAppuis MicMac commands and put data
87 | into a dictionary
88 |
89 | :param file_path: path to the s2d xml file
90 | ========
91 | :return: a dictionary, pictures names are the indexes, and for each picture the element is a dictionary with points measurments
92 | todo cest vraiment trop mal ecrit !!!!!!
93 | { picture1 : {point1: (coordX, coordY),
94 | {point2: (coordX, coordY)}
95 | picture2 : {point1: (coordX, coordY),
96 | ...}
97 | ...}
98 | """
99 | dic_img_measures = {}
100 | # read the file
101 | try:
102 | with open(file_path, 'r'):
103 | # Parsing of xml
104 | tree = etree.parse(file_path)
105 | # loop on images
106 | for image in tree.xpath("/SetOfMesureAppuisFlottants/MesureAppuiFlottant1Im/NameIm"):
107 | dic_measures = {}
108 | # loop on the points
109 | for point in tree.xpath(
110 | "/SetOfMesureAppuisFlottants/MesureAppuiFlottant1Im[NameIm='{}']/OneMesureAF1I".format(
111 | image.text)):
112 | point_name = point[0].text.rstrip(" ")
113 | measure = point[1].text.split(" ")
114 | dic_measures[point_name] = (float(measure[0]), float(measure[1]))
115 |
116 | dic_img_measures[image.text] = dic_measures
117 |
118 | except etree.XMLSyntaxError:
119 | print("WARNING The xml file is not valid " + file_path)
120 | return
121 | except FileNotFoundError:
122 | print("WARNING Cannot find file S2D xml at " + file_path)
123 | return
124 |
125 | return dic_img_measures
126 |
127 |
128 | # todo: do something about this function, it is pretty bad...
129 |
130 | def count_tiepoints_from_txt(main_folder_path):
131 | """
132 | generate a panda DataFrame with the tie points found by the Tapioca command of MicMac USED WITH ExpTxt=1
133 |
134 | :param main_folder_path: path of the folder where is situated the Homol folder, or path of the Homol folder
135 | :return:
136 | - a panda DataFrame, row and column indexes are the name of pictures used, and for cell, the number
137 | of Tie points found in image row compared to img colum todo rendre ca lisible
138 | Img1 Img2 Img3
139 | Img1 0 nb1,2 nb1,3
140 | Img2 nb2,1 0 nb2,3
141 | Img3 nb3,1 nb3,1 0
142 | """
143 |
144 | # path checking
145 | if main_folder_path[-1] != "/": main_folder_path += "/"
146 | if main_folder_path.split("/")[-2] != "Homol":
147 | main_folder_path += "Homol/"
148 |
149 | try:
150 | folder_list = os.listdir(main_folder_path)
151 |
152 | # collect picture names in Homol directory, each folder is for one picture
153 | index = []
154 | for folder in folder_list:
155 | index.append(folder[6:]) # remove Pastis, the folder name being like PastisNamePicture
156 |
157 | df = pd.DataFrame(np.zeros((len(folder_list), len(folder_list))), index=index, columns=index)
158 |
159 | # count tie points
160 | s = 0 # total tie points
161 | for folder in folder_list:
162 | file_list = os.listdir(main_folder_path + folder)
163 | for filename in file_list:
164 | if filename.split('.')[-1] == 'txt':
165 | file = open(main_folder_path + folder + "/" + filename, 'r')
166 |
167 | # basically just counting the number of row in each file
168 | i = 0
169 | for line in file.readlines():
170 | i += 1
171 | s += 1
172 |                     df.loc[folder[6:], filename[:-4]] = i  # drop the '.txt' extension (rstrip would eat trailing letters)
173 | if s == 0:
174 | print('\033[0;31m WARNING, 0 Tie Points found, please check that ExptTxt=1 in Tapioca \033[0m')
175 | return df, s
176 | except IOError:
177 | print('\033[0;31m' + "Cannot open " + main_folder_path + '\033[0m')
178 |
179 |
180 | def get_tiepoints_from_txt(path):
181 | point_list = []
182 | with open(path) as f:
183 | for line in f.readlines():
184 | point_list.append(line.split(" "))
185 |     return np.array(point_list).astype(float)  # np.float is deprecated, use the built-in float
186 |
187 |
188 | def write_S2D_xmlfile(dico_img_measures, file_name):
189 | """
190 | Write an xml file with 2D mesures of points in different images, in a way that MicMac can read it
191 | :param dico_img_measures: dictionnary containing 2D measures. Must looks like :
192 | {NameImage1 (String) : {NamePoint1 (String) : (measureX, measureY) (tuple of float),
193 | NamePoint2 (String) : (measureX, measureY) (tuple of float), todo la doc est a continuer
194 | ...},
195 | NameImage2 (String) : {NamePoint1 (String) : measure (String, 'coordPoint1Image2 coordPoint1Image2'),
196 | NamePoint2 (String) : measure (String, 'coordPoint2Image2 coordPoint2Image2'),
197 | ...}, ...}
198 | :param file_name: path or name of the output file
199 | """
200 | # Creation of the document root
201 | measures_set = etree.Element('SetOfMesureAppuisFlottants')
202 |
203 | # iterate over pictures
204 | for image, dico_measures in dico_img_measures.items():
205 |
206 | img_meas = etree.SubElement(measures_set, 'MesureAppuiFlottant1Im')
207 | name_img = etree.SubElement(img_meas, 'NameIm')
208 | name_img.text = image
209 |
210 | # iterate over measures for each picture
211 | for point, measure in dico_measures.items():
212 |
213 | pt_mes = etree.SubElement(img_meas, 'OneMesureAF1I')
214 | etree.SubElement(pt_mes, 'NamePt').text = point
215 | coord_img_pt = etree.SubElement(pt_mes, 'PtIm')
216 | coord_img_pt.text = "{} {}".format(measure[0], measure[1])
217 |
218 | # open the file for writing
219 | try:
220 | with open(file_name, 'w') as file:
221 | # Header
222 | file.write('\n')
223 | # Writing all the text we created
224 | file.write(etree.tostring(measures_set, pretty_print=True).decode('utf-8'))
225 | except IOError:
226 | print('Error while writing file')
227 | return
228 |
229 |
230 | def write_masq_xml(tif_mask, output=""):
231 | """
232 | write default xml file describing the mask from MicMac
233 | Even if this file seems useless, MicMac can throw an error without this file associated to the mask (in Malt)
234 |
235 | :param tif_mask: path to the MicMac mask, in .tif format
236 | :param output: path for output xml file
237 | """
238 | # do some checks
239 | if tif_mask.split('.')[-1] not in ["tif", "tiff"]:
240 | print("Wrong input path " + tif_mask + "\n Must be a .tif file")
241 | return
242 | if output == "":
243 | output = '.'.join(tif_mask.split('.')[:-1]) + ".xml"
244 | elif output.split('.')[-1] != "xml":
245 | print("Wrong output path " + output + "\n Must be a .xml file")
246 | return
247 |
248 | file_ori = etree.Element('FileOriMnt')
249 | name = etree.SubElement(file_ori, 'NameFileMnt')
250 | name.text = tif_mask
251 |
252 | nb_pix = etree.SubElement(file_ori, 'NombrePixels')
253 |     # read the mask only to get its dimensions (todo: find an easier way)
254 |     shape = cv.imread(tif_mask).shape
255 | nb_pix.text = "{} {}".format(shape[1], shape[0])
256 | # write some default values
257 | etree.SubElement(file_ori, 'OriginePlani').text = "0 0"
258 | etree.SubElement(file_ori, 'ResolutionPlani').text = "1 1"
259 | etree.SubElement(file_ori, 'OrigineAlti').text = "0"
260 | etree.SubElement(file_ori, 'ResolutionAlti').text = "1"
261 | etree.SubElement(file_ori, 'Geometrie').text = "eGeomMNTFaisceauIm1PrCh_Px1D"
262 |
263 | # write the xml file
264 | try:
265 | with open(output, 'w') as file:
266 | file.write('\n')
267 | file.write(etree.tostring(file_ori, pretty_print=True).decode('utf-8'))
268 | except IOError:
269 | print('Error while writing file')
270 | return
271 |
272 |
273 | def change_Ori(initial_pictures, final_pictures, ori_folder_path):
274 | """
275 | Changes all the files of an Ori- folder from MicMac, in the way that every reference to initial pictures
276 | is replaced by reference to final_pictures
277 | WARNING this will totally modify the folder without backup of the initial one, think about make a copy first
278 | :param initial_pictures: list of initial pictures to be replaced, in the same order as the final one
279 | :param final_pictures: list of initial pictures to be replaced, in the same order as the final one
280 | :param ori_folder_path: path of the orientation folder (name beginning with Ori- )
281 | """
282 | # some checks
283 |     # the folder name must start with "Ori-"
284 | if os.path.basename(ori_folder_path)[:4] != "Ori-":
285 | print("Ori path is not valid : {}\nYou need to enter the path to the Ori-folder ".format(ori_folder_path))
286 | return
287 | elif len(initial_pictures) != len(final_pictures):
288 | print("List of input and output pictures must have the same size")
289 | return
290 | nb_pictures = len(initial_pictures)
291 |
292 | # change orientation files
293 | for j in range(nb_pictures):
294 | # rename Orientation files
295 | if os.path.exists(os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(initial_pictures[j]))):
296 | os.rename(os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(initial_pictures[j])),
297 | os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(final_pictures[j])))
298 |
299 | # write a short summary
300 | with open(os.path.join(ori_folder_path,"log.txt"), 'w') as log:
301 | log.write("This orientation was not calculated by MicMac with these pictures\n\n")
302 | log.write("The names of pictures were just changed \n\n")
303 | for i in range(nb_pictures):
304 | log.write("{} was replaced by {}\n".format(initial_pictures[i], final_pictures[i]))
305 |
306 |
307 | def change_xml(initial_pictures, final_pictures, xml_path):
308 | """
309 |     Replace all occurrences of the initial pictures with the final pictures in an xml/txt file;
310 |     initial_pictures[i] will be replaced by final_pictures[i]
311 |
312 | :param initial_pictures: list of pictures to be replaced
313 | :param final_pictures: list of replacement pictures, in the same order as initial pictures
314 | :param xml_path: path to the file to process
315 | """
316 | # checking length
317 | if len(initial_pictures) != len(final_pictures):
318 | print("List of input and output pictures must have the same size")
319 | return
320 | nb_pictures = len(initial_pictures)
321 |
322 | # Read the xml file
323 | with open(xml_path, 'r') as file:
324 | file_data = file.read()
325 | for i in range(nb_pictures):
326 | # Replace the target string
327 | file_data = file_data.replace(initial_pictures[i], final_pictures[i])
328 | # Write the file out again
329 | with open(xml_path, 'w') as file:
330 | file.write(file_data)
331 |
332 |
333 | def write_couples_file(file_path, master_image, pictures_list):
334 | """
335 |     Write an xml file for the MicMac command Tapioca, listing the pictures paired with one master image.
336 |
337 | :param file_path: path to xml file, if it already exists, it will be replaced
338 | :param master_image: image to compare with all the others
339 |     :param pictures_list: list of the images to pair with the master image
340 | """
341 | root = etree.Element('SauvegardeNamedRel')
342 | for img in pictures_list:
343 | couple = etree.SubElement(root, 'Cple')
344 | couple.text = str(master_image) + " " + str(img)
345 | with open(file_path, 'w') as xml:
346 | xml.write('\n')
347 | xml.write(etree.tostring(root, pretty_print=True).decode('utf-8'))
348 |
349 |
350 | if __name__ == "__main__":
351 | print(read_S2D_xmlfile(
352 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/all_points-S2D_dist_max.xml"))
353 |
--------------------------------------------------------------------------------
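A quick round-trip check of the S2D helpers above, using a toy measurement dictionary (image and point names are placeholders):

```python
from photo4d.XML_utils import write_S2D_xmlfile, read_S2D_xmlfile

# Two points picked on one image, in pixel coordinates.
measures = {"DSC02728.JPG": {"GCP1": (1024.5, 768.0), "GCP2": (2048.0, 1536.5)}}

write_S2D_xmlfile(measures, "toy-S2D.xml")
print(read_S2D_xmlfile("toy-S2D.xml"))
# {'DSC02728.JPG': {'GCP1': (1024.5, 768.0), 'GCP2': (2048.0, 1536.5)}}
```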
/build/lib/photo4d/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArcticSnow/photo4D/2210683e3d352ffb648962ff043d14e2aa24a80f/build/lib/photo4d/__init__.py
--------------------------------------------------------------------------------
/build/lib/photo4d/__version__.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | VERSION = (0, 2, 2)
4 |
5 | __version__ = '.'.join(map(str, VERSION))
6 |
--------------------------------------------------------------------------------
/build/lib/photo4d/pdal_python_filter.py:
--------------------------------------------------------------------------------
1 | '''
2 | Custom python filters for pdal
3 |
4 | '''
5 | import numpy as np
6 | import matplotlib.colors as cl
7 | import pandas as pd
8 | #import pdb
9 |
10 | def add_XY_UTM(ins, outs):
11 | X = ins['X']
12 | X += float(pdalargs['x_offset'])
13 | Y = ins['Y']
14 | Y += float(pdalargs['y_offset'])
15 | outs['Y'] = Y
16 | outs['X'] = X
17 | return True
18 |
19 |
20 | def voxelGrid(ins, outs):
21 |
22 | ROI = [float(pdalargs['Xmin']),
23 | float(pdalargs['Xmax']),
24 | float(pdalargs['Ymin']),
25 | float(pdalargs['Ymax']),
26 | float(pdalargs['Zmin']),
27 | float(pdalargs['Zmax'])]
28 | leaf = float(pdalargs['leaf'])
29 |
30 | df = pd.DataFrame({'X' : ins['X'] ,
31 | 'Y' : ins['Y'] ,
32 | 'Z' : ins['Z'],
33 | 'Red':ins['Red'],
34 | 'Green':ins['Green'],
35 | 'Blue':ins['Blue']})
36 |
37 | for i in range(0,6):
38 | if ROI[i]==-9999:
39 | if i==0:
40 | ROI[i] = df.iloc[:,0].min()
41 | elif i==1:
42 | ROI[i] = df.iloc[:,0].max()
43 | elif i==2:
44 | ROI[i] = df.iloc[:,1].min()
45 | elif i==3:
46 | ROI[i] = df.iloc[:,1].max()
47 | elif i==4:
48 | ROI[i] = df.iloc[:,2].min()
49 | elif i==5:
50 | ROI[i] = df.iloc[:,2].max()
51 |
52 | #print(ROI)
53 |     nx = int((ROI[1] - ROI[0]) / leaf)  # built-in int: np.int is deprecated
54 |     ny = int((ROI[3] - ROI[2]) / leaf)
55 |     nz = int((ROI[5] - ROI[4]) / leaf)
56 |
57 | bins_x = np.linspace(ROI[0], ROI[1], nx+1)
58 | df['x_cuts'] = pd.cut(df.X,bins_x, labels=False)
59 | bins_y = np.linspace(ROI[2],ROI[3], ny+1)
60 | df['y_cuts'] = pd.cut(df.Y,bins_y, labels=False)
61 | bins_z = np.linspace(ROI[4],ROI[5], nz+1)
62 | df['z_cuts'] = pd.cut(df.Z,bins_z, labels=False)
63 |
64 | grouped = df.groupby([df['x_cuts'],df['y_cuts'], df['z_cuts']])
65 |
66 | outf = pd.DataFrame()
67 | outf['X'] = np.hstack((grouped.X.mean().reset_index().X, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
68 | outf['Y'] = np.hstack((grouped.Y.mean().reset_index().Y, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
69 | outf['Z'] = np.hstack((grouped.Z.mean().reset_index().Z, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
70 | outf['Red'] = np.hstack((grouped.Red.mean().reset_index().Red, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
71 | outf['Green'] = np.hstack((grouped.Green.mean().reset_index().Green, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
72 | outf['Blue'] = np.hstack((grouped.Blue.mean().reset_index().Blue, np.zeros(ins['X'].shape[0] - grouped.X.mean().reset_index().X.shape[0])-9999))
73 | outf['Classification'] = (outf.X==-9999)*13
74 | outf = outf.dropna()
75 |
76 |     outs['X'] = np.array(outf.X.astype('<f8'))
77 |     outs['Y'] = np.array(outf.Y.astype('<f8'))
78 |     outs['Z'] = np.array(outf.Z.astype('<f8'))
79 |     outs['Red'] = np.array(outf.Red.astype(ins['Red'].dtype))
80 |     outs['Green'] = np.array(outf.Green.astype(ins['Green'].dtype))
81 |     outs['Blue'] = np.array(outf.Blue.astype(ins['Blue'].dtype))
82 |     outs['Classification'] = np.array(outf.Classification.astype('u1'))
83 | 
84 |     return True
85 | 
86 | 
87 | # [... lines missing from this dump ...]
88 | 
89 | def classify_snow_ground(ins, outs):  # function name assumed
90 |     "Classify points as snow (3) or ground (2) based on point Value in the HSV colorspace"
91 |     thresh = 180
92 | 
93 |     rgb = np.vstack((ins['Red'], ins['Green'], ins['Blue'])).T
94 |     hsv = cl.rgb_to_hsv(rgb)
95 |     snow = (hsv[:,2] > thresh) * 3
133 | ground = (hsv[:,2] <= thresh) * 2
134 |
135 | dt = np.dtype(('u1'))
136 | snow = snow.astype(dt)
137 | ground = ground.astype(dt)
138 | cls = snow + ground
139 |
140 | outs['Classification'] = cls
141 |
142 | return True
143 |
144 |
145 | def mask_ground(ins, outs):
146 | "Mask to keep ground patches only based on point Value in the HSV colorspace"
147 | thresh = 180
148 |
149 | rgb = np.vstack((ins['Red'], ins['Green'], ins['Blue'])).T
150 | hsv = cl.rgb_to_hsv(rgb)
151 | ground = hsv[:,2] <= thresh
152 | outs['Mask'] = ground
153 |
154 | return True
155 |
156 |
157 | def rgb2value(ins, outs):
158 | rgb = np.vstack((ins.get('Red'), ins.get('Green'), ins.get('Blue'))).T
159 | hsv = cl.rgb_to_hsv(rgb)
160 | #pdb.set_trace()
161 | outs['Value'] = hsv[:,2].astype('float64')
162 |
163 | return True
164 |
165 |
166 |
167 | def mask_snow(ins, outs):
168 | "Mask to keep ground patches only based on point Value in the HSV colorspace"
169 | thresh = 180
170 |
171 | rgb = np.vstack((ins['Red'], ins['Green'], ins['Blue'])).T
172 | hsv = cl.rgb_to_hsv(rgb)
173 | snow = hsv[:,2] > thresh
174 | outs['Mask'] = snow
175 |
176 | return True
177 |
--------------------------------------------------------------------------------
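The snow/ground filters above all reduce to one threshold on the Value channel (V = max(R, G, B)) after an HSV conversion. A toy check of that logic using two hand-picked colors (note that `rgb_to_hsv` is applied to the raw, unnormalized color values, so Value stays on the input scale):

```python
import numpy as np
import matplotlib.colors as cl

# One bright (snow-like) and one dark (ground-like) point.
rgb = np.array([[250., 250., 250.],
                [60., 50., 40.]])
hsv = cl.rgb_to_hsv(rgb)     # hsv[:, 2] is the Value channel, max(R, G, B)
print(hsv[:, 2] > 180)       # [ True False] -> the snow mask of mask_snow()
```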
/dist/photo4d-0.2.2-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArcticSnow/photo4D/2210683e3d352ffb648962ff043d14e2aa24a80f/dist/photo4d-0.2.2-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/dist/photo4d-0.2.2.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArcticSnow/photo4D/2210683e3d352ffb648962ff043d14e2aa24a80f/dist/photo4d-0.2.2.tar.gz
--------------------------------------------------------------------------------
/photo4d.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: photo4d
3 | Version: 0.2.2
4 | Summary: Open source project to perform time-lapse photogrammetry
5 | Home-page: https://github.com/ArcticSnow/photo4D
6 | Author: S. Filhol, A. Perret, G. Sutter, and L. Girod
7 | Author-email: simon.filhol@geo.uio.no
8 | License: MIT
9 | Description:
10 | # Photo4D: open-source time-lapse photogrammetry
11 |
12 | Contributors in alphabetical order:
13 | - Simon Filhol (simon.filhol@geo.uio.no)
14 | - Luc Girod (luc.girod@geo.uio.no)
15 | - Alexis Perret (aperret2010@hotmail.fr)
16 | - Guillaume Sutter (sutterguigui@gmail.com)
17 |
18 | ## Description
19 |
20 | This project consists of an automated program to generate point clouds from time-lapse sets of images taken by independent cameras. The software:
21 | 1. sorts images by timestamp,
22 | 2. assesses the image quality based on luminance and blurriness,
23 | 3. automatically identifies GCPs through the stacks of images,
24 | 4. runs MicMac to compute point clouds, and
25 | 5. converts point clouds to rasters (not yet implemented).
26 |
27 | The project is built on open-source libraries, for public release.
28 |
29 | ## Reference
30 |
31 | Filhol, S., Perret, A., Girod, L., Sutter, G., Schuler, T. V., and Burkhart, J. F.. ( 2019), Time‐lapse Photogrammetry of Distributed Snowdepth During Snowmelt. Water Resour. Res., 55. https://doi.org/10.1029/2018WR024530
32 |
33 | URL: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2018WR024530
34 |
35 |
36 | ## Installation
37 | 1. install the latest version of [micmac](https://micmac.ensg.eu/index.php/Install)
38 |
39 | 2. install python 3.6, and with anaconda, create a virtual environment with the following packages:
40 | - opencv
41 | - pandas
42 | - matplotlib
43 | - lxml
44 | - pillow
45 | - [pyxif](https://github.com/zenwerk/Pyxif) (that needs to be downloaded from https://github.com/zenwerk/Pyxif)
46 | ```sh
47 | wget https://github.com/zenwerk/Pyxif/archive/master.zip
48 | unzip master.zip
49 | cd Pyxif-master
50 | mv LICENCE.txt LICENSE.txt # As there is a typo in the License filename
51 | python setup.py install
52 | ```
53 | - [PDAL](https://pdal.io/)
54 | - json
55 |
56 | 3. The package is available via Pypi
57 |
58 | ```sh
59 | pip install photo4d
60 | ```
61 |
62 | ## Usage
63 |
64 | ### 1. prepare your environment:
65 | - create a Python >= 3.6 virtual environment in which you install the required libraries (see above)
66 | - create a folder for the project, containing a folder called Images, which itself contains one folder per
67 |   camera
68 | - organize your photos with one folder per camera. For instance, the folder /Cam1 contains all the images from Camera 1.
69 |
70 | ```bash
71 | ├── Project
72 | └── Images
73 | ├── Cam1
74 | ├── Cam2
75 | ├── Cam3
76 | └── Cam...
77 | ```
78 |
79 |
80 | ### 2. Use the Photo4d class to process the images through MicMac
81 |
82 | Set the path correctly in the file MicmacApp/Class_photo4D.py, and follow these steps
83 |
84 | ```python
85 |
86 | ############################################################
87 | ## Part 1
88 |
89 | import photo4d as p4d
90 |
91 | # Create a new photo4d object by indicating the Project path
92 | myproj = p4d.Photo4d(project_path="point to project folder /Project")
93 |
94 | # Algorithm to sort images into sets, and create the reference table with: date, validity flag, image names
95 | myproj.sort_picture()
96 |
97 | # Algorithm to check picture quality (exposure and blurriness)
98 | myproj.check_picture_quality()
99 |
100 | ############################################################
101 | ## Part 2: Estimate camera orientation
102 |
103 | # Compute camera orientation using the timeSIFT method:
104 | myproj.timeSIFT_orientation()
105 |
106 | # Convert a text file containing the GCP coordinates to the proper format (.xml) for Micmac
107 | myproj.prepare_gcp_files(path_to_GCP_file, file_format="N_X_Y_Z")
108 |
109 | # Select a set to input GCPs
110 | myproj.set_selected_set("DSC02728.JPG")
111 |
112 | # Input GCPs in 3 steps
113 | # first select 3 to 5 GCPs to pre-orient the images
114 | myproj.pick_initial_gcps()
115 |
116 | # Apply transformation based on the few GCPs previously picked
117 | myproj.compute_transform()
118 |
119 | # Pick additional GCPs, which are now pre-estimated
120 | myproj.pick_all_gcps()
121 |
122 | ############################################################
123 | ## Part 2, optional: pick GCPs on an extra image set
124 | ## If you need to pick GCPs on another set of images, change selected set (this can be repeated n times):
125 | #myproj.compute_transform()
126 | #myproj.set_selected_set("DSC02871.JPG")
127 | #myproj.pick_all_gcps()
128 |
129 | # Compute final transform using all picked GCPs
130 | myproj.compute_transform(doCampari=True)
131 |
132 | ## FUNCTION TO CHANGE FOR TIMESIFT
133 | # myproj.create_mask() #To be finished
134 |
135 | ############################################################
136 | ## Part3: Compute point clouds
137 |
138 | # Compute point cloud, correlation matrix, and depth matrix for each set of image
139 | myproj.process_all_timesteps()
140 |
141 | # Clean (remove) the temporary working directory
142 | myproj.clean_up_tmp()
143 |
144 | ```
145 |
146 | ### 3. Process the point clouds with [PDAL](https://pdal.io/)
147 |
148 | **Currently Under Development**
149 |
150 | [PDAL](https://pdal.io/) is a python library to process point clouds. It has an extensive set of algorithms available, and here we wrapped a general method to filter the point clouds derived in the previous step and extract Digital Elevation Models (DEMs) from them.
151 |
152 | Micmac produces point clouds in the format `.ply`. The functions in the python class `pcl_process()` can convert, filter and crop the `.ply` point clouds and save them as `.las` files. Then the function `convert_all_pcl2dem()` will convert all point clouds stored in `my_pcl.las_pcl_flist` to DEMs.
153 |
154 | With the function `my_pcl.custom_pipeline()`, it is possible to build custom processing pipeline following the PDAL JSON syntax. This pipeline can then be executed by running the function `my_pcl.apply_custom_pipeline()`.
155 |
156 | See the source file [Class_pcl_processing.py](./photo4d/Class_pcl_processing.py) for more details.
157 |
158 | ```python
159 |
160 | # Create a pcl_class object, indicating the path to the photo4d project
161 | my_pcl = p4d.pcl_process(project_path="path_to_project_folder")
162 |
163 | my_pcl.resolution = 1 # set the resolution of the final DEMs
164 |
165 | # Set the bounding box of the Region of Interest (ROI)
166 | my_pcl.crop_xmin = 416100
167 | my_pcl.crop_xmax = 416900
168 | my_pcl.crop_ymin = 6715900
169 | my_pcl.crop_ymax = 6716700
170 | my_pcl.nodata = -9999
171 |
172 | # add the paths of the .ply point cloud files to the python class
173 | my_pcl.add_ply_pcl()
174 |
175 | # filter the point clouds with pdal routine, and save resulting point clouds as .las file
176 | my_pcl.filter_all_pcl()
177 |
178 | # add path of the .las files
179 | my_pcl.add_las_pcl()
180 |
181 | # convert the .las point clouds to DEMs (geotiff)
182 | my_pcl.convert_all_pcl2dem()
183 |
184 | # Extract Value orthophoto from RGB
185 | my_pcl.extract_all_ortho_value()
186 |
187 | ```
188 |
189 | After this section you have clean point clouds, as well as DEMs in GeoTiff ready!
190 |
191 |
192 | ## Resources
193 |
194 | - Micmac: http://micmac.ensg.eu/index.php/Accueil
195 | - Image processing libraries: skimage, OpenCV, Pillow
196 | - python package to read exif data: https://github.com/zenwerk/Pyxif
197 |
198 | ## Development
199 |
200 | Message us to be added as a contributor; you can then modify the code at your own convenience with the following steps:
201 |
202 | To work on a development version and keep using the latest changes, install it with the following:
203 |
204 | ```shell
205 | git clone git@github.com:ArcticSnow/photo4D.git
206 | pip install -e [path2folder/photo4D]
207 | ```
208 |
209 | and to upload the latest changes to PyPI, simply:
210 |
211 | 1. change the version number in the file ```photo4d/__version__.py```
212 | 2. run from a terminal in the photo4D folder, given your $HOME/.pypirc is correctly set:
213 |
214 | ```shell
215 | python setup.py upload
216 | ```
217 |
218 |
219 | Platform: UNKNOWN
220 | Classifier: License :: OSI Approved :: MIT License
221 | Classifier: Programming Language :: Python
222 | Classifier: Programming Language :: Python :: 3
223 | Classifier: Programming Language :: Python :: 3.6
224 | Classifier: Programming Language :: Python :: Implementation :: CPython
225 | Classifier: Programming Language :: Python :: Implementation :: PyPy
226 | Requires-Python: >=3.6.0
227 | Description-Content-Type: text/markdown
228 | Provides-Extra: Required to use the class pcl_process()
229 |
--------------------------------------------------------------------------------
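For reference, the pipeline that `convert_pcl2dem()` (in `Class_pcl_processing.py`) assembles for a single file looks roughly like the JSON below; the file names are placeholders, and the resolution, radius, bounds and nodata values are the class defaults quoted in the README section above:

```python
import json

pip_dem = json.dumps({
    "pipeline": [
        {"type": "readers.las", "filename": "2019-05-01T12-00-00_clean.las"},
        {
            "type": "writers.gdal",
            "filename": "2019-05-01T12-00-00_clean_1m.tif",
            "gdaldriver": "GTiff",
            "output_type": "all",      # write min, max, mean, idw, count, stdev bands
            "resolution": 1,
            "radius": 1.4,             # resolution * 1.4
            "bounds": "([416100, 416900], [6715900, 6716700])",
            "nodata": -9999
        }
    ]
})
```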
/photo4d.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | LICENSE
2 | MANIFEST.in
3 | README.md
4 | setup.py
5 | photo4d/Class_pcl_processing.py
6 | photo4d/Class_photo4D.py
7 | photo4d/Detect_Sift.py
8 | photo4d/Image_utils.py
9 | photo4d/Process.py
10 | photo4d/Utils.py
11 | photo4d/XML_utils.py
12 | photo4d/__init__.py
13 | photo4d/__version__.py
14 | photo4d/pdal_python_filter.py
15 | photo4d.egg-info/PKG-INFO
16 | photo4d.egg-info/SOURCES.txt
17 | photo4d.egg-info/dependency_links.txt
18 | photo4d.egg-info/requires.txt
19 | photo4d.egg-info/top_level.txt
--------------------------------------------------------------------------------
/photo4d.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/photo4d.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | lxml
2 | pandas
3 | numpy
4 | matplotlib
5 | opencv-python
6 | pillow
7 |
8 | [Required to use the class pcl_process()]
9 | pdal
10 | json
11 |
--------------------------------------------------------------------------------
/photo4d.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | photo4d
2 |
--------------------------------------------------------------------------------
/photo4d/Class_pcl_processing.py:
--------------------------------------------------------------------------------
1 | '''
2 | Class and functions to process the point clouds
3 | Simon. Filhol, November 2019
4 |
5 | '''
6 |
7 | import pdal, json, glob, os
8 |
9 |
10 | class pcl_process(object):
11 |
12 | def __init__(self, project_path, ext='ply'):
13 |
14 | if not os.path.exists(project_path):
15 | print("ERROR The path " + project_path + " doesn't exists")
16 | return
17 | else:
18 | self.project_path = project_path
19 | os.chdir(self.project_path)
20 |
21 | # parameters for point cloud filtering
22 | self.x_offset = 410000 # these values are default for Finse, NORWAY
23 | self.y_offset = 6710000 # these values are default for Finse, NORWAY
24 |
25 | # Cropping area
26 | self.crop_xmin = 416100
27 | self.crop_xmax = 416900
28 | self.crop_ymin = 6715900
29 | self.crop_ymax = 6716700
30 | self.crop_zmin = 1200
31 | self.crop_zmax = 1500
32 |
33 | # Raster bounding box, default is same as cropping box.
34 | self.raster_xmin = self.crop_xmin
35 | self.raster_xmax = self.crop_xmax
36 | self.raster_ymin = self.crop_ymin
37 | self.raster_ymax = self.crop_ymax
38 |
39 | # parameters for conversion to GeoTiff
40 | self.resolution = 1
41 | self.radius = self.resolution * 1.4
42 | self.nodata = -9999
43 | self.gdaldriver = "GTiff"
44 | self.output_type = ["min", "max", "mean", "idw", "count", "stdev"]
45 |
46 | def add_ply_pcl(self):
47 | os.chdir(self.project_path)
48 | self.ply_pcl_flist = glob.glob("*.ply")
49 | print("=======================\n PLY point clouds added: ")
50 | for file in self.ply_pcl_flist:
51 | print(file)
52 | print(".......................")
53 | print(str(self.ply_pcl_flist.__len__()) + " point clouds added")
54 | print("=======================")
55 |
56 | def add_las_pcl(self):
57 | os.chdir(self.project_path)
58 | self.las_pcl_flist = glob.glob("*.las")
59 | print("=======================\n LAS point clouds added: ")
60 | for file in self.las_pcl_flist:
61 | print(file)
62 | print(".......................")
63 | print(str(self.las_pcl_flist.__len__()) + " point clouds added")
64 | print("=======================")
65 |
66 | #@staticmethod
67 | def pipeline_realization(self, pip_json, print_result=True):
68 | try:
69 | # ===============================================
70 | # Pipeline execution
71 |             print('Starting pdal pipeline')
72 |             pipeline = pdal.Pipeline(pip_json)
73 |             if print_result:
74 |                 print(pipeline)
75 | 
76 |             pipeline.validate()  # check if our JSON and options were good
77 | 
78 |             # run the pipeline
79 |             pipeline.execute()
80 |
81 | if print_result:
82 | arrays = pipeline.arrays
83 | metadata = pipeline.metadata
84 | log = pipeline.log
85 | print("\n================")
86 | print("Arrays:")
87 | print(arrays)
88 | print("\n================")
89 | print("Metadata:")
90 | print(metadata)
91 | print("\n================")
92 | print("Log:")
93 | print(log)
94 |
95 | print("pdal pipeline finished")
96 | return True
97 |         except Exception as e:
98 |             print("Error while running the pdal pipeline: " + str(e))
99 |             return False
100 |
101 | def filter_pcl(self, file_input, file_output, print_result=True):
102 | '''
103 |         Function to filter a point cloud: cropping to the ROI, removing statistical outliers, and saving the output to .las format
104 | '''
105 | pip_filter_json = json.dumps(
106 | {
107 | "pipeline":
108 | [
109 | file_input,
110 | {
111 | "type": "filters.python",
112 | "script": "pdal_python_filter.py",
113 | "function": "add_XY_UTM",
114 | "pdalargs": {"x_offset": self.x_offset, "y_offset": self.y_offset}
115 | },
116 | {
117 | "type": "filters.crop",
118 | "bounds": str(([self.crop_xmin, self.crop_xmax], [self.crop_ymin, self.crop_ymax]))
119 | },
120 | {
121 | "type": "filters.range",
122 | "limits": "Z[" + str(self.crop_zmin) + ":" + str(self.crop_zmax) + "]"
123 | },
124 | {
125 | "type": "filters.lof",
126 | "minpts": 20
127 | },
128 | {
129 | "type": "filters.range",
130 | "limits": "LocalOutlierFactor[:1.2]"
131 | },
132 | {
133 | "type": "filters.range",
134 | "limits": "Classification[7:12]"
135 | },
136 | {
137 | "type": "writers.las",
138 | "filename": file_output,
139 | "scale_x": 1,
140 | "scale_y": 1,
141 | "scale_z": 1
142 | }
143 | ]
144 | }
145 | )
146 |
147 |
148 | if print_result:
149 | print(pip_filter_json)
150 | self.pipeline_realization(pip_filter_json, print_result=print_result)
151 |
152 | def filter_all_pcl(self, print_result=True):
153 | '''
154 | Function to process all pcl with filter_pcl() function
155 | '''
156 | print("=======================")
157 | for file in self.ply_pcl_flist:
158 | self.filter_pcl(file, file[:-4] + '_clean.las', print_result=print_result)
159 | print(".......................")
160 | print("All PLY files filtered")
161 | print("=======================")
162 |
163 | def convert_pcl2dem(self, input_file, output_file, print_result=True):
164 | '''
165 | Function to convert .las point cloud to a raster (.tif)
166 | '''
167 | pip_dem = json.dumps(
168 | {
169 | "pipeline": [
170 | {"type": "readers.las",
171 | "filename": input_file
172 | },
173 | {
174 | "filename": output_file,
175 | "gdaldriver": "GTiff",
176 | "output_type": "all",
177 | "resolution": self.resolution,
178 | "radius": self.radius,
179 | "bounds": str(([self.raster_xmin, self.raster_xmax], [self.raster_ymin, self.raster_ymax])),
180 | "type": "writers.gdal",
181 | "nodata": self.nodata
182 | }
183 | ]
184 | })
185 | self.pipeline_realization(pip_dem, print_result=print_result)
186 |
187 | def convert_all_pcl2dem(self, print_result=True):
188 | '''
189 |         Function to convert all point clouds to DEMs with the convert_pcl2dem() function
190 | '''
191 | print("=======================")
192 | for file in self.las_pcl_flist:
193 | self.convert_pcl2dem(file, file[:-4] + '_' + str(self.resolution) + 'm.tif', print_result=print_result)
194 | print(".......................")
195 | print("All LAS converted to DEMs")
196 | print("=======================")
197 |
198 | def extract_ortho_value(self, input_file, output_file, print_result=True):
199 | '''
200 |         Function to extract a monochrome Value (HSV) orthophoto from a .las point cloud and save it as a raster (.tif)
201 | '''
202 | pip_ortho = json.dumps(
203 | {
204 | "pipeline":[
205 | {"type": "readers.las",
206 | "filename": input_file
207 | },
208 | {
209 | "type": "filters.python",
210 | "script": "pdal_python_filter.py",
211 | "function": "rgb2value",
212 | "add_dimension": "Value",
213 | "module": "anything"
214 | },
215 | {
216 | "filename": output_file, #file.split('.')[0] + '_' + str(resolution) + 'm_value.tif',
217 | "gdaldriver": "GTiff",
218 | "output_type": "mean",
219 | "dimension" : "Value",
220 | "resolution": self.resolution,
221 | "radius": self.radius,
222 | "bounds": str(([self.raster_xmin, self.raster_xmax], [self.raster_ymin, self.raster_ymax])),
223 | "type": "writers.gdal",
224 | "nodata": self.nodata
225 | }
226 | ]
227 | })
228 | self.pipeline_realization(pip_ortho, print_result=print_result)
229 |
230 | def extract_all_ortho_value(self, print_result=True):
231 | '''
232 | Function to process all pcl and derive an orthophoto containing the Value (computed from RGB to HSV)
233 | '''
234 |
235 | print("=======================")
236 | for file in self.las_pcl_flist:
237 | self.extract_ortho_value(file, file[:-4] + '_' + str(self.resolution) + 'm_value.tif', print_result=print_result)
238 | print(".......................")
239 | print("All LAS converted to Value orthophoto (monochrome)")
240 | print("=======================")
241 |
242 | @staticmethod
243 | def custom_pipeline(json_pipeline):
244 | '''
245 |         Function to wrap a custom-made pdal pipeline. Input should be a dict following the PDAL JSON pipeline syntax
246 | '''
247 | return json.dumps(json_pipeline)
248 |
249 | def apply_custom_pipeline(self, pipeline, file_list=None, print_result=True):
250 | """
251 |         Function to apply a custom pipeline to a list of files (defaults to the .las file list); the pipeline itself must reference its input files
252 | """
253 | if file_list is None:
254 | file_list = self.las_pcl_flist
255 |
256 | print("=======================")
257 | for file in file_list:
258 | self.pipeline_realization(pipeline, print_result=print_result)
259 | print(".......................")
260 | print("Custom pipeline applied to all files")
261 | print("=======================")
262 |
263 | if __name__ == "__main__":
264 |
265 |     # Create a pcl_class object, indicating the path to the photo4d project
266 | my_pcl = pcl_process(project_path="path_to_project_folder")
267 |
268 | my_pcl.resolution = 1 # set the resolution of the final DEMs
269 |
270 |     # Set the bounding box of the Region of Interest (ROI)
271 | my_pcl.crop_xmin = 416100
272 | my_pcl.crop_xmax = 416900
273 | my_pcl.crop_ymin = 6715900
274 | my_pcl.crop_ymax = 6716700
275 | my_pcl.nodata = -9999
276 |
277 |     # add the paths of the .ply point cloud files to the python class
278 | my_pcl.add_ply_pcl()
279 |
280 | # filter the point clouds with pdal routine, and save resulting point clouds as .las file
281 | my_pcl.filter_all_pcl()
282 |
283 | # add path of the .las files
284 | my_pcl.add_las_pcl()
285 |
286 |     # convert the .las point clouds to DEMs (geotiff)
287 | my_pcl.convert_all_pcl2dem()
288 |
289 | # Extract Value orthophoto from RGB
290 | my_pcl.extract_all_ortho_value()
291 |
292 | ###########
293 | # Custom processing pdal pipeline
294 |
295 |
296 |
297 |
--------------------------------------------------------------------------------
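The `__main__` demo above ends at the custom-pipeline step without showing one. A minimal sketch of how `custom_pipeline()` and `apply_custom_pipeline()` fit together, using the `my_pcl` object created above; the decimation stage and the file names are illustrative only, and any pipeline following the PDAL JSON syntax would do:

```python
my_pcl.add_las_pcl()

# custom_pipeline() simply wraps the dict with json.dumps().
pip = my_pcl.custom_pipeline({
    "pipeline": [
        "2019-05-01T12-00-00_clean.las",
        {"type": "filters.decimation", "step": 10},   # keep every 10th point
        {"type": "writers.las", "filename": "2019-05-01T12-00-00_thinned.las"}
    ]
})
my_pcl.apply_custom_pipeline(pip)
```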
/photo4d/Class_photo4D.py:
--------------------------------------------------------------------------------
1 | '''
2 | Program XXX Part I
3 |
4 | TODO:
5 | - add the possibility to treat each camera independently with the file MicMac-LocalChantierDescripteur.xml
6 |   Automatically generate this xml file based on the camera names and number
7 |
8 |
9 | '''
10 |
11 | # import public library
12 | import os, glob
13 | from os.path import join as opj
14 | import numpy as np
15 | import pandas as pd
16 | from typing import Union
17 | from shutil import copyfile, rmtree, copytree
18 | from distutils.dir_util import copy_tree
19 |
20 | # Import project libary
21 | import photo4d.Process as proc
22 | import photo4d.Utils as utils
23 | import photo4d.Detect_Sift as ds
24 | import photo4d.Image_utils as iu
25 |
26 |
27 | class Photo4d(object):
28 | # Class constants
29 | # folders
30 | IMAGE_FOLDER = 'Images'
31 | ORI_FOLDER = "Ori-Ini"
32 | ORI_FINAL = "Ori-Bascule"
33 | MASK_FOLDER = 'Masks'
34 | GCP_FOLDER = 'GCP'
35 | RESULT_FOLDER = "Results"
36 | # file names
37 | GCP_COORD_FILE_INIT = 'GCPs_coordinates.xml'
38 | GCP_COORD_FILE_FINAL = 'GCPs_pick-S3D.xml'
39 | DF_DETECT_FILE = 'df_detect.csv'
40 | SET_FILE = 'set_definition.txt'
41 | GCP_PRECISION=0.2 # GCP precision in m
42 | GCP_POINTING_PRECISION=10 # Pointing precision of GCPs in images (pixels)
43 | GCP_PICK_FILE = 'GCPs_pick.xml'
44 | GCP_PICK_FILE_2D = 'GCPs_pick-S2D.xml'
45 | GCP_DETECT_FILE = 'GCPs_detect-S2D.xml'
46 | GCP_NAME_FILE = 'GCPs_names.txt'
47 | shift=[410000, 6710000, 0]
48 | useMask=False
49 | # Parameters
50 | distortion_model="Figee"
51 |
52 |
53 | def __init__(self, project_path, ext='JPG'):
54 | if not os.path.exists(project_path):
55 |             print("ERROR The path " + project_path + " doesn't exist")
56 | return
57 |
58 | # add main folder
59 | self.project_path = os.path.abspath(project_path)
60 |         print("Creating photo4d object for the folder " + self.project_path)
61 |
62 | # add camera folders
63 | if os.path.exists(opj(self.project_path, Photo4d.IMAGE_FOLDER)):
64 | self.cam_folders = [opj(self.project_path, Photo4d.IMAGE_FOLDER, cam) for cam in
65 | os.listdir(opj(self.project_path, Photo4d.IMAGE_FOLDER))]
66 | self.nb_folders = len(self.cam_folders)
67 |             self.cam_names = sorted([x.split('/')[-1] for x in self.cam_folders])
68 | print("Added {} camera folders : \n {}".format(self.nb_folders, '\n '.join(self.cam_folders)))
69 | else:
70 | print('You must create a folder "' + Photo4d.IMAGE_FOLDER + '/" containing your camera folders')
71 | return
72 |
73 | # =========================================================================
74 | # add picture sets
75 | picture_set_def = opj(self.project_path, Photo4d.SET_FILE)
76 | if os.path.exists(picture_set_def):
77 | self.sorted_pictures = utils.pictures_array_from_file(picture_set_def)
78 | print("Added picture sets from " + picture_set_def)
79 | else:
80 | self.sorted_pictures = None
81 | # set default selected set to the last one
82 | self.selected_picture_set = -1
83 |
84 | # =========================================================================
85 | # add initial orientation
86 | if os.path.exists(opj(self.project_path, Photo4d.ORI_FOLDER)):
87 | print("Added initial orientation")
88 | self.in_ori = opj(self.project_path, Photo4d.ORI_FOLDER)
89 | else:
90 | self.in_ori = None
91 |
92 | # =========================================================================
93 | # add image masks
94 | if os.path.exists(opj(self.project_path, Photo4d.MASK_FOLDER)):
95 | self.masks = opj(self.project_path, Photo4d.MASK_FOLDER)
96 |             print("Added masks from " + self.masks) # todo add the set of masks (and ori)
97 | else:
98 | self.masks = None
99 |
100 | # add GCP initial files
101 | # =========================================================================
102 | if os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)):
103 | self.gcp_coord_file = opj(self.project_path,Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)
104 | print("Added gcp coordinates file")
105 | else:
106 | self.gcp_coord_file = None
107 | if os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)):
108 | self.gcp_names = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)
109 | else:
110 | self.gcp_names = None
111 |
112 | # extension of the images
113 | self.ext = ext
114 |
115 | # condition on picture dates, to process only a few sets
116 | self.cond = None
117 |
118 | # Create temp folder
119 | self.tmp_path = opj(self.project_path, "tmp")
120 | if not os.path.exists(self.tmp_path):
121 | os.makedirs(self.tmp_path)
122 |
123 |
124 |
125 | def __str__(self):
126 | string = "\n=======================================================================\n" \
127 | "Project Photo4d located at " + self.project_path + \
128 | "\n======================================================================="
129 | string += "\n Contains {} camera folders : \n {}".format(self.nb_folders, '\n '.join(self.cam_folders))
130 | if self.sorted_pictures is None:
131 | string += "\n Pictures unsorted"
132 | else:
133 | string += "\n Pictures sorted in {} sets ".format(len(self.sorted_pictures))
134 |             string += "\n The current selected set is {}".format(self.sorted_pictures[self.selected_picture_set][2:])
135 |
136 | string += "\n=======================================================================\n"
137 | if self.in_ori is not None:
138 | string += " Initial orientation computed"
139 | string += "\n=======================================================================\n"
140 |
141 | if self.masks is not None:
142 | string += " Masks done "
143 | string += "\n=======================================================================\n"
144 |
145 | if self.gcp_coord_file is not None:
146 | string += " Absolute coordinates of GCPs are given"
147 |             if getattr(self, 'dict_image_gcp', None) is not None:
148 | string += "\n GCPs image coordinates are computed "
149 | string += "\n=======================================================================\n"
150 |
151 | return string
152 |
153 | def sort_picture(self, time_interval=600):
154 | self.sorted_pictures = iu.sort_pictures(self.cam_folders, opj(self.project_path, Photo4d.SET_FILE),
155 | time_interval=time_interval,
156 | ext=self.ext)
157 | return self.sorted_pictures
158 |
159 |
160 | def check_picture_quality(self, luminosity_thresh=1, blur_thresh=6):
161 | '''
162 | Function to Check if pictures are not too dark and/or too blurry (e.g. fog)
163 | '''
164 | if self.sorted_pictures is None:
165 |             print("ERROR You must launch the sort_pictures() method before check_picture_quality()")
166 | return
167 | self.sorted_pictures = iu.check_picture_quality(self.cam_folders, opj(self.project_path, Photo4d.SET_FILE),
168 | self.sorted_pictures,
169 | lum_inf=luminosity_thresh,
170 | blur_inf=blur_thresh)
171 | return self.sorted_pictures
172 |
173 |
174 | def timeSIFT_orientation(self, resolution=5000, distortion_mode='Fraser', display=False, clahe=False,
175 | tileGridSize_clahe=8):
176 | '''
177 |         Function to estimate the camera orientations for all image sets at once (timeSIFT approach), using the MicMac commands Tapioca (tie points) and Tapas (bundle adjustment)
178 | 
179 |         :param resolution: image size in pixels at which Tapioca looks for tie points
180 | '''
181 | # change from working dir to tmp dir
182 | os.chdir(self.tmp_path)
183 |
184 | # select the set of good pictures to estimate initial orientation
185 |
186 |
187 | for s in range(len(self.sorted_pictures)):
188 | if self.sorted_pictures[s, 1]:
189 | selected_line = self.sorted_pictures[s]
190 |
191 | for i in range(len(self.cam_folders)):
192 | in_path = opj(self.cam_folders[i], selected_line[i + 2])
193 | out_path = opj(self.tmp_path, selected_line[i + 2])
194 | if clahe:
195 | iu.process_clahe(in_path, tileGridSize_clahe, out_path=out_path)
196 | else:
197 | copyfile(in_path, out_path)
198 |
199 | # Execute mm3d command for orientation
200 | success, error = utils.exec_mm3d("mm3d Tapioca All {} {}".format(".*" + self.ext, resolution), display=display)
201 | success, error = utils.exec_mm3d(
202 | "mm3d Tapas {} {} Out={}".format(distortion_mode, ".*" + self.ext, Photo4d.ORI_FOLDER[4:]), display=display)
203 |
204 | ori_path = opj(self.project_path, Photo4d.ORI_FOLDER)
205 | if success == 0:
206 | # copy orientation file
207 | if os.path.exists(ori_path): rmtree(ori_path)
208 | copytree(opj(self.tmp_path, Photo4d.ORI_FOLDER), ori_path)
209 | self.in_ori = ori_path
210 | else:
211 | print("ERROR Orientation failed\nerror : " + str(error))
212 |
213 | os.chdir(self.project_path)
214 |
215 |
216 | def create_mask_masterIm(self, del_pictures=True, master_folder_id=0):
217 | '''
218 | Create a mask on the image of the master_folder_id for the selected set
219 | Note : Only the mask of the central (MASTER) image is necessary
220 | '''
221 |
222 | if not os.path.exists(self.tmp_path): os.makedirs(self.tmp_path)
223 | # select the set of good pictures to estimate initial orientation
224 | selected_line = self.sorted_pictures[self.selected_picture_set]
225 | in_path = opj(self.cam_folders[master_folder_id], selected_line[master_folder_id + 2])
226 | out_path = opj(self.tmp_path, selected_line[master_folder_id + 2])
227 | copyfile(in_path, out_path)
228 | ds.exec_mm3d('mm3d SaisieMasqQT {} Name=Mask.tif'.format(out_path))
229 | self.useMask=True
230 |
231 | def prepare_gcp_files(self, gcp_coords_file, file_format='N_X_Y_Z', display=True):
232 | '''
233 |         Function to convert GCP coordinates from a text file to the MicMac xml format. Make sure your text file matches file_format (the delimiter is any whitespace)
234 | '''
235 |
236 | if not os.path.exists(opj(self.project_path, Photo4d.GCP_FOLDER)):
237 | os.makedirs(opj(self.project_path, Photo4d.GCP_FOLDER))
238 |
239 | # copy coordinates file into the project
240 | path2txt = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)[:-4] + ".txt"
241 | copyfile(gcp_coords_file, path2txt)
242 |
243 | success, error = utils.exec_mm3d('mm3d GCPConvert #F={} {}'.format(file_format, path2txt),
244 | display=display)
245 | if success == 0:
246 | self.gcp_coord_file = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_INIT)
247 | gcp_table = np.loadtxt(path2txt, dtype=str)
248 |
249 | try:
250 | gcp_name = gcp_table[:, file_format.split('_').index("N")]
251 | np.savetxt(opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE), gcp_name, fmt='%s',
252 | newline=os.linesep)
253 | self.gcp_names = opj(self.project_path, Photo4d.GCP_FOLDER, Photo4d.GCP_NAME_FILE)
254 | except ValueError: # todo add a coherent except
255 | print("ERROR prepare_GCP_files(): Check file format and file delimiter. Delimiter is any space")
256 | else:
257 | print("ERROR prepare_GCP_files(): Check file format and file delimiter. Delimiter is any space")
258 | return 0
259 |
260 | def prepare_Camera_file(self, cam_pos_file, file_format='N_X_Y_Z', display=False):
261 | '''
262 |         Function to convert camera positions from a text file into a MicMac GNSS orientation (OriConvert), assigning each camera folder's position to all of its images
263 | 
264 |         :param cam_pos_file: text file with one line per camera folder: name, X, Y, Z
265 |         :param file_format: column order in cam_pos_file (default 'N_X_Y_Z')
266 |         :param display: show the MicMac console output
267 |         :return:
268 | '''
269 | cam = pd.read_csv(os.path.join(self.project_path, cam_pos_file), sep=' ', names=(file_format.split('_')), skiprows=1)
270 | all_images = pd.DataFrame()
271 | for my_cam in self.cam_folders:
272 | df = pd.DataFrame()
273 | df['N'] = os.listdir(my_cam)
274 | df['X'] = np.repeat(cam.X.loc[cam.N.values == os.path.split(my_cam)[-1]].values, df.N.shape[0])
275 | df['Y'] = np.repeat(cam.Y.loc[cam.N.values == os.path.split(my_cam)[-1]].values, df.N.shape[0])
276 | df['Z'] = np.repeat(cam.Z.loc[cam.N.values == os.path.split(my_cam)[-1]].values, df.N.shape[0])
277 |
278 |             all_images = pd.concat([all_images, df], ignore_index=True)
279 | #print(all_images)
280 | all_images = all_images[file_format.split('_')]
281 | Images_pos = 'Images_pos.txt'
282 | os.chdir(self.tmp_path)
283 | with open(Images_pos, 'w') as file:
284 | file.write('#F=' + ' '.join(file_format.split('_')) + '\n')
285 | all_images.to_csv(file, index=False, sep=' ', header=False, mode='a')
286 |
287 | # 1. convert CAM position textfile to xml with oriconvert
288 |         commandConv = 'mm3d OriConvert #F={} {} RAWGNSS_N'.format(file_format,
289 |                                                                   Images_pos)
290 | print(commandConv)
291 | success, error = utils.exec_mm3d(commandConv)
292 | os.chdir(self.project_path)
293 |
294 |
295 | def pick_initial_gcps(self):
296 | '''
297 | Function to pick GCP locations on the reference set of images with no a priori.
298 |
299 |         Pick a few GCPs (3 to 5) so that MicMac can compute a rough estimate of the camera orientation. Then use compute_transform() and pick_all_gcps() to pick all the GCPs of known location
300 | '''
301 | os.chdir(self.tmp_path)
302 |
303 |         if self.gcp_coord_file is None or self.gcp_names is None:
304 |             print("ERROR prepare_gcp_files must be applied first"); return
305 | gcp_path = opj(self.project_path, Photo4d.GCP_FOLDER)
306 | copy_tree(opj(gcp_path), opj(self.tmp_path))
307 | # select the set of image on which to pick GCPs manually
308 | selected_line = self.sorted_pictures[self.selected_picture_set]
309 | file_set = "("
310 | for i in range(len(self.cam_folders)):
311 | file_set += selected_line[i + 2] + "|"
312 | file_set = file_set[:-1] + ")"
313 |
314 | commandSaisieAppuisInitQt='mm3d SaisieAppuisInitQt "{}" Ini {} {}'.format(file_set, self.GCP_NAME_FILE,
315 | self.GCP_PICK_FILE)
316 | print(commandSaisieAppuisInitQt)
317 | utils.exec_mm3d(commandSaisieAppuisInitQt)
318 |
319 | # Go back from tmp dir to project dir
320 | os.chdir(self.project_path)
321 |
322 |
323 | def pick_all_gcps(self, resolution=5000):
324 | '''
325 | Function to pick GCP locations on the reference set of images, with a predicted position.
326 |
327 | Pick all GCPs of known location.
328 | '''
329 |
330 | os.chdir(self.tmp_path)
331 |
332 | # select the set of image on which to pick GCPs manually
333 | selected_line = self.sorted_pictures[self.selected_picture_set]
334 | file_set = "("
335 | for i in range(len(self.cam_folders)):
336 | file_set += selected_line[i + 2] + "|"
337 | file_set = file_set[:-1] + ")"
338 |
339 | command='mm3d SaisieAppuisPredicQt "{}" Bascule-Ini {} {}'.format(file_set,
340 | self.GCP_COORD_FILE_INIT,
341 | self.GCP_PICK_FILE)
342 | print(command)
343 | utils.exec_mm3d(command)
344 |
345 | # Go back from tmp dir to project dir
346 | os.chdir(self.project_path)
347 |
348 | def compute_transform(self, doCampari=False, CAM_position=False, GCP_position=True):
349 | '''
350 | Function to apply the transformation computed from the GCPs to all images.
351 |
352 | Set doCampari=True once all points are input and you are ready to carry on.
353 | '''
354 |
355 | os.chdir(self.tmp_path)
356 | if GCP_position:
357 | CAM_position=False
358 | elif CAM_position:
359 | GCP_position=False
360 | elif not GCP_position and not CAM_position:
361 |             print('Must choose either CAM position or GCP position')
362 |
363 | # select all the images
364 | file_set = ".*" + self.ext
365 |
366 | if GCP_position:
367 | commandBasc = 'mm3d GCPBascule {} Ini Bascule-Ini {} {}'.format(file_set,
368 | self.GCP_COORD_FILE_INIT,
369 | self.GCP_PICK_FILE_2D)
370 | print(commandBasc)
371 | utils.exec_mm3d(commandBasc)
372 |
373 | if(doCampari):
374 | command = 'mm3d Campari {} Bascule-Ini Bascule GCP=[{},{},{},{}] AllFree=1'.format(file_set, self.GCP_COORD_FILE_INIT, self.GCP_PRECISION, self.GCP_PICK_FILE_2D, self.GCP_POINTING_PRECISION)
375 | print(command)
376 | success, error = utils.exec_mm3d(command)
377 | if success == 0:
378 | # copy orientation file
379 | ori_path = opj(self.project_path,self.ORI_FINAL)
380 | if os.path.exists(ori_path): rmtree(ori_path)
381 | copytree(opj(self.tmp_path, Photo4d.ORI_FINAL), ori_path)
382 | else:
383 | print("ERROR Orientation failed\nerror : " + str(error))
384 | if CAM_position:
385 | # 2. Center bascule from the orientation out of Tapas to RAWGNSS_N
386 | commandBasc = 'mm3d CenterBascule {} Ini RAWGNSS_N Bascule-Ini'.format(file_set)
387 | print(commandBasc)
388 | utils.exec_mm3d(commandBasc)
389 |
390 | if(doCampari):
391 | #mm3d Campari .*JPG Ground_Init_RTL Ground_RTL EmGPS=[RAWGNSS_N,5] AllFree=1 SH=_mini
392 | command = 'mm3d Campari {} Bascule-Ini Bascule EmGPS=[RAWGNSS_N,5] AllFree=1 SH=_mini'.format(file_set)
393 | print(command)
394 | success, error = utils.exec_mm3d(command)
395 | if success == 0:
396 | # copy orientation file
397 | ori_path = opj(self.project_path, self.ORI_FINAL)
398 | if os.path.exists(ori_path): rmtree(ori_path)
399 | copytree(opj(self.tmp_path, Photo4d.ORI_FINAL), ori_path)
400 | else:
401 | print("ERROR Orientation failed\nerror : " + str(error))
402 |
403 | # Go back from tmp dir to project dir
404 | os.chdir(self.project_path)
405 |
406 |
407 |
408 |
409 | def pick_ManualTiePoints(self):
410 | '''
411 |         Function to pick additional points that can be used as 'GCPs'. These get coordinate estimates based on the camera orientation, and are used in the other sets of images for triangulation.
412 |         This way, we artificially increase the number of GCPs, and use the selected set of reference images as the absolute reference against which the other 3D models are oriented.
413 | 
414 |         Pick as many points as possible that are landmarks across all the sets of images.
415 | '''
416 |
417 | os.chdir(self.tmp_path)
418 |
419 | # select the set of image on which to pick GCPs manually
420 | selected_line = self.sorted_pictures[self.selected_picture_set]
421 | file_set = "("
422 | for i in range(len(self.cam_folders)):
423 | file_set += selected_line[i + 2] + "|"
424 | file_set = file_set[:-1] + ")"
425 |
426 | command='mm3d SaisieAppuisPredicQt "{}" Ori-Bascule {} {}'.format(file_set,
427 | self.GCP_COORD_FILE_INIT,
428 | self.GCP_PICK_FILE)
429 | print(command)
430 | utils.exec_mm3d(command)
431 | self.gcp_coord_file = opj(self.project_path,Photo4d.GCP_FOLDER, Photo4d.GCP_COORD_FILE_FINAL)
432 |
433 | # Go back from tmp dir to project dir
434 | os.chdir(self.project_path)
435 |
436 |
437 | def process_all_timesteps(self, master_folder_id=0, clahe=False, tileGridSize_clahe=8,
438 | zoomF=1, Ori='Bascule', DefCor=0.0, shift=None, keep_rasters=True, display=False):
439 | if self.sorted_pictures is None:
440 | print("ERROR You must apply sort_pictures() before doing anything else")
441 | return
442 |
443 | proc.process_all_timesteps(self.tmp_path, self.sorted_pictures, opj(self.project_path, Photo4d.RESULT_FOLDER),
444 | clahe=clahe, tileGridSize_clahe=tileGridSize_clahe, zoomF=zoomF,
445 | master_folder_id=master_folder_id, Ori=Ori, useMask=self.useMask, DefCor=DefCor,
446 | shift=shift, keep_rasters=keep_rasters, display_micmac=display)
447 |
448 |
449 |
450 | def set_selected_set(self, img_or_index: Union[int, str]):
451 | if self.sorted_pictures is None:
452 | print("ERROR You must apply sort_pictures before trying to chose a set")
453 | return
454 | else:
455 | if type(img_or_index) == int:
456 | self.selected_picture_set = img_or_index
457 | print(
458 | "\n The current selected set is now {}".format(self.sorted_pictures[self.selected_picture_set][2:]))
459 | elif type(img_or_index) == str:
460 | found, i = False, 0
461 | while (not found) and (i < len(self.sorted_pictures)):
462 | if img_or_index in self.sorted_pictures[i]:
463 | found = True
464 | self.selected_picture_set = i
465 | print("\n The current selected set is now {}".format(
466 | self.sorted_pictures[self.selected_picture_set][2:]))
467 | i += 1
468 | if not found:
469 | print('image {} not in sorted_pictures'.format(img_or_index))
470 |
471 |
472 |
473 | def clean_up_tmp(self):
474 | '''
475 | Function to delete the working folder.
476 | '''
477 | try:
478 | rmtree(self.tmp_path)
479 | except FileNotFoundError:
480 | pass
481 | except PermissionError:
482 | print("Permission Denied, cannot delete " + self.tmp_path)
483 | except OSError:
484 | pass
485 |
486 |
487 |
488 | if __name__ == "__main__":
489 |
490 |     ## Initialize the project
491 |     myproj = Photo4d(project_path=r"C:\Users\lucg\Desktop\Test_V1_2019")
492 | # myproj.sort_picture()
493 | # myproj.check_picture_quality()
494 | # myproj.prepare_gcp_files(r"C:\Users\lucg\Desktop\Test_V1_2019\GCPs_coordinates_manual.txt",file_format="N_X_Y_Z")
495 |
496 | ## Create a mask on one of the master images to limit the area where correlation is attempted
497 |     # myproj.create_mask_masterIm(master_folder_id=1)
498 |
499 | ## Compute tie points throughout the stack
500 | # myproj.timeSIFT_orientation()
501 | ## TODO : mask tie points
502 |
503 | ## Deal with GCPs
504 | ## Select a set to input GCPs
505 | # myproj.set_selected_set("DSC02728.JPG")
506 | ## Input GCPs in 3 steps
507 | # myproj.pick_initial_gcps()
508 | # myproj.compute_transform()
509 | # myproj.pick_all_gcps()
510 |     ## Optionally, change the selected set to add GCP input to more images (repeat n times):
511 | #myproj.compute_transform()
512 | #myproj.set_selected_set("DSC02871.JPG")
513 | #myproj.pick_all_gcps()
514 | #myproj.compute_transform(doCampari=True)
515 |
516 | ## Do the dense matching
517 | # myproj.process_all_timesteps()
518 |
519 | ## Cleanup
520 | # myproj.clean_up_tmp()
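521 | 
522 |     ## Alternative sketch: orient using camera GNSS positions instead of GCPs
523 |     ## ('camera_positions.txt' is an illustrative file name, format N_X_Y_Z)
524 |     # myproj.prepare_Camera_file('camera_positions.txt', file_format='N_X_Y_Z')
525 |     # myproj.compute_transform(doCampari=True, CAM_position=True, GCP_position=False)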
--------------------------------------------------------------------------------
/photo4d/Detect_Sift.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import os
3 |
4 | #from photo4d.Process import pictures_array_from_file
5 | from photo4d.Image_utils import load_date
6 | import photo4d.XML_utils as uxml
7 | from photo4d.Utils import exec_mm3d
8 |
9 | from shutil import rmtree
10 | import pandas as pd
11 | import numpy as np
12 |
13 |
14 | """
15 | Compute GCP positions using a MicMac command : Tapioca
16 | """
17 |
18 |
19 | def cut_image(image_path, pos, kernel_size, output_name="", output_folder="/"):
20 | """
21 | extract a portion of an image, centered on pos, of a given size (works for .JPG only)
22 | :param image_path: path of the input image
23 | :param pos: tuple, position for the center of output image (xpos, ypos)
24 | :param kernel_size: tuple, size of the output image, in pixels
25 | :param output_name: full name for output file (ex: "output.JPG")
26 | :param output_folder: folder for saving output, must already exist
27 | """
28 | if output_name == "": output_name = image_path.split('/')[-1].split(".")[-2] + "_cut" + ".JPG"
29 | if not os.path.exists(output_folder): os.makedirs(output_folder)
30 | img = cv.imread(image_path)
31 | pos = int(pos[0]), int(pos[1])
32 | ysize, xsize = img.shape[0], img.shape[1]
33 | if (0 <= pos[0] <= ysize) and (0 <= pos[1] <= xsize):
34 | xmin, xmax = pos[1] - kernel_size[1] // 2, (pos[1] + kernel_size[1] // 2)
35 | ymin, ymax = pos[0] - kernel_size[0] // 2, pos[0] + kernel_size[0] // 2
36 |
37 | output = img[ymin:ymax, xmin:xmax]
38 |
39 | cv.imwrite(output_folder + output_name, output)
40 | else:
41 |         print("\033[0;31mPosition {} not in the picture {} (size {}), image ignored\033[0m".format(pos, image_path,
42 |                                                                                                     img.shape))
43 |
44 |
45 | def detect_from_s2d_xml(s2d_xml_path, folder_list, pictures_array, samples_folder_list=None,
46 | kernel_size=(200, 200), display_micmac=False):
47 | """
48 |     write extract files from an xml with image positions, and launch the detection of the points for all files in each folder
49 | :param s2d_xml_path: file created by the function SaisieAppuisInitQT, in MicMac
50 | :param folder_list: list of folders containing pictures. One folder is for one camera
51 | :param samples_folder_list: list of folder where to save samples, in the same order as folder_list
52 | by default, create "Samples/" folder in each camera folder
53 | :param pictures_array: array containing names of pictures to process
54 | each row is considered as a set, and pictures names must be in the same order as secondary_folder_list
55 | the first item of a row is a boolean, indicating if the set is valid or not
56 | :param kernel_size: size of the portion of picture to cut for detection (in pixels)
57 | :param display_micmac: to activate or stop printing MicMac log
58 | :return:
59 | Data Frame where each row store initial and detected image position of the tie points :
60 | [image name, gcp name, TP coord X in image_ini, TP Y ini,index of folder/camera, date,
61 | TP coord X in image detect, TP Y detect, coord X GCP ini, Y GCP ini]
62 | """
63 | # do some checks, and set default values for sample_folder_list
64 | if s2d_xml_path[-4:] != ".xml": raise IOError("The parameter S2D_xml_path must be an .xml file")
65 | nb_folders = len(folder_list)
66 | for i in range(nb_folders):
67 | folder = folder_list[i]
68 | if not os.path.exists(folder):
69 | raise IOError("Invalid path " + folder + " in folder_list")
70 | if folder[-1] != "/": folder_list[i] += "/"
71 |
72 | if samples_folder_list is None:
73 | samples_folder_list = []
74 | for i in range(nb_folders):
75 | samples_folder_list.append(folder_list[i] + "Samples/")
76 | elif nb_folders != len(samples_folder_list):
77 | print("WARNING the parameter samples_folder_list must have the same number of folders as folder_list")
78 | return
79 | for i in range(nb_folders):
80 | samples_folder = samples_folder_list[i]
81 | if not os.path.exists(samples_folder):
82 | try:
83 | os.makedirs(samples_folder)
84 | except IOError:
85 | print("WARNING Invalid path " + samples_folder + " in samples_folder_list")
86 | return
87 | if samples_folder[-1] != "/": samples_folder_list[i] += "/"
88 |
89 | # ==================================================================================================================
90 | # collect data from xml
91 | dict_img = uxml.read_S2D_xmlfile(s2d_xml_path)
92 | panda_result = [] # store all the results
93 | # iterate over pictures
94 | for image in dict_img.keys():
95 |
96 | dict_measures = dict_img[image]
97 |
98 | # try to found the image in all folders (assuming all pictures have different names)
99 | # if the image is found, do the detection in this folder
100 | found = False
101 | i = 0
102 | while not found and i < nb_folders:
103 | if image in os.listdir(folder_list[i]):
104 | found = True
105 | # if the image is found, launch the detection
106 | # ==============================================
107 | print("\nDetection launched for picture {} as reference...".format(image))
108 | for gcp in dict_measures.keys():
109 | print("\n Detection of point {} in folder {}/{}".format(gcp, i + 1, nb_folders))
110 | pos_ini = dict_measures[gcp]
111 | date = load_date(folder_list[i] + image)
112 | # add a line for the master image, with the gcp position, because micmac won't launch
113 | # the detection on this one, but the point coordinates are still useful
114 | panda_result.append(
115 | [image, gcp, kernel_size[0]/2, kernel_size[1]/2, i, date,
116 | kernel_size[0] / 2, kernel_size[1] / 2, pos_ini[0], pos_ini[1], image])
117 |
118 | # creation of extract for each picture of the folder, around the point initial position
119 | print(" Create extract for detection :\n")
120 | images_list = [] # store pictures to use for detection
121 | for line in pictures_array:
122 | if line[0]:
123 | print(" - " + line[i + 1] + "... ", end="")
124 |
125 | cut_image(folder_list[i] + line[i + 1], (pos_ini[1], pos_ini[0]), kernel_size=kernel_size,
126 | output_folder=samples_folder_list[i] + gcp + "/", output_name=line[i + 1])
127 | images_list.append(line[i + 1])
128 | print("done")
129 |
130 | # launch Tapioca on the selected files
131 | # ==============================================
132 | # create file telling MicMac which files to process
133 | uxml.write_couples_file(samples_folder_list[i] + gcp + "/" + "couples.xml", image, images_list)
134 | os.chdir(samples_folder_list[i] + gcp + "/")
135 | print("\n Launching MicMac...")
136 | command = "mm3d Tapioca File couples.xml -1 ExpTxt=1"
137 | success, error = exec_mm3d(command, display_micmac)
138 |
139 | # read results and append it to result
140 | # ==============================================
141 | print(success)
142 | if success == 0:
143 | print(" Tapioca executed with success, reading results")
144 | # read output txt files
145 | for picture_recap in os.listdir("Homol/Pastis" + image):
146 | if picture_recap.split(".")[-1] == "txt":
147 | tie_points = uxml.get_tiepoints_from_txt("Homol/Pastis" + image + "/" + picture_recap)
148 | date = load_date(folder_list[i] + picture_recap[:-4])
149 |
150 | for tie_point in tie_points:
151 | # append each tie point coordinates to the result
152 | # [image name, gcp name, TP coord X in image_ini, TP Y ini,index of folder/camera,
153 | # date, TP coord X in image detect, TP Y detect, coord X GCP ini, Y GCP ini]
154 | panda_result.append(
155 | [".".join(picture_recap.split(".")[:-1]), gcp, tie_point[0], tie_point[1], i,
156 | date,
157 | tie_point[2], tie_point[3], pos_ini[0], pos_ini[1], image])
158 | try:
159 | rmtree("Pastis")
160 | rmtree("Tmp-MM-Dir")
161 | except PermissionError:
162 | print(" couldn't erase temporary files due to permission error")
163 | else:
164 | print(" WARNING Fail in Tapioca : " + str(error))
165 |
166 | else:
167 | i += 1
168 | if not found:
169 |         print("\033[0;31mPicture {} cannot be found in folder_list\033[0m".format(image))
170 |
171 | return pd.DataFrame(panda_result,
172 | columns=['Image', 'GCP', 'Xini', 'Yini', 'folder_index', 'date', 'Xdetect', 'Ydetect',
173 | 'Xgcp_ini', 'Ygcp_ini', 'Image_ini'])
174 |
175 |
176 | def extract_values(df, magnitude_max=50, nb_values=5, max_dist=50, kernel_size=(200, 200), method="Median"):
177 | """
178 | extract detected positions from the DataFrame containing tie points coordinates
179 | feel free to add new methods
180 | :param df: DataFrame like the one from detect_from_s2d()
181 | :param magnitude_max: max value in pixels for the magnitude of the vector (from ini to detect)
182 | :param nb_values: max values to be used for the method
183 | the values used are the closest from the GCP initial position
184 | :param max_dist: max value in pixel for the distance from the GCP to the vector origin
185 | :param kernel_size: size of the extracts used for detection (to determine coordinates of gcp in the extracts)
186 | :param method: method to use for computing positions
187 | :return: tuple with 2 elements:
188 | - a dictionary containing the computed position of GCPs in each picture, readable for the others functions,
189 | indexed first by picture names and then by GCP names
190 | - a panda DataFrame containing the computed position of GCPs in each picture
191 | columns :
192 | ['Image', 'GCP', 'Xpos', 'Ypos', 'nb_tiepoints', 'date','nb_close_tiepoints']
193 | """
194 |
195 | # compute new positions of GCP according to the shift of each tie point
196 | df['Xshift'] = df.Xgcp_ini + df.Xdetect - df.Xini
197 | df['Yshift'] = df.Ygcp_ini + df.Ydetect - df.Yini
198 |
199 | # compute vector module
200 | df['magnitude'] = np.sqrt((df.Xini - df.Xdetect) ** 2 + (df.Yini - df.Ydetect) ** 2)
201 |
202 | # compute vector direction
203 | df['direction'] = np.arctan2((df.Xini - df.Xdetect), (df.Yini - df.Ydetect)) * 180 / np.pi + 180
204 |
205 | # compute from gcp and tie point in the initial image (gcp is in the center of the extracts)
206 | pos_center = kernel_size[0] / 2, kernel_size[1] / 2
207 | df['dist'] = np.sqrt((df.Xini - pos_center[0]) ** 2 + (df.Yini - pos_center[1]) ** 2)
208 |
209 |     # filter out outliers having an incoherent magnitude
210 | df_filtered = df.loc[df.magnitude <= magnitude_max]
211 |
212 | dic_image_gcp = {}
213 | result = []
214 | # iterate over images
215 | for image, group in df_filtered.groupby(['Image']):
216 | dic_gcp = {}
217 | for gcp, group_gcp in group.groupby(['GCP']):
218 | nb_tiepoints = group_gcp.shape[0]
219 | group_gcp_filtered = group_gcp.loc[group_gcp.dist <= max_dist]
220 | nb_close_tiepoints = group_gcp_filtered.shape[0]
221 | group2 = group_gcp_filtered.nsmallest(nb_values, 'dist')
222 |             if group_gcp_filtered.shape[0] != 0:  # if there are no values left in the DataFrame, the point is ignored
223 | if method == "Median":
224 | measure = group2.Xshift.median(), group2.Yshift.median()
225 | elif method == "Mean":
226 | measure = group2.Xshift.mean(), group2.Yshift.mean()
227 | elif method == 'Min':
228 | measure = group2.Xshift.min(), group2.Yshift.min()
229 | else:
230 | print('Method must be one of these values:\n"Median"\n"Min"\n"Mean"')
231 | return
232 | date = group2.date.min()
233 | dic_gcp[gcp] = measure
234 |
235 |
236 | result.append([image, gcp, measure[0], measure[1], nb_tiepoints, date, nb_close_tiepoints])
237 | if dic_gcp != {}: dic_image_gcp[image] = dic_gcp
238 |
239 |
240 | return dic_image_gcp, pd.DataFrame(result, columns=['Image', 'GCP', 'Xpos', 'Ypos', 'nb_tiepoints', 'date',
241 | 'nb_close_tiepoints'])
242 |
243 |
244 | if __name__ == "__main__":
245 | df = detect_from_s2d_xml(
246 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/GCP/GCPs_pick-S2D.xml",
247 | ["C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_east",
248 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_mid",
249 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/Images/Cam_west"],
250 | # pictures_array=pictures_array_from_file(
251 | # "C:/Users/Alexis/Documents/Travail/Stage_Oslo/Grandeurnature/Pictures/Mini_projet/set_definition.txt"),
252 | display_micmac=False
253 | )
254 | #print(df)
255 | #df.to_csv("C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/test_beau.csv", sep=",")
256 | # df = pd.read_csv("C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/test_sift_camtot_new_gcp.csv")
257 | # result = extract_values(df, threshold=50, nb_values=5, max_dist=200, method="Median")
258 | # print(result[0])
259 |
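260 |     # Follow-up sketch: turn the tie points into one estimated position per GCP
261 |     # and image (the output path below is an illustrative placeholder)
262 |     # dic_image_gcp, df_positions = extract_values(df, magnitude_max=50, nb_values=5,
263 |     #                                              max_dist=50, method="Median")
264 |     # df_positions.to_csv("gcp_positions.csv", sep=",")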
--------------------------------------------------------------------------------
/photo4d/Image_utils.py:
--------------------------------------------------------------------------------
1 | # coding : utf8
2 | """
3 |
4 | To keep metadata when we transform the picture, we use the module pyxif (MIT License), available at :
5 | https://github.com/zenwerk/Pyxif
6 | """
7 |
8 |
9 | from datetime import datetime
10 | import pyxif, os
11 | import cv2 as cv
12 | import numpy as np
13 | import photo4d.Utils as utils
14 |
15 |
16 | def sort_pictures(folder_path_list, output_path, ext="jpg", time_interval=600, date_in_filename=False, date_pattern=None):
17 | """
18 |     Regroup pictures from different folders if they are taken within time_interval seconds of each other.
19 | Result is stored in an array/file,
20 | :param folder_path_list: list of the path to folders containing pictures, One folder must correspond to one camera (e.g. cam1/ cam2/ cam3/)
21 | :param output_path: path+filename of the output .txt file containing picture sets
22 | :param ext: extension of the pictures
23 |     :param time_interval: interval in seconds; the maximum time elapsed between the shots of the pictures of one set
24 | :return: array with the sorted pictures. For each set a boolean is added, always True, but can be modified later
25 | """
26 | print("\n Collecting files\n..........................................")
27 | # create a list containing image names and dates for each folder
28 | list_im = []
29 | for folder_path in folder_path_list:
30 | image_date_list = []
31 | flist = os.listdir(folder_path)
32 | for filename in flist:
33 | try:
34 | if filename.split(".")[-1].lower() == ext.lower():
35 | image_date_list.append((filename, load_date(os.path.join(folder_path,filename), date_in_filename, date_pattern )))
36 | except IndexError:
37 | pass
38 | list_im.append(image_date_list)
39 |     if len(list_im) < 2:
40 |         print("WARNING not enough folders\nTwo or more folders are needed to sort files")
41 | return None
42 | elif [] in list_im:
43 |         print("WARNING No image found in one or more folder(s)")
44 | return None
45 |
46 | sorted_pictures = []
47 | print(" Checking dates\n..........................................")
48 | with open(output_path, 'w') as f:
49 | f.write("# Pictures taken within {} of interval\n".format(time_interval))
50 |
51 | good, bad = 0, 0 # counters for correct and wrong sets
52 | # loop on the image of the first folder
53 | for image_ref in list_im[0]:
54 | date_ref = image_ref[1]
55 | pic_group = np.empty(len(list_im) + 2, dtype=object)
56 | pic_group[0] = date_ref.strftime("%Y-%m-%dT%H-%M-%S")
57 |         pic_group[1] = False  # pic_group[1] is a boolean, True if a picture is found in every folder
58 | pic_group[2] = image_ref[0]
59 | # for_file = [image_ref[0]] # list of the images taken within the interval
60 |
61 | # check for pictures taken whithin the interval
62 | for j in range(1, len(list_im)):
63 | folder = list_im[j] # list of the (filename,date) of one folder
64 | i, found = 0, False
65 | while not found and i < len(folder):
66 | date_var = folder[i][1]
67 | diff = abs(date_ref - date_var)
68 |                 if diff.days * 86400 + diff.seconds < time_interval:  # the two pictures are taken within time_interval seconds
69 | found = True
70 | pic_group[j + 2] = folder[i][0]
71 | i += 1
72 |
73 | if None not in pic_group:
74 | good += 1
75 | pic_group[1] = True
76 | print(" Pictures found in every folder corresponding to the timeInterval " + pic_group[0] + "\n")
77 | else:
78 | bad += 1
79 | print(" Missing picture(s) corresponding to the timeInterval " + pic_group[0] + "\n")
80 |
81 | sorted_pictures.append(pic_group)
82 | with open(output_path, 'a') as f:
83 | f.write(pic_group[0] + "," + str(pic_group[1]) + "," + str(pic_group[2:]) + "\n")
84 |
85 |     end_str = "# {} good sets of pictures found, {} incomplete sets, on a total of {} sets".format(good, bad, good + bad)
86 | print(end_str)
87 | with open(output_path, 'a') as f:
88 | f.write(end_str)
89 | return np.array(sorted_pictures)
90 |
91 |
92 |
93 | def check_picture_quality(folder_list, output_path, pictures_array, lum_inf=1, blur_inf=6):
94 | """
95 | This function is supposed to be called after sort_pictures, as it uses the kind of array created by sort_pictures,
96 | which could be either collected from the return value of the function, or the file "linked_files.txt" created in
97 | the main folder
98 |     It will filter out sets in which brightness is inferior to lum_inf or in which the "blur" (variance of the Laplacian) is
99 |     inferior to blur_inf
100 | :param folder_list:
101 | :param output_path:
102 | :param pictures_array: array with pictures name and sort output
103 |     :param lum_inf: minimum accepted brightness
104 |     :param blur_inf: minimum accepted variance of the Laplacian (blur metric)
105 | :return: same array, but some booleans will be set to False
106 | """
107 |
108 | print("\n Checking pictures\n..........................................")
109 |
110 | with open(output_path, 'w') as f:
111 | f.write(
112 | "# Pictures filtered with a minimum value of {} for brightness, {} for the variance of the Laplacian\n".format(
113 | lum_inf, blur_inf))
114 | print("# Pictures filtered with a minimum value of {} for brightness, {} for the variance of the Laplacian\n".format(
115 | lum_inf, blur_inf))
116 |
117 | good, bad = 0, 0
118 | I, J = pictures_array.shape
119 | for i in range(I):
120 | if pictures_array[i, 1]:
121 | min_lum = 9999
122 | min_blur = 9999
123 | for j in range(2, J):
124 | path = os.path.join(folder_list[j - 2],pictures_array[i, j])
125 | lum = load_bright(path)
126 |
127 | if lum < min_lum:
128 | min_lum = lum
129 | blur = blurr(path, 3)
130 | if blur < min_blur:
131 | min_blur = blur
132 |
133 |             print(pictures_array[i,0] + ' : Brightness = ' + str(min_lum) + ' : Blurriness = ' + str(min_blur))
134 | if min_lum < lum_inf or min_blur < blur_inf:
135 | pictures_array[i, 1] = False
136 | bad += 1
137 | else:
138 | good += 1
139 |
140 | with open(output_path, 'a') as f:
141 | for line in pictures_array:
142 | f.write(str(line[0]) + "," + str(line[1]) + "," + str(line[2:]) + "\n")
143 | end_line = " {} good set of pictures found, {} rejected sets, on a total of {} sets".format(good, bad, good + bad)
144 | f.write("#" + end_line)
145 | print(end_line)
146 | return pictures_array
147 |
148 |
149 |
150 | def load_date(filename, date_in_fname=False, date_pattern=None):
151 | """
152 |     Load the date of the shot, according to the image metadata
153 | :param filename: name/path of the file
154 | :param date_in_fname: Boolean for using filename as date or not
155 | :param date_pattern: a list of pattern using strptime formating system, e.g. ['cam1_%Y%m%d_%H%M.JPG','cam2_%Y%m%d_%H%M.JPG'].
156 | :return: datetime format
157 | """
158 | try:
159 | if date_in_fname:
160 | if date_pattern is None:
161 | print('ERROR: if using filename for date, date_pattern must be given')
162 | return
163 | else:
164 |                 if isinstance(date_pattern, str):
165 |                     date_pattern = [date_pattern]
166 |                 for pat in date_pattern:
167 |                     try:
168 |                         dateimage = datetime.strptime(filename.split('/')[-1], pat)
169 |                         break
170 |                     except ValueError:
171 |                         continue
172 |
173 | else:
174 | zeroth_dict, exif_dict, gps_dict = pyxif.load(filename)
175 | date,time=exif_dict[pyxif.PhotoGroup.DateTimeOriginal][1].split(" ")
176 | year, month,day = date.split(":")
177 | hour,minute,sec = time.split(":")
178 | dateimage= datetime(int(year), int(month), int(day), int(hour), int(minute) ,int(sec))
179 | return dateimage
180 | except KeyError:
181 | print("WARNING No date for file " + filename)
182 | return None
183 | except FileNotFoundError:
184 | print("WARNING Could not find file " + filename )
185 | return None
186 |
187 |
188 | def load_bright(filename):
189 | """
190 |     Load the luminosity of the shot scene, according to the image metadata
191 | :param filename: name/path of the file
192 | :return: float, level of brightness
193 |
194 | """
195 | try:
196 | zeroth_dict, exif_dict, gps_dict = pyxif.load(filename)
197 | num,denom = exif_dict[pyxif.PhotoGroup.BrightnessValue][1]
198 | brightness = num/denom
199 | return brightness
200 | except KeyError:
201 | try:
202 |             f_number = exif_dict[pyxif.PhotoGroup.FNumber][1][0] / exif_dict[pyxif.PhotoGroup.FNumber][1][1]
203 | ISO_speed = exif_dict[pyxif.PhotoGroup.ISOSpeedRatings][1]
204 |             expo_time = exif_dict[pyxif.PhotoGroup.ExposureTime][1][0] / exif_dict[pyxif.PhotoGroup.ExposureTime][1][1]
205 | brightness = np.log2(f_number**2) + np.log2(1/expo_time) - np.log2(2**(-7/4)*ISO_speed)
206 | return brightness
207 | except KeyError:
208 | print("WARNING No brightness data for file " + filename)
209 | print("Check if your exif data contains a 'BrightnessValue' tag ")
210 | return None
211 | except FileNotFoundError:
212 | print("WARNING Could not find file " + filename )
213 | return None
214 |
215 |
216 | def calc_lum(filename):
217 | image_bgr = cv.imread(filename)
218 | image_lab = cv.cvtColor(image_bgr, cv.COLOR_BGR2LAB)
219 | average_lum = cv.mean(cv.split(image_lab)[0])
220 | return average_lum
221 |
222 |
223 |
224 | def process_clahe_folder(in_folder, tileGridSize, grey=False, out_folder="", clip_limit=2,new_name_end="_Clahe"):
225 | """
226 |     Apply CLAHE to all jpeg files in a given folder
227 |     It is not possible to overwrite files, because the initial files are needed to copy-paste metadata
228 |
229 | :param in_folder: input folder path
230 | :param tileGridSize: size of the "blocks" to apply local histogram equalization
231 | :param grey: if True, the image will be converted to grayscale
232 | :param clip_limit: contrast limit, used to avoid too much noise
233 | :param out_folder: output folder path
234 | :param new_name_end: string put at the end of output files, without the extension
235 | :return:
236 | """
237 |
238 | # Process all the jpeg pictures in the following folder
239 | flist = np.sort(os.listdir(in_folder))
240 |
241 | for f in flist:
242 | try:
243 | if f.split(".")[-1].lower() in ["jpg","jpeg"]:
244 | in_path = in_folder + f
245 | if out_folder == "": out_folder = in_folder
246 | out_path = out_folder + f[:-4] + new_name_end + ".JPG"
247 |
248 | process_clahe(in_path, tileGridSize, grey=grey, out_path=out_path, clip_limit=clip_limit)
249 | except IndexError:
250 | pass
251 |
252 |
253 | def process_clahe(in_path, tileGridSize, grey=False, out_path="", clip_limit=2):
254 | """
255 |     Apply the CLAHE (contrast limited adaptive histogram equalization) method on an image
256 | for more information about CLAHE, see https://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html
257 |
258 |     Overwriting the image will raise an error, as the initial image is needed to copy-paste metadata
259 | :param in_path: input image
260 | :param tileGridSize: size of the "blocks" to apply local histogram equalization
261 | :param grey: if True, the image will be converted to grayscale
262 | :param out_path: output path, the folders must exists and the image extension must be valid
263 | by default, output will be saved as input_path/input_name_clahe.JPG
264 | :param clip_limit: contrast limit, used to avoid too much noise
265 | """
266 | if out_path == "":
267 | out_path = ".".join(in_path.split(".")[:-1]) + "_clahe.JPG"
268 |
269 | # read input
270 | print("Processing CLAHE method on " + in_path.split("/")[-1])
271 | img = cv.imread(in_path)
272 |
273 | # convert color to gray
274 | if grey: img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
275 |
276 | # apply a median filter before clahe
277 | img = cv.medianBlur(img, 3)
278 |
279 | # create clahe object
280 | clahe = cv.createCLAHE(clipLimit=clip_limit, tileGridSize=(tileGridSize, tileGridSize)) # CLAHE
281 |
282 | # apply CLAHE for each image channel, and then recreate the full image (only useful if gray==False)
283 | channels_ini = cv.split(img)
284 | channels_final = []
285 | for channel in channels_ini:
286 | # Apply CLAHE
287 | channels_final.append(clahe.apply(channel))
288 | img_final = cv.merge(channels_final)
289 |
290 | # save image and write metadata from initial file
291 | cv.imwrite(out_path, img_final)
292 | pyxif.transplant(in_path, out_path)
293 |
294 | def blurr(filename,ksize = 3):
295 |     image_bgr = cv.imread(filename)  # todo: the conversion to grayscale should be done on this line
296 | # image_gray = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY)
297 | return np.log(cv.Laplacian(image_bgr, cv.CV_64F,ksize=ksize).var())
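298 | 
299 | 
300 | if __name__ == "__main__":
301 |     # Minimal usage sketch (folder and file names are illustrative): group
302 |     # pictures from three camera folders taken within 10 minutes of each
303 |     # other, then reject sets that are too dark or too blurry.
304 |     folders = ["Images/Cam_east", "Images/Cam_mid", "Images/Cam_west"]
305 |     sets = sort_pictures(folders, "set_definition.txt", ext="jpg", time_interval=600)
306 |     if sets is not None:
307 |         sets = check_picture_quality(folders, "set_definition.txt", sets, lum_inf=1, blur_inf=6)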
--------------------------------------------------------------------------------
/photo4d/MicMac-LocalChantierDescripteur.xml:
--------------------------------------------------------------------------------
1 | <Global>
2 |     <ChantierDescripteur>
3 |         <KeyedNamesAssociations>
4 |             <Calcs>
5 |                 <Arrite> 1 1 </Arrite>
6 |                 <Direct>
7 |                     <PatternTransform> .*_1.JPG </PatternTransform>
8 |                     <CalcName> cam1 </CalcName>
9 |                 </Direct>
10 |             </Calcs>
11 |             <Calcs>
12 |                 <Arrite> 1 1 </Arrite>
13 |                 <Direct>
14 |                     <PatternTransform> .*_2.JPG </PatternTransform>
15 |                     <CalcName> cam2 </CalcName>
16 |                 </Direct>
17 |             </Calcs>
18 |             <Calcs>
19 |                 <Arrite> 1 1 </Arrite>
20 |                 <Direct>
21 |                     <PatternTransform> .*_3.JPG </PatternTransform>
22 |                     <CalcName> cam3 </CalcName>
23 |                 </Direct>
24 |             </Calcs>
25 |             <Key> NKS-Assoc-STD-CAM </Key>
26 |         </KeyedNamesAssociations>
27 |     </ChantierDescripteur>
28 | </Global>
29 |
--------------------------------------------------------------------------------
/photo4d/Process.py:
--------------------------------------------------------------------------------
1 | # coding : utf8
2 | # Generic imports
3 | import os
4 | from shutil import rmtree, move
5 | # Photo4D imports
6 | from photo4d.Utils import exec_mm3d
7 | import re
8 | from shutil import copyfile
9 |
10 | def process_one_timestep(work_folder, pictures_array, timestep, output_folder,
11 | clahe=False, tileGridSize_clahe=8, zoomF=1,
12 | master_folder_id=0, Ori='Bascule', useMask=False, DefCor=0.0,
13 | shift=None, keep_rasters=True, display_micmac=False):
14 | os.chdir(work_folder)
15 | I, J = pictures_array.shape
16 |
17 |     if pictures_array[timestep, 1]: # just skip the set if it is invalid
18 | # Setup parameters
19 | selected_line=pictures_array[timestep]
20 | img_set="("
21 | for j in range(2,J):
22 | img_set += selected_line[j] + "|"
23 | img_set = img_set[:-1] + ")"
24 |
25 | master_img=selected_line[master_folder_id + 2]
26 | date_str=selected_line[0]
27 | ply_name= date_str + '.ply'
28 |
29 | # Don't run again if already existing
30 | if os.path.exists(os.path.join(work_folder,ply_name)):
31 | move(os.path.join(work_folder,ply_name), os.path.join(output_folder,ply_name))
32 | if not os.path.exists(os.path.join(output_folder,ply_name)):
33 | # Define Malt command and run it
34 | if(useMask):
35 | copyfile(os.path.join(work_folder,'Mask.tif'),os.path.join(work_folder,master_img[:-4] +'_Masq.tif'))
36 | copyfile(os.path.join(work_folder,'Mask.xml'),os.path.join(work_folder,master_img[:-4] +'_Masq.xml'))
37 | command='mm3d Malt GeomImage {} {} Master={} DefCor={} ZoomF={}'.format(img_set, Ori,
38 | master_img, DefCor, zoomF)
39 | else:
40 | command='mm3d Malt GeomImage {} {} Master={} DefCor={} ZoomF={}'.format(img_set, Ori,
41 | master_img, DefCor, zoomF)
42 | print(command)
43 | success, error = exec_mm3d(command, display_micmac)
44 |
45 |             if not (success == 0):
46 | print('Something went wrong :' + str(error))
47 | else:
48 | # Find the last depth map and correlation file
49 | # Get a list of all the files in the Malt output folder
50 | files=[]
51 |                 for root, dirs, filenames in os.walk(os.path.join(work_folder,'MM-Malt-Img-'+ master_img[:-4])):
52 |                     # keep the file names of the last directory visited
53 |                     files = filenames
54 | nuage_re = re.compile(r'NuageImProf_STD-MALT_Etape_\d{1}.xml')
55 | correlation_re = re.compile(r'Correl_STD-MALT_Num_\d{1}.tif')
56 | depth_re = re.compile(r'Z_Num\d{1}_DeZoom' + str(zoomF) +'_STD-MALT.tif')
57 | nuage_files = [ x for x in files if nuage_re.match(x)]
58 | correlation_files = [ x for x in files if correlation_re.match(x)]
59 | depth_files = [ x for x in files if depth_re.match(x)]
60 | sorted_nuage_files = sorted(nuage_files,reverse=True)
61 | sorted_correlation_files = sorted(correlation_files,reverse=True)
62 | sorted_depth_files = sorted(depth_files,reverse=True)
63 | last_nuage=sorted_nuage_files[0]
64 | last_cor=sorted_correlation_files[0]
65 | last_depth=sorted_depth_files[0]
66 |
67 | # Create the point cloud
68 | if shift is None:
69 | command = 'mm3d Nuage2Ply MM-Malt-Img-{}/{} Attr={} Out={}'.format(
70 | '.'.join(master_img.split('.')[:-1]), last_nuage, master_img, ply_name)
71 | else:
72 | command = 'mm3d Nuage2Ply MM-Malt-Img-{}/{} Attr={} Out={} Offs={}'.format(
73 | '.'.join(master_img.split('.')[:-1]), last_nuage, master_img, ply_name, str(shift).replace(" ", ""))
74 |
75 | print(command)
76 | success, error = exec_mm3d(command, True)
77 |
78 | # Copy result to result folder
79 | # .ply point cloud
80 | move(os.path.join(work_folder,ply_name), os.path.join(output_folder,ply_name))
81 | # If we want to keep the correlation map and the depth map
82 | if(keep_rasters):
83 |                     move(os.path.join(work_folder,'MM-Malt-Img-' + master_img[:-4],last_cor), os.path.join(output_folder,date_str + '_Correlation.tif'))
84 |                     move(os.path.join(work_folder,'MM-Malt-Img-' + master_img[:-4],last_depth), os.path.join(output_folder,date_str + '_DepthMap.tif'))
85 |
86 | # Clean-up
87 |
88 | try:
89 | rmtree(os.path.join(work_folder,'MM-Malt-Img-' + master_img[:-4]))
90 | except FileNotFoundError:
91 | pass
92 | except PermissionError:
93 |         print("Permission Denied, cannot delete " + os.path.join(work_folder,'MM-Malt-Img-' + master_img[:-4]))
94 | except OSError:
95 | pass
96 | try:
97 | rmtree(os.path.join(work_folder,"Pyram"))
98 | except PermissionError:
99 | print("Permission Denied, cannot delete Pyram folder")
100 | except OSError:
101 | pass
102 | # Go back to project folder
103 | os.chdir('../')
104 |
105 |
106 | def process_all_timesteps(work_folder, pictures_array, output_folder,
107 | clahe=False, tileGridSize_clahe=8, zoomF=1,
108 | master_folder_id=0, Ori='Bascule', useMask=False, DefCor=0.0,
109 | shift=None, keep_rasters=True, display_micmac=False):
110 | """
111 | Run MicMac (mm3d Malt) on all valid picture sets
112 |     It is advised to give only absolute paths in the parameters
113 |
114 |
115 | :param work_folder: folder where the images and orientations are
116 | :param pictures_array: array with set definitions (also, validity of sets and timestamp)
117 | :param output_folder: directory for saving results
118 | :param clahe: if True, apply a "contrast limited adaptive histogram equalization" on the pictures before processing
119 |
120 | MicMac parameters: (see more documentation on official github and wiki (hopefully))
121 | :param zoomF: final zoom in the pyramidal correlation scheme
122 | :param master_folder_id: id of the folder containing the master images (central image of sets)
123 | :param Ori: Orientation to use for the correlation (Def='Bascule', the output of 'Class_photo4D.compute_transform(True)')
124 | :param DefCor: correlation threshold to reject area in the correlation process ([0-1] def=0)
125 | :param shift: shift for saving ply (if numbers are too big for 32 bit ply) [shiftE, shiftN, shiftZ]
126 | :param keep_rasters: keep the depth map and last correlation map
127 |     :param display_micmac: show the MicMac console output, only useful to follow individual set correlation status
128 | :return:
129 | """
130 | # ==================================================================================================================
131 | # checking path and parameters :
132 |     nb_folders = pictures_array.shape[1] - 2  # first two columns are the date and the validity flag
133 | 
134 |     if type(master_folder_id) != int or not (0 <= master_folder_id < nb_folders):
135 |         print("Invalid value {} for parameter master_folder_id, value set to 0".format(master_folder_id))
136 |         print("must be a valid index of the camera folder list")
137 | master_folder_id = 0
138 |
139 | # make output folder if not already present
140 | if not os.path.exists(output_folder): os.makedirs(output_folder)
141 |
142 | # Go through set for each set
143 | I, J = pictures_array.shape
144 | for timestep in range(I):
145 | process_one_timestep(work_folder, pictures_array, timestep, output_folder,
146 | clahe=clahe, tileGridSize_clahe=tileGridSize_clahe, zoomF=zoomF,
147 | master_folder_id=master_folder_id, Ori=Ori, useMask=useMask, DefCor=DefCor,
148 | shift=shift, keep_rasters=keep_rasters, display_micmac=display_micmac)
149 |
150 |
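151 | # Usage sketch (paths are placeholders): given a pictures_array like the one
152 | # returned by Image_utils.sort_pictures ([date, valid_flag, img_cam1, ...]),
153 | # the dense matching for every valid set would be run as:
154 | # process_all_timesteps("path_to_project/tmp", pictures_array,
155 | #                       "path_to_project/Results", Ori='Bascule',
156 | #                       shift=[410000, 6710000, 0])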
--------------------------------------------------------------------------------
/photo4d/Utils.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | """
3 | Some useful functions
4 | """
5 | from subprocess import Popen, PIPE, STDOUT
6 | import sys
7 | import numpy as np
8 |
9 |
10 |
11 | def exec_mm3d(command, display=True):
12 | """
13 | launch a MicMac command.
14 |     As MicMac handles errors by waiting for the user's input, we needed a way to carry on when a command fails.
15 |     This function will kill the process if an error happens, allowing further processing to be done
16 | :param command: MicMac command line, string beginning with mm3d
17 | to see every command allowed, see https://micmac.ensg.eu/index.php/Accueil, or Github
18 | :param display: display or not the logs from MicMac, boolean
19 | :return:
20 | """
21 | if(command[:5] != "mm3d "):
22 | print("WARNING The command must begin with mm3d\n")
23 | return 0, None
24 |
25 | print('\n\n================================================')
26 | print(command)
27 | print('================================================\n\n')
28 | process = Popen(command.split(" "), stdout=PIPE, stderr=STDOUT)
29 | for line_bin in iter(process.stdout.readline, b''):
30 | try:
31 | line = line_bin.decode(sys.stdout.encoding)
32 | if display:
33 | sys.stdout.write(line)
34 | # if waiting for input, which means an error was generated
35 | if '(press enter)' in line:
36 | print("Error in MicMac process, abort process")
37 | process.kill()
38 | return 1, None
39 | elif 'Warn tape enter to continue' in line:
40 | print("Value error in Tapas, abort process")
41 | process.kill()
42 | return 1,'Value error in Tapas'
43 | except UnicodeDecodeError:
44 | sys.stdout.write('---cannot decode this line---')
45 |
46 | # if no error occurs
47 | return 0, None
48 |
49 |
50 | def pictures_array_from_file(filepath):
51 | """
52 |     Could be way more efficient! And does not handle any error for the moment
53 | :param filepath:
54 | :return:
55 | """
56 | print("\nRetrieving data from file " + filepath + "\n.......................................")
57 | all_lines = []
58 | with open(filepath, 'r') as file:
59 | for line in file.readlines():
60 | if line[0] != "#" :
61 | list_temp = line.split(',')
62 | length = len(list_temp)
63 | array_line = np.empty(length, dtype=object)
64 | if list_temp[0].rstrip(" ").lower() == "true":
65 | array_line[0] = True
66 | else:
67 | array_line[0] = False
68 | for i in range(1, length):
69 | array_line[i] = list_temp[i].rstrip('\n')
70 | all_lines.append(array_line)
71 | print("Done")
72 | return np.array(all_lines)
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
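86 | if __name__ == "__main__":
87 |     # Quick sketch (assumes MicMac's mm3d is on the PATH): run a command
88 |     # through the wrapper; the returned status is 0 when no error was
89 |     # detected in the MicMac log.
90 |     status, error = exec_mm3d("mm3d Tapioca -help", display=True)
91 |     print("exec_mm3d returned:", status, error)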
--------------------------------------------------------------------------------
/photo4d/XML_utils.py:
--------------------------------------------------------------------------------
1 | # coding :utf8
2 |
3 | from lxml import etree
4 | import os
5 | import pandas as pd
6 | import numpy as np
7 | import cv2 as cv
8 |
9 |
10 | # todo: finish translating the comments into English
11 | def extract_res(ori_folder_path, xmlfile='Residus.xml', do_txt=False, output_folder_path="",
12 | output_name="residuals_last_iter.txt",
13 | sep=',', ):
14 | """
15 | Read the residual file from MicMac and create a txt file (with separator, allows .csv) with only residuals of the
16 | last iteration : mean residual and residual for each picture.
17 |
18 |     :param ori_folder_path: folder containing the xml residuals file from MicMac; it is an orientation folder (name beginning with "Ori-")
19 | :param xmlfile: name of xml file, always 'Residus.xml' from MicMac
20 | ======
21 | these parameter are only usefull if do_txt==True, in this case a txt file will be writen
22 | :param do_txt: if False, no file will be written
23 | :param output_folder_path: folder where to save the txt file, by default same as ori_folder_path
24 | :param output_name: name of the output file, default "residuals_last_iter.txt"
25 | :param sep: separator of output file, default ','
26 | ======
27 | :return: 1 if failed, dictionary of residuals if no errors detected
28 | the dictionary have nb_inter, AverageResidual and names of the pictures as index
29 | the element corresponding to one picture is a tuple ('Name', 'Residual', 'PercOk', 'NbPts', 'NbPtsMul')
30 | (for more information about these values, see MicMac documentation)
31 | """
32 |
33 | file_content = ""
34 | res_dict = {}  # renamed to avoid shadowing the builtin dict
35 |
36 | elements = ('Name', 'Residual', 'PercOk', 'NbPts', 'NbPtsMul')
37 | try:
38 | # Parsing of xml
39 | tree = etree.parse(ori_folder_path + xmlfile)
40 |
41 | # Getting number of iterations
42 | nb_iters = tree.xpath("/XmlSauvExportAperoGlob/Iters/NumIter")[-1].text
43 | file_content += 'nb_iters' + sep + nb_iters + '\n'
44 | res_dict['nb_iters'] = int(nb_iters)
45 |
46 | # Retrieve the mean residual of the last iteration
47 | av_resid = tree.xpath("/XmlSauvExportAperoGlob/Iters[NumIter={}][NumEtape=3]/\
48 | AverageResidual".format(nb_iters))[0].text
49 | file_content += 'AverageResidual' + sep + av_resid + '\n'
50 | res_dict['AverageResidual'] = float(av_resid)
51 |
52 | # Retrieve the data for each image of the last iteration
53 | file_content += ('\nName{}Residual{}PercOk{}NbPts{}NbPtsMul\n'.format(sep, sep, sep, sep))
54 | for img in tree.xpath("/XmlSauvExportAperoGlob/Iters[NumIter={}]\
55 | [NumEtape=3]/OneIm".format(nb_iters)):
56 | obj = ''
57 | for e in elements:
58 | obj += img.find(e).text + sep
59 | file_content += obj + '\n'
60 | image_name = obj.split(sep)[0]
61 | res_dict[image_name] = obj.split(sep)[1:-1]
62 | except OSError:
63 | print("WARNING Can't open the file " + ori_folder_path + xmlfile)
64 | return 1
65 | except etree.XMLSyntaxError:
66 | print("WARNING The xml is not correct")
67 | return 1
68 |
69 | # write the txt file
70 | if do_txt:
71 | if output_folder_path == "":
72 | output_folder_path = ori_folder_path
73 |
74 | # Creation of the txt file
75 | try:
76 | with open(output_folder_path + output_name, "w") as file:
77 | file.write(file_content)
78 | except IOError:
79 | print("Cannot write file")
80 |
81 | return res_dict
82 |
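# Usage sketch (the orientation folder name is hypothetical); note the trailing slash,
# since ori_folder_path and xmlfile are concatenated directly:
#
#     res = extract_res("Ori-Bascule/", do_txt=True)
#     if res != 1:
#         print(res['nb_iters'], res['AverageResidual'])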
83 |
84 | def read_S2D_xmlfile(file_path):
85 | """
86 | read a file containing pixel coordinates of points in images from the SaisieAppuis MicMac commands and put data
87 | into a dictionary
88 |
89 | :param file_path: path to the s2d xml file
90 | ========
91 | :return: a dictionary, pictures names are the indexes, and for each picture the element is a dictionary with points measurments
92 | todo cest vraiment trop mal ecrit !!!!!!
93 | { picture1 : {point1: (coordX, coordY),
94 | {point2: (coordX, coordY)}
95 | picture2 : {point1: (coordX, coordY),
96 | ...}
97 | ...}
98 | """
99 | dic_img_measures = {}
100 | # read the file
101 | try:
102 | with open(file_path, 'r'):
103 | # Parsing of xml
104 | tree = etree.parse(file_path)
105 | # loop on images
106 | for image in tree.xpath("/SetOfMesureAppuisFlottants/MesureAppuiFlottant1Im/NameIm"):
107 | dic_measures = {}
108 | # loop on the points
109 | for point in tree.xpath(
110 | "/SetOfMesureAppuisFlottants/MesureAppuiFlottant1Im[NameIm='{}']/OneMesureAF1I".format(
111 | image.text)):
112 | point_name = point[0].text.rstrip(" ")
113 | measure = point[1].text.split(" ")
114 | dic_measures[point_name] = (float(measure[0]), float(measure[1]))
115 |
116 | dic_img_measures[image.text] = dic_measures
117 |
118 | except etree.XMLSyntaxError:
119 | print("WARNING The xml file is not valid " + file_path)
120 | return
121 | except FileNotFoundError:
122 | print("WARNING Cannot find file S2D xml at " + file_path)
123 | return
124 |
125 | return dic_img_measures
126 |
127 |
128 | # todo: do something about this function, it is poorly written...
129 |
130 | def count_tiepoints_from_txt(main_folder_path):
131 | """
132 | generate a panda DataFrame with the tie points found by the Tapioca command of MicMac USED WITH ExpTxt=1
133 |
134 | :param main_folder_path: path of the folder where is situated the Homol folder, or path of the Homol folder
135 | :return:
136 | - a panda DataFrame, row and column indexes are the name of pictures used, and for cell, the number
137 | of Tie points found in image row compared to img colum todo rendre ca lisible
138 | Img1 Img2 Img3
139 | Img1 0 nb1,2 nb1,3
140 | Img2 nb2,1 0 nb2,3
141 | Img3 nb3,1 nb3,1 0
142 | """
143 |
144 | # path checking
145 | if main_folder_path[-1] != "/": main_folder_path += "/"
146 | if main_folder_path.split("/")[-2] != "Homol":
147 | main_folder_path += "Homol/"
148 |
149 | try:
150 | folder_list = os.listdir(main_folder_path)
151 |
152 | # collect picture names in Homol directory, each folder is for one picture
153 | index = []
154 | for folder in folder_list:
155 | index.append(folder[6:]) # remove Pastis, the folder name being like PastisNamePicture
156 |
157 | df = pd.DataFrame(np.zeros((len(folder_list), len(folder_list))), index=index, columns=index)
158 |
159 | # count tie points
160 | s = 0 # total tie points
161 | for folder in folder_list:
162 | file_list = os.listdir(main_folder_path + folder)
163 | for filename in file_list:
164 | if filename.split('.')[-1] == 'txt':
165 | with open(main_folder_path + folder + "/" + filename, 'r') as file:
166 |
167 | # basically just counting the number of rows in each file
168 | i = 0
169 | for line in file.readlines():
170 | i += 1
171 | s += 1
172 | df.loc[folder[6:], filename[:-4]] = i
173 | if s == 0:
174 | print('\033[0;31m WARNING, 0 Tie Points found, please check that ExpTxt=1 in Tapioca \033[0m')
175 | return df, s
176 | except IOError:
177 | print('\033[0;31m' + "Cannot open " + main_folder_path + '\033[0m')
178 |
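# Usage sketch (hypothetical project folder and picture names):
#
#     df, total = count_tiepoints_from_txt("my_project/")
#     print(total, df.loc["IMG_01.JPG", "IMG_02.JPG"])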
179 |
180 | def get_tiepoints_from_txt(path):
181 | point_list = []
182 | with open(path) as f:
183 | for line in f.readlines():
184 | point_list.append(line.split(" "))
185 | return np.array(point_list).astype(float)  # np.float is deprecated in modern numpy
186 |
187 |
188 | def write_S2D_xmlfile(dico_img_measures, file_name):
189 | """
190 | Write an xml file with 2D mesures of points in different images, in a way that MicMac can read it
191 | :param dico_img_measures: dictionnary containing 2D measures. Must looks like :
192 | {NameImage1 (String) : {NamePoint1 (String) : (measureX, measureY) (tuple of float),
193 | NamePoint2 (String) : (measureX, measureY) (tuple of float), todo la doc est a continuer
194 | ...},
195 | NameImage2 (String) : {NamePoint1 (String) : measure (String, 'coordPoint1Image2 coordPoint1Image2'),
196 | NamePoint2 (String) : measure (String, 'coordPoint2Image2 coordPoint2Image2'),
197 | ...}, ...}
198 | :param file_name: path or name of the output file
199 | """
200 | # Creation of the document root
201 | measures_set = etree.Element('SetOfMesureAppuisFlottants')
202 |
203 | # iterate over pictures
204 | for image, dico_measures in dico_img_measures.items():
205 |
206 | img_meas = etree.SubElement(measures_set, 'MesureAppuiFlottant1Im')
207 | name_img = etree.SubElement(img_meas, 'NameIm')
208 | name_img.text = image
209 |
210 | # iterate over measures for each picture
211 | for point, measure in dico_measures.items():
212 |
213 | pt_mes = etree.SubElement(img_meas, 'OneMesureAF1I')
214 | etree.SubElement(pt_mes, 'NamePt').text = point
215 | coord_img_pt = etree.SubElement(pt_mes, 'PtIm')
216 | coord_img_pt.text = "{} {}".format(measure[0], measure[1])
217 |
218 | # open the file for writing
219 | try:
220 | with open(file_name, 'w') as file:
221 | # Header
222 | file.write('\n')
223 | # Writing all the text we created
224 | file.write(etree.tostring(measures_set, pretty_print=True).decode('utf-8'))
225 | except IOError:
226 | print('Error while writing file')
227 | return
228 |
229 |
230 | def write_masq_xml(tif_mask, output=""):
231 | """
232 | write default xml file describing the mask from MicMac
233 | Even if this file seems useless, MicMac can throw an error without this file associated to the mask (in Malt)
234 |
235 | :param tif_mask: path to the MicMac mask, in .tif format
236 | :param output: path for output xml file
237 | """
238 | # do some checks
239 | if tif_mask.split('.')[-1] not in ["tif", "tiff"]:
240 | print("Wrong input path " + tif_mask + "\n Must be a .tif file")
241 | return
242 | if output == "":
243 | output = '.'.join(tif_mask.split('.')[:-1]) + ".xml"
244 | elif output.split('.')[-1] != "xml":
245 | print("Wrong output path " + output + "\n Must be a .xml file")
246 | return
247 |
248 | file_ori = etree.Element('FileOriMnt')
249 | name = etree.SubElement(file_ori, 'NameFileMnt')
250 | name.text = tif_mask
251 |
252 | nb_pix = etree.SubElement(file_ori, 'NombrePixels')
253 | shape = cv.imread(tif_mask).shape
254 | # todo find an easier way to get the image size
255 | nb_pix.text = "{} {}".format(shape[1], shape[0])
256 | # write some default values
257 | etree.SubElement(file_ori, 'OriginePlani').text = "0 0"
258 | etree.SubElement(file_ori, 'ResolutionPlani').text = "1 1"
259 | etree.SubElement(file_ori, 'OrigineAlti').text = "0"
260 | etree.SubElement(file_ori, 'ResolutionAlti').text = "1"
261 | etree.SubElement(file_ori, 'Geometrie').text = "eGeomMNTFaisceauIm1PrCh_Px1D"
262 |
263 | # write the xml file
264 | try:
265 | with open(output, 'w') as file:
266 | file.write('\n')
267 | file.write(etree.tostring(file_ori, pretty_print=True).decode('utf-8'))
268 | except IOError:
269 | print('Error while writing file')
270 | return
271 |
272 |
273 | def change_Ori(initial_pictures, final_pictures, ori_folder_path):
274 | """
275 | Changes all the files of an Ori- folder from MicMac, in the way that every reference to initial pictures
276 | is replaced by reference to final_pictures
277 | WARNING this will totally modify the folder without backup of the initial one, think about make a copy first
278 | :param initial_pictures: list of initial pictures to be replaced, in the same order as the final one
279 | :param final_pictures: list of initial pictures to be replaced, in the same order as the final one
280 | :param ori_folder_path: path of the orientation folder (name beginning with Ori- )
281 | """
282 | # some checks
283 |
284 | if os.path.basename(ori_folder_path)[:4] != "Ori-":
285 | print("Ori path is not valid: {}\nYou need to enter the path to the Ori- folder".format(ori_folder_path))
286 | return
287 | elif len(initial_pictures) != len(final_pictures):
288 | print("List of input and output pictures must have the same size")
289 | return
290 | nb_pictures = len(initial_pictures)
291 |
292 | # change orientation files
293 | for j in range(nb_pictures):
294 | # rename Orientation files
295 | if os.path.exists(os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(initial_pictures[j]))):
296 | os.rename(os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(initial_pictures[j])),
297 | os.path.join(ori_folder_path, 'Orientation-{}.xml'.format(final_pictures[j])))
298 |
299 | # write a short summary
300 | with open(os.path.join(ori_folder_path,"log.txt"), 'w') as log:
301 | log.write("This orientation was not calculated by MicMac with these pictures\n\n")
302 | log.write("The names of pictures were just changed \n\n")
303 | for i in range(nb_pictures):
304 | log.write("{} was replaced by {}\n".format(initial_pictures[i], final_pictures[i]))
305 |
306 |
307 | def change_xml(initial_pictures, final_pictures, xml_path):
308 | """
309 | Replace all occurrences of the initial pictures with the final pictures in an xml/txt file
310 | initial_pictures[i] will be replaced by final_pictures[i]
311 |
312 | :param initial_pictures: list of pictures to be replaced
313 | :param final_pictures: list of replacement pictures, in the same order as initial pictures
314 | :param xml_path: path to the file to process
315 | """
316 | # checking length
317 | if len(initial_pictures) != len(final_pictures):
318 | print("List of input and output pictures must have the same size")
319 | return
320 | nb_pictures = len(initial_pictures)
321 |
322 | # Read the xml file
323 | with open(xml_path, 'r') as file:
324 | file_data = file.read()
325 | for i in range(nb_pictures):
326 | # Replace the target string
327 | file_data = file_data.replace(initial_pictures[i], final_pictures[i])
328 | # Write the file out again
329 | with open(xml_path, 'w') as file:
330 | file.write(file_data)
331 |
332 |
333 | def write_couples_file(file_path, master_image, pictures_list):
334 | """
335 | write an xml file for micmac command Tapioca, for pictures linked to one image
336 |
337 | :param file_path: path to xml file, if it already exists, it will be replaced
338 | :param master_image: image to compare with all the others
339 | :param pictures_list:
340 | """
341 | root = etree.Element('SauvegardeNamedRel')
342 | for img in pictures_list:
343 | couple = etree.SubElement(root, 'Cple')
344 | couple.text = str(master_image) + " " + str(img)
345 | with open(file_path, 'w') as xml:
346 | xml.write('\n')
347 | xml.write(etree.tostring(root, pretty_print=True).decode('utf-8'))
348 |
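# Example output (hypothetical picture names) for
# write_couples_file("couples.xml", "IMG_01.JPG", ["IMG_02.JPG", "IMG_03.JPG"]):
#
#     <SauvegardeNamedRel>
#       <Cple>IMG_01.JPG IMG_02.JPG</Cple>
#       <Cple>IMG_01.JPG IMG_03.JPG</Cple>
#     </SauvegardeNamedRel>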
349 |
350 | if __name__ == "__main__":
351 | print(read_S2D_xmlfile(
352 | "C:/Users/Alexis/Documents/Travail/Stage_Oslo/photo4D/python_script/Stats/all_points-S2D_dist_max.xml"))
353 |
--------------------------------------------------------------------------------
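A sketch of how the two S2D helpers above round-trip; the file names and the one-pixel shift are hypothetical:

    from photo4d.XML_utils import read_S2D_xmlfile, write_S2D_xmlfile

    measures = read_S2D_xmlfile("Measures-S2D.xml")
    if measures is not None:
        # shift every 2D measure by one pixel in x, then save a copy
        shifted = {img: {pt: (x + 1.0, y) for pt, (x, y) in pts.items()}
                   for img, pts in measures.items()}
        write_S2D_xmlfile(shifted, "Measures-S2D_shifted.xml")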
/photo4d/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ArcticSnow/photo4D/2210683e3d352ffb648962ff043d14e2aa24a80f/photo4d/__init__.py
--------------------------------------------------------------------------------
/photo4d/__version__.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | VERSION = (0, 2, 2)
4 |
5 | __version__ = '.'.join(map(str, VERSION))
6 |
--------------------------------------------------------------------------------
/photo4d/pdal_python_filter.py:
--------------------------------------------------------------------------------
1 | '''
2 | Custom Python filters for PDAL's filters.python stage. Each function receives and fills dicts of
3 | numpy arrays; the global 'pdalargs' is the dictionary of arguments passed in by the PDAL pipeline.
4 | '''
5 | import numpy as np
6 | import matplotlib.colors as cl
7 | import pandas as pd
8 | #import pdb
9 |
10 | def add_XY_UTM(ins, outs):
11 | X = ins['X']
12 | X += float(pdalargs['x_offset'])
13 | Y = ins['Y']
14 | Y += float(pdalargs['y_offset'])
15 | outs['Y'] = Y
16 | outs['X'] = X
17 | return True
18 |
19 |
20 | def voxelGrid(ins, outs):
21 |
22 | ROI = [float(pdalargs['Xmin']),
23 | float(pdalargs['Xmax']),
24 | float(pdalargs['Ymin']),
25 | float(pdalargs['Ymax']),
26 | float(pdalargs['Zmin']),
27 | float(pdalargs['Zmax'])]
28 | leaf = float(pdalargs['leaf'])
29 |
30 | df = pd.DataFrame({'X': ins['X'],
31 | 'Y': ins['Y'],
32 | 'Z': ins['Z'],
33 | 'Red': ins['Red'],
34 | 'Green': ins['Green'],
35 | 'Blue': ins['Blue']})
36 |
37 | for i in range(0,6):
38 | if ROI[i]==-9999:
39 | if i==0:
40 | ROI[i] = df.iloc[:,0].min()
41 | elif i==1:
42 | ROI[i] = df.iloc[:,0].max()
43 | elif i==2:
44 | ROI[i] = df.iloc[:,1].min()
45 | elif i==3:
46 | ROI[i] = df.iloc[:,1].max()
47 | elif i==4:
48 | ROI[i] = df.iloc[:,2].min()
49 | elif i==5:
50 | ROI[i] = df.iloc[:,2].max()
51 |
52 | #print(ROI)
53 | nx = int((ROI[1]-ROI[0])/leaf)  # np.int is deprecated in modern numpy
54 | ny = int((ROI[3]-ROI[2])/leaf)
55 | nz = int((ROI[5]-ROI[4])/leaf)
56 |
57 | bins_x = np.linspace(ROI[0], ROI[1], nx+1)
58 | df['x_cuts'] = pd.cut(df.X,bins_x, labels=False)
59 | bins_y = np.linspace(ROI[2],ROI[3], ny+1)
60 | df['y_cuts'] = pd.cut(df.Y,bins_y, labels=False)
61 | bins_z = np.linspace(ROI[4],ROI[5], nz+1)
62 | df['z_cuts'] = pd.cut(df.Z,bins_z, labels=False)
63 |
64 | grouped = df.groupby([df['x_cuts'],df['y_cuts'], df['z_cuts']])
65 |
66 | means = grouped[['X', 'Y', 'Z', 'Red', 'Green', 'Blue']].mean().reset_index()
67 | # pad with -9999 so the output keeps as many points as the input
68 | pad = np.zeros(ins['X'].shape[0] - means.X.shape[0]) - 9999
69 | outf = pd.DataFrame({'X': np.hstack((means.X, pad)), 'Y': np.hstack((means.Y, pad)),
70 | 'Z': np.hstack((means.Z, pad)), 'Red': np.hstack((means.Red, pad)),
71 | 'Green': np.hstack((means.Green, pad)),
72 | 'Blue': np.hstack((means.Blue, pad))})
73 | outf['Classification'] = (outf.X==-9999)*13
74 | outf = outf.dropna()
75 |
76 | outs['X'] = np.array(outf.X.astype('<f8'))
[... lines 77-131 were lost in extraction (spans between '<' and '>' were stripped as HTML tags); only the classification tail below survives ...]
132 | snow = (hsv[:,2] > thresh) * 3
133 | ground = (hsv[:,2] <= thresh) * 2
134 |
135 | dt = np.dtype('u1')
136 | snow = snow.astype(dt)
137 | ground = ground.astype(dt)
138 | cls = snow + ground
139 |
140 | outs['Classification'] = cls
141 |
142 | return True
143 |
144 |
145 | def mask_ground(ins, outs):
146 | "Mask to keep ground patches only based on point Value in the HSV colorspace"
147 | thresh = 180
148 |
149 | rgb = np.vstack((ins['Red'], ins['Green'], ins['Blue'])).T
150 | hsv = cl.rgb_to_hsv(rgb)
151 | ground = hsv[:,2] <= thresh
152 | outs['Mask'] = ground
153 |
154 | return True
155 |
156 |
157 | def rgb2value(ins, outs):
158 | rgb = np.vstack((ins.get('Red'), ins.get('Green'), ins.get('Blue'))).T
159 | hsv = cl.rgb_to_hsv(rgb)
160 | #pdb.set_trace()
161 | outs['Value'] = hsv[:,2].astype('float64')
162 |
163 | return True
164 |
165 |
166 |
167 | def mask_snow(ins, outs):
168 | "Mask to keep ground patches only based on point Value in the HSV colorspace"
169 | thresh = 180
170 |
171 | rgb = np.vstack((ins['Red'], ins['Green'], ins['Blue'])).T
172 | hsv = cl.rgb_to_hsv(rgb)
173 | snow = hsv[:,2] > thresh
174 | outs['Mask'] = snow
175 |
176 | return True
177 |
--------------------------------------------------------------------------------
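These filters are meant to be referenced from a filters.python stage of a PDAL pipeline. A minimal sketch with the python-pdal bindings (file names and UTM offsets are hypothetical; for voxelGrid, pdalargs would instead carry Xmin/Xmax/Ymin/Ymax/Zmin/Zmax and leaf):

    import json
    import pdal  # python-pdal bindings

    spec = [
        "cloud_in.las",                                # hypothetical input cloud
        {
            "type": "filters.python",
            "script": "photo4d/pdal_python_filter.py",
            "function": "add_XY_UTM",
            "module": "anything",
            "pdalargs": json.dumps({"x_offset": 500000, "y_offset": 6600000}),
        },
        "cloud_utm.las",                               # hypothetical output cloud
    ]
    pdal.Pipeline(json.dumps(spec)).execute()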
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # Note: To use the 'upload' functionality of this file, you must:
5 | # $ pip install twine
6 |
7 | import io
8 | import os
9 | import sys
10 | from shutil import rmtree
11 | import photo4d
12 |
13 | from setuptools import find_packages, setup, Command
14 |
15 | # Package meta-data.
16 | NAME = 'photo4d'
17 | DESCRIPTION = 'Open source project to perform time-lapse photogrammetry'
18 | URL = 'https://github.com/ArcticSnow/photo4D'
19 | EMAIL = 'simon.filhol@geo.uio.no'
20 | AUTHOR = 'S. Filhol, A. Perret, G. Sutter, and L. Girod'
21 | REQUIRES_PYTHON = '>=3.6.0'
22 | VERSION = None
23 |
24 | # What packages are required for this module to be executed?
25 | REQUIRED = [
26 | 'lxml', 'pandas', 'numpy', 'matplotlib', 'opencv-python', 'pillow'
27 | ]
28 |
29 | # What packages are optional?
30 | EXTRAS = {
31 | 'pcl': ['pdal']  # needed by the pcl_process() class; 'json' is part of the standard library
32 | }
33 |
34 | # The rest you shouldn't have to touch too much :)
35 | # ------------------------------------------------
36 | # Except, perhaps the License and Trove Classifiers!
37 | # If you do change the License, remember to change the Trove Classifier for that!
38 |
39 | here = os.path.abspath(os.path.dirname(__file__))
40 |
41 | # Import the README and use it as the long-description.
42 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file!
43 | try:
44 | with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
45 | long_description = '\n' + f.read()
46 | except FileNotFoundError:
47 | long_description = DESCRIPTION
48 |
49 | # Load the package's __version__.py module as a dictionary.
50 | about = {}
51 | if not VERSION:
52 | with open(os.path.join(here, NAME, '__version__.py')) as f:
53 | exec(f.read(), about)
54 | else:
55 | about['__version__'] = VERSION
56 |
57 |
58 | class UploadCommand(Command):
59 | """Support setup.py upload."""
60 |
61 | description = 'Build and publish the package.'
62 | user_options = []
63 |
64 | @staticmethod
65 | def status(s):
66 | """Prints things in bold."""
67 | print('\033[1m{0}\033[0m'.format(s))
68 |
69 | def initialize_options(self):
70 | pass
71 |
72 | def finalize_options(self):
73 | pass
74 |
75 | def run(self):
76 | try:
77 | self.status('Removing previous builds…')
78 | rmtree(os.path.join(here, 'dist'))
79 | except OSError:
80 | pass
81 |
82 | self.status('Building Source and Wheel (universal) distribution…')
83 | os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
84 |
85 | self.status('Uploading the package to PyPI via Twine…')
86 | os.system('twine upload dist/*')
87 |
88 | self.status('Pushing git tags…')
89 | os.system('git tag v{0}'.format(about['__version__']))
90 | os.system('git push --tags')
91 |
92 | sys.exit()
93 |
94 |
95 | # Where the magic happens:
96 | setup(
97 | name=NAME,
98 | version=about['__version__'],
99 | description=DESCRIPTION,
100 | long_description=long_description,
101 | long_description_content_type='text/markdown',
102 | author=AUTHOR,
103 | author_email=EMAIL,
104 | python_requires=REQUIRES_PYTHON,
105 | url=URL,
106 | packages=find_packages(exclude=('tests','pyxif')),
107 | # If your package is a single module, use this instead of 'packages':
108 | # py_modules=['mypackage'],
109 |
110 | # entry_points={
111 | # 'console_scripts': ['mycli=mymodule:cli'],
112 | # },
113 | install_requires=REQUIRED,
114 | extras_require=EXTRAS,
115 | include_package_data=True,
116 | license='MIT',
117 | classifiers=[
118 | # Trove classifiers
119 | # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
120 | 'License :: OSI Approved :: MIT License',
121 | 'Programming Language :: Python',
122 | 'Programming Language :: Python :: 3',
123 | 'Programming Language :: Python :: 3.6',
124 | 'Programming Language :: Python :: Implementation :: CPython',
125 | 'Programming Language :: Python :: Implementation :: PyPy'
126 | ],
127 | # $ setup.py publish support.
128 | cmdclass={
129 | 'upload': UploadCommand,
130 | },
131 | )
132 |
--------------------------------------------------------------------------------
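For completeness, the publish flow that the UploadCommand above automates, per the note at the top of the file (assuming PyPI credentials are configured for twine):

    pip install twine
    python setup.py upload

The upload command rebuilds the sdist and universal wheel, uploads them with twine, then creates and pushes a git tag for the current version (v0.2.2 here).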