├── .editorconfig ├── .gitignore ├── .travis.yml ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── docs ├── TUTORIAL.md └── distmatching_example.png ├── pymicmac ├── __init__.py ├── logsparser │ ├── __init__.py │ ├── get_campari_nums.py │ ├── get_gcpbascule_nums.py │ ├── get_homol_diffs.py │ ├── get_redtiep_nums.py │ └── get_tapas_nums.py ├── logsplotter │ ├── __init__.py │ ├── plot_campari_nums.py │ ├── plot_gcpbascule_nums.py │ ├── plot_gcps.py │ └── plot_tiep.py ├── noodles │ ├── __init__.py │ └── noodles_exe_parallel.py ├── pointcloud │ ├── __init__.py │ └── create_parcommands_config_file_convert_ply_laz.py ├── utils_execution.py └── workflow │ ├── __init__.py │ ├── distributed_matching │ ├── __init__.py │ └── create_parcommands_config_file.py │ ├── distributed_tapioca │ ├── __init__.py │ ├── combine_distributed_tapioca_output.py │ ├── create_all_image_pairs_file.py │ └── create_parcommands_config_file.py │ └── run_workflow.py ├── requirements.txt ├── setup.py └── tests ├── 1.jpg ├── 2.jpg ├── 3.jpg ├── 4.jpg ├── 5.jpg ├── 6.jpg ├── MicMac-LocalChantierDescripteur.xml ├── Ori-IniCal └── AutoCal_Foc-3500_Cam-Aquaris_E5_HD.xml ├── list.txt ├── matching.xml ├── param-estimation.xml ├── param-estimation_orireduction.xml ├── param-estimation_reduction.xml ├── run_distributed_tapioca_local_test.sh ├── run_workflow_test.sh └── tie-point-detection.xml /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | trim_trailing_whitespace = true 11 | # Set default charset 12 | charset = utf-8 13 | 14 | # Matches multiple files with brace expansion notation 15 | 16 | # 4 space indentation 17 | [*.{py,java,r,R}] 18 | indent_size = 4 19 | 20 | # 2 space indentation 21 | [*.{js,json,yml,html,xml}] 22 | indent_size = 2 23 | 24 | [*.{md,Rmd}] 25 | trim_trailing_whitespace = false 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # PyInstaller 26 | # Usually these files are written by a python script from a template 27 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .coverage 39 | .cache 40 | nosetests.xml 41 | coverage.xml 42 | 43 | # Translations 44 | *.mo 45 | *.pot 46 | 47 | # Django stuff: 48 | *.log 49 | 50 | # Sphinx documentation 51 | docs/_build/ 52 | 53 | # PyBuilder 54 | target/ 55 | 56 | # Eclipse 57 | .project 58 | .pydevproject 59 | 60 | # PyCharm 61 | .idea 62 | 63 | # Coverage 64 | cover/ 65 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: generic 2 | dist: trusty 3 | sudo: required 4 | 5 | os: 6 | - linux 7 | 8 | env: 9 | matrix: 10 | - PYTHON_VERSION=3.5 11 | global: 12 | - MINCONDA_VERSION="latest" 13 | - MINCONDA_LINUX="Linux-x86_64" 14 | 15 | before_install: 16 | - MINCONDA_PYTHON_VERSION=3; 17 | - MINCONDA_OS=$MINCONDA_LINUX; 18 | - wget "http://repo.continuum.io/miniconda/Miniconda$MINCONDA_PYTHON_VERSION-$MINCONDA_VERSION-$MINCONDA_OS.sh" -O miniconda.sh; 19 | - bash miniconda.sh -b -p $HOME/miniconda 20 | - export PATH="$HOME/miniconda/bin:$PATH" 21 | - conda config --set always_yes yes 22 | - conda update -q conda 23 | - conda create -q -n test-environment python=$PYTHON_VERSION lxml matplotlib 24 | - source activate test-environment 25 | - conda install -c conda-forge -c improphoto micmac 26 | 27 | install: 28 | - pip install . 29 | 30 | script: 31 | - cd tests 32 | - travis_wait ./run_distributed_tapioca_local_test.sh 33 | - travis_wait ./run_workflow_test.sh 34 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | Contributions are welcome! 3 | Please create a pull request, and make sure that: 4 | 5 | 1. We follow the [GitHub Flow](https://guides.github.com/introduction/flow/) branching model. 6 | 2. For other development and coding style conventions, see the [NLeSC Style Guide](https://nlesc.gitbooks.io/guide/content/). 7 | 3. Don't include extra dependencies without a good reason. Only use licenses compatible with the license of this project: Apache v2.0. 8 | 4. Please document your code and provide unit tests. 9 | 10 | Make sure that after your contribution at least the [`run_workflow_test`](https://github.com/ImproPhoto/pymicmac/blob/master/tests/run_workflow_test.sh) passes. 11 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2016 Oscar Martinez-Rubi 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pymicmac 2 | 3 | [![Build Status](https://travis-ci.org/ImproPhoto/pymicmac.svg?branch=master)](https://travis-ci.org/ImproPhoto/pymicmac) 4 | [![Codacy Badge](https://api.codacy.com/project/badge/Grade/cedd804840ca4de0af4c6bae6939b28d)](https://www.codacy.com/app/ImproPhoto/pymicmac?utm_source=github.com&utm_medium=referral&utm_content=ImproPhoto/pymicmac&utm_campaign=Badge_Grade) 5 | [![Anaconda-Server Badge](https://anaconda.org/improphoto/pymicmac/badges/installer/conda.svg)](https://conda.anaconda.org/improphoto) 6 | [![DOI](https://zenodo.org/badge/57877195.svg)](https://zenodo.org/badge/latestdoi/57877195) 7 | 8 | This software is a result of the Netherlands eScience Center project [Improving Open-Source Photogrammetric Workflows for Processing Big Datasets](https://www.esciencecenter.nl/project/improving-open-source-photogrammetric-workflows-for-processing-big-datasets). 9 | 10 | `pymicmac` provides a Python interface for executing `MicMac` workflows, as well as distributed computing tools for `MicMac`. 11 | [`MicMac`](http://micmac.ensg.eu) is a photogrammetric suite which contains many different tools to execute photogrammetric workflows. 12 | 13 | In short, a photogrammetric workflow contains at least: 14 | 15 | - **Tie-points detection.** First, key features in the images are extracted. This is done 16 | for example with the [SIFT algorithm](https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf). Second, the key features are cross-matched 17 | between different images to detect points that correspond to the same physical 18 | locations and are visible in different images. The detected points are called tie-points 19 | (they are also referred to as homologous points in the related literature). 20 | 21 | - **Bundle block adjustment.** The camera positions and orientations are estimated and the camera parameters are calibrated. 22 | 23 | - **Dense image matching.** The detected tie-points are matched and 3D-projected to produce the dense point cloud. The 3D points are back-projected into the images to correct for projective deformation. This creates a metrically correct true-orthophoto. 24 | 25 | The MicMac suite contains several tools dedicated to each of the steps of the photogrammetric workflow. The tie-point detection is done with `Tapioca`, the bundle block adjustment is done with `Tapas` and the dense matching and point cloud generation is done with `Malt`, `Tawny` and `Nuage2Ply`. 26 | 27 | `pymicmac` addresses two main issues with `MicMac`: 28 | 29 | 1. `pymicmac` helps you when running a sequence of `MicMac` commands. The sequence of commands is defined in an XML file. During execution `pymicmac` creates isolated execution folders separating the input data from the intermediate and output data. `pymicmac` also adds CPU/MEM monitoring for the commands. 30 | 31 | 2. `pymicmac` contains distributed computing versions of `Tapioca` and of the matching pipeline (`Malt`, `Tawny`, `Nuage2Ply`). This allows running `Tapioca` and the matching pipeline in distributed systems such as SGE clusters or a bunch of ssh-reachable machines. 33 | 
The `micmac-run-workflow` tool uses the sequential commands execution tool of [`pycoeman`](https://github.com/NLeSC/pycoeman). Detailed information is provided in the [Instructions](#instructions) section. 34 | 35 | In section [Large image sets](#large-image-sets) we provide some tips on how to use `MicMac` and `pymicmac` for processing large image sets, using distributed computing for the tie-points extraction and the dense image matching, and tie-points reduction for the bundle block adjustment. 36 | 37 | A step-by-step tutorial is also available in [Tutorial](https://github.com/ImproPhoto/pymicmac/tree/master/docs/TUTORIAL.md). 38 | 39 | ## Installation 40 | 41 | The easiest way to install pymicmac is by using the [Anaconda package](https://anaconda.org/ImproPhoto/pymicmac). 42 | 43 | ```bash 44 | conda install -c improphoto -c conda-forge pymicmac 45 | ``` 46 | 47 | ## Development installation 48 | 49 | Clone this repository and install it with pip (using a virtualenv is recommended): 50 | 51 | ``` 52 | git clone https://github.com/ImproPhoto/pymicmac 53 | cd pymicmac 54 | pip install -e . 55 | ``` 56 | 57 | Python dependencies: `pycoeman` and `noodles` (see https://github.com/NLeSC/pycoeman and https://github.com/NLeSC/noodles for installation instructions). 58 | 59 | Other Python dependencies (numpy, tabulate, matplotlib, lxml) are automatically installed by `pip install -e .`, but some system libraries have to be installed (for example, freetype is required by matplotlib and may need to be installed by the system admin). 60 | 61 | For now `pymicmac` works only on Linux systems. It requires Python 3.5. 62 | 63 | ## Instructions 64 | 65 | The tool `micmac-run-workflow` is used to execute entire photogrammetric workflows with MicMac, or portions of them. We recommend splitting the workflow in three pieces: (1) tie-points extraction, (2) bundle block adjustment and (3) dense image matching. Each time the tool is executed, it creates an independent execution folder to isolate the processing from the input data. The tool can be executed as a Python script (see the example in `tests/run_workflow_test.sh`) or it can be imported as a Python module. Which MicMac commands are executed is specified with an XML configuration file. 66 | 67 | ### Workflow XML configuration file 68 | 69 | The Workflow XML configuration file format is the sequential commands XML configuration file format used by [`pycoeman`](https://github.com/NLeSC/pycoeman). For `pymicmac`, usually the first tool in any Workflow XML configuration file links to the list of images. So, we can use `` to specify a file with the list of images.
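The tag markup of the configuration examples was stripped when this page was rendered, so the exact element names cannot be read off directly here; the sketch below therefore uses assumed element names (`SeqCommands`, `Component`, `id`, `requirelist`, `command`) purely for illustration — the authoritative format is defined by pycoeman. It shows how such a file could be generated with `lxml`, which is already a pymicmac dependency:

```python
# Hedged sketch: generate a minimal workflow XML configuration.
# The element names used here are ASSUMPTIONS for illustration; the real
# names are defined by pycoeman's sequential commands XML format.
from lxml import etree

root = etree.Element('SeqCommands')                # assumed root element
component = etree.SubElement(root, 'Component')    # assumed per-command element
etree.SubElement(component, 'id').text = 'Tapioca'
etree.SubElement(component, 'requirelist').text = 'list.txt'  # file listing the images
etree.SubElement(component, 'command').text = 'mm3d Tapioca All ".*jpg" -1'

with open('tie-point-detection.xml', 'wb') as f:
    f.write(etree.tostring(root, pretty_print=True))
```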
Next, some XML examples: 70 | 71 | - Tie-points extraction: 72 | ``` 73 | 74 | 75 | Tapioca 76 | list.txt 77 | mm3d Tapioca All ".*jpg" -1 78 | 79 | 80 | ``` 81 | 82 | - Bundle block adjustment: 83 | ``` 84 | 85 | 86 | Tapas 87 | list.txt 88 | tie-point-detection/Homol 89 | mm3d Tapas Fraser ".*jpg" Out=TapasOut 90 | 91 | 92 | ``` 93 | 94 | - Dense image matching: 95 | ``` 96 | 97 | 98 | Malt 99 | list.txt 100 | param-estimation/Ori-TapasOut 101 | mm3d Malt GeomImage ".*jpg" TapasOut "Master=1.jpg" "DirMEC=Results" UseTA=1 ZoomF=1 ZoomI=32 Purge=true 102 | 103 | 104 | Nuage2Ply 105 | mm3d Nuage2Ply "./Results/NuageImProf_STD-MALT_Etape_8.xml" Attr="1.jpg" Out=1.ply 106 | 107 | 108 | ``` 109 | 110 | Following the examples above, we could execute a whole photogrammetric workflow with: 111 | ``` 112 | micmac-run-workflow -d /path/to/data -e tie-point-detection -c tie-point-detection.xml 113 | micmac-run-workflow -d /path/to/data -e param-estimation -c param-estimation.xml 114 | micmac-run-workflow -d /path/to/data -e matching -c matching.xml 115 | ``` 116 | 117 | *NOTE*: all file and folder names specified in `` and `` must be provided relative to the folder where all the data is (`/path/to/data`). 118 | 119 | ### Monitoring 120 | 121 | The tool used by `pymicmac` to run the commands, `pycoeman`, keeps the log produced by each command in a .log file. Additionally, it stores a .mon file and a .mon.disk file with the CPU/MEM/disk monitoring. There are `pycoeman` tools for calculating statistics and creating plots from the monitoring (see https://github.com/NLeSC/pycoeman). There are also some tools in `pymicmac` to analyze the logs of several `MicMac` commands: `micmac-tapas-log-anal`, `micmac-redtiep-log-anal`, `micmac-campari-log-anal`, `micmac-gcpbascule-log-anal`, `micmac-gcpbascule-log-plot`, `micmac-campari-log-plot` (the MicMac command is part of the tool name). 122 | 123 | ## Large image sets 124 | 125 | For the tie-points extraction and the dense image matching, the processing can be easily enhanced by using distributed computing (clusters or clouds). The reason is that the processes involved can be easily split in independent chunks (in each chunk one or more images are processed). For the bundle block adjustment this is not the case, since the involved processes usually require having data from all the images simultaneously in memory. In this case, we propose to use tie-points reduction to deal with large image sets. 126 | 127 | For more information about distributed computing and tie-points reduction, see our paper: Martinez-Rubi, Oscar, Francesco Nex, Marc Pierrot-Deseilligny, and Ewelina Rupnik. “Improving FOSS Photogrammetric Workflows for Processing Large Image Datasets.” Open Geospatial Data, Software and Standards 2 (May 15, 2017): 12. [https://doi.org/10.1186/s40965-017-0024-5.](https://doi.org/10.1186/s40965-017-0024-5). 128 | 129 | ### Distributed computing 130 | 131 | Some steps of the photogrammetric workflow, namely the tie-points extraction and the dense image matching, can be executed more efficiently on distributed computing systems by exploiting the innate data parallelism in photogrammetry. 132 | 133 | For example, the `Tapioca` tool (tie-points extraction) first extracts the features for each image and then cross-matches the features between image pairs. The proposed distributed computing solution divides the list of all image pairs in chunks, where each chunk can be processed mostly independently. At the end, the results from the chunks need to be combined.
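The chunking itself is just a partition of the pair list. A minimal sketch of the idea (the function below is hypothetical and not the actual `micmac-disttapioca-create-config` implementation):

```python
# Hedged sketch: partition a list of image pairs into fixed-size chunks
# that can be matched independently. chunk_pairs is a hypothetical name.
def chunk_pairs(pairs, pairs_per_chunk):
    return [pairs[i:i + pairs_per_chunk]
            for i in range(0, len(pairs), pairs_per_chunk)]

pairs = [('1.jpg', '2.jpg'), ('1.jpg', '3.jpg'), ('2.jpg', '3.jpg'),
         ('4.jpg', '5.jpg')]
for index, chunk in enumerate(chunk_pairs(pairs, 2)):
    print(index, chunk)  # each chunk becomes one independent Tapioca run
```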
134 | 135 | We use the parallel commands execution tools of `pycoeman`. The various parallel/distributed commands are specified in an XML configuration file which is similar to the Workflow XML configuration file. An example XML configuration, where `Tapioca` processes two data chunks, each containing half of the image pairs: 136 | 137 | ``` 138 | 139 | 140 | 0_Tapioca 141 | DistributedTapioca/0_ImagePairs.xml.list 142 | DistributedTapioca/0_ImagePairs.xml 143 | mm3d Tapioca File 0_ImagePairs.xml -1 144 | Homol 145 | 146 | 147 | 1_Tapioca 148 | DistributedTapioca/1_ImagePairs.xml.list 149 | DistributedTapioca/1_ImagePairs.xml 150 | mm3d Tapioca File 1_ImagePairs.xml -1 151 | Homol 152 | 153 | 154 | ``` 155 | 156 | The `pycoeman` tool `coeman-mon-plot-cpu-mem` can be used to get a plot of the aggregated CPU and MEM usage. 157 | 158 | #### Distributed MicMac Tools 159 | 160 | ##### Tapioca 161 | 162 | The tools `micmac-disttapioca-create-pairs`, `micmac-disttapioca-create-config` and `micmac-disttapioca-combine`, together with the parallel commands execution tools of `pycoeman`, are used to run `Tapioca` on distributed computing systems. 163 | 164 | In order to run `Tapioca` on a distributed system, a list containing image pairs is needed. The `micmac-disttapioca-create-pairs` tool creates such a list. The `micmac-disttapioca-create-config` tool splits the image pairs XML file in multiple chunks and creates an XML configuration file compatible with `pycoeman`: 165 | 166 | ``` 167 | micmac-disttapioca-create-config -i [input XML image pairs] -f [folder for output XMLs and file lists, one for each chunk] -n [number of image pairs per output XML, must be even number] -o [XML configuration file] 168 | ``` 169 | 170 | Next, the distributed tool can be executed on any hardware system supporting `pycoeman` (see https://github.com/NLeSC/pycoeman) using the `coeman-par-local`, `coeman-par-ssh` or `coeman-par-sge` tools. 171 | 172 | After the distributed `Tapioca` has finished, the outputs from the different chunks need to be combined. The `micmac-disttapioca-combine` tool combines all outputs into a final Homol folder: 173 | 174 | ``` 175 | micmac-disttapioca-combine -i [folder with subfolders, each subfolder with the results of the processing of a chunk] -o [output combined folder] 176 | ``` 177 | 178 | ##### Matching (Malt, Tawny, Nuage2Ply) 179 | 180 | To generate the final point cloud on a distributed computing system, the dense point matching is parallelized by `micmac-distmatching-create-config` and the parallel commands execution tool of `pycoeman`. 181 | 182 | The algorithm in `micmac-distmatching-create-config` is restricted to aerial images whose camera orientation, obtained in the parameter estimation step of the photogrammetric workflow, is in a cartographic reference system. From the estimated camera positions, and assuming that the Z direction (along which the pictures were taken) is always pointing to the ground, the tool computes the XY bounding box that includes all the XY camera positions. The bounding box is divided into tiles, as shown in the figure below: 183 | 184 | ![exampledistmatching](docs/distmatching_example.png) 185 | 186 | Each tile can then be processed by an independent process. For each tile, the images whose XY positions intersect the tile are used. If needed, this set of images is extended with the nearest neighbours to guarantee a minimum of 6 images per tile.
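The tiling idea can be summarized in a few lines; this is a simplified sketch under the assumptions just described (camera positions as (x, y) tuples, regular grid), not the actual implementation:

```python
# Hedged sketch: XY bounding box of the camera positions, split into
# an nx-by-ny grid of tiles; each tile is an (x0, y0, x1, y1) rectangle.
def make_tiles(camera_xy, nx, ny):
    xs = [x for x, _ in camera_xy]
    ys = [y for _, y in camera_xy]
    min_x, min_y = min(xs), min(ys)
    step_x = (max(xs) - min_x) / nx
    step_y = (max(ys) - min_y) / ny
    return [(min_x + i * step_x, min_y + j * step_y,
             min_x + (i + 1) * step_x, min_y + (j + 1) * step_y)
            for i in range(nx) for j in range(ny)]

tiles = make_tiles([(0.0, 0.0), (10.0, 4.0), (20.0, 8.0)], nx=2, ny=2)
print(len(tiles))  # 4 tiles; images are then assigned to the tiles they intersect
```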
187 | 188 | The `micmac-distmatching-create-config` tool generates the tiles from the initial large list of images and creates an XML configuration file suitable for `pycoeman`: 189 | 190 | ``` 191 | micmac-distmatching-create-config -i [orientation folder] -t [Homol folder] -e [images format] -o [XML configuration file] -f [folder for extra tiles information] -n [number of tiles in x and y] 192 | ``` 193 | 194 | Next, the distributed tool can be executed on any hardware system supported by `pycoeman` (see https://github.com/NLeSC/pycoeman). The `coeman-par-local`, `coeman-par-ssh` or `coeman-par-sge` tools are used for this purpose. 195 | 196 | ### Tie-points reduction 197 | 198 | For more efficient workflow execution, we propose to perform a tie-points reduction step before the bundle adjustment. This extra step is added to the processing chain of the parameter estimation step. For a detailed explanation, please refer to the Tie-point reduction section in this [report](http://knowledge.esciencecenter.nl/content/report-tie-points.pdf). 199 | 200 | There are two tools for this purpose: `RedTieP` and `OriRedTieP`. The former tool should be preceded by `NO_AllOri2Im` and `Martini` should precede the latter. For examples, see `tests/param-estimation_reduction.xml` and `tests/param-estimation_orireduction.xml`. 201 | 202 | Note that after running the tie-points reduction tools, the Homol folder has to be changed (see the examples). 203 | Also note that when running `RedTieP`, it is possible to use the parallel execution mode together with the tool `micmac-noodles`. See the example in `tests/param-estimation_reduction.xml`. 204 | 205 | The `micmac-homol-compare` tool can be used to compute the reduction factors. 206 | -------------------------------------------------------------------------------- /docs/TUTORIAL.md: -------------------------------------------------------------------------------- 1 | # Tutorial 2 | 3 | This tutorial is intended to guide you through the process of converting a bunch of images in some folder of your (Linux) computer into a colored dense point cloud using MicMac and pymicmac. 4 | 5 | ## Installation 6 | 7 | Before anything else, we need to install all the required software: MicMac and pymicmac and their dependencies. 8 | In this tutorial, we do the installation on Ubuntu 16.04. Note that some steps and library names may be different in other Linux distributions. 9 | 10 | First, we install MicMac (we get MicMac from its Mercurial repository): 11 | ``` 12 | # Install mercurial to be able to download MicMac 13 | sudo apt-get install mercurial 14 | 15 | # We install it in the next location (please change according to your system) 16 | cd /home/oscar/sw 17 | 18 | # Clone the repo (this requires user/password) 19 | hg clone https://geoportail.forge.ign.fr/hg/culture3d 20 | 21 | # Install MicMac 22 | cd culture3d 23 | mkdir build 24 | cd build 25 | cmake .. 26 | make -j24 27 | make install 28 | 29 | # Assuming that we installed micmac in /home/oscar/sw/culture3d, add the next lines to your .bashrc (in your case replace accordingly) 30 | export LD_LIBRARY_PATH="/home/oscar/sw/culture3d/lib:$LD_LIBRARY_PATH" 31 | export PATH="/home/oscar/sw/culture3d/bin:$PATH" 32 | 33 | # Source .bashrc to activate the installation 34 | source ~/.bashrc 35 | ``` 36 | 37 | Second, pymicmac is a Python 3.5 package, so we need to have a Python 3.5 installation.
We recommend using Anaconda: 38 | ``` 39 | # Get the latest Anaconda installer (in 32 or 64 bits depending on your system) 40 | wget https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh 41 | 42 | # Install it 43 | bash Anaconda3-4.2.0-Linux-x86_64.sh 44 | 45 | # Create an Anaconda environment for the installation of pymicmac 46 | conda create --name python35 python=3.5 47 | 48 | # Add this line in your .bashrc 49 | source activate python35 50 | 51 | # Source .bashrc to activate the installation 52 | source ~/.bashrc 53 | ``` 54 | 55 | Third, pymicmac has some system library requirements (freetype, ssl, ffi) and also requires pycoeman and noodles: 56 | ``` 57 | # Install pycoeman dependencies 58 | sudo apt-get install libfreetype6-dev libssl-dev libffi-dev 59 | 60 | # Install pycoeman 61 | pip install git+https://github.com/NLeSC/pycoeman 62 | 63 | # Install noodles 64 | pip install git+https://github.com/NLeSC/noodles 65 | ``` 66 | 67 | Finally, we install pymicmac: 68 | ``` 69 | # Install pymicmac 70 | pip install git+https://github.com/ImproPhoto/pymicmac 71 | ``` 72 | 73 | We can test the installation: 74 | ``` 75 | mm3d -help 76 | micmac-run-workflow -h 77 | ``` 78 | 79 | ## Processing a dataset 80 | 81 | Now that we have installed all the required software, we will generate a colored dense point cloud. 82 | We assume that the data is in `/home/oscar/data/GRONAU/4ms_60m_1000`. Concretely, the folder looks like: 83 | 84 | ``` 85 | ls /home/oscar/data/GRONAU/4ms_60m_1000 86 | coord_List2D.xml gcp_List3D.xml GrapheHom.xml Ori-IniCal 87 | IMG_0990.JPG ... 88 | ``` 89 | 90 | In addition to the set of JPG images, we also have GCPs files, a `GrapheHom.xml` file and an `Ori-IniCal` folder. 91 | The GCPs file `gcp_List3D.xml` has the 3D positions of the Ground Control Points (GCPs) and Check Points (CPs), and their 2D positions in the images are registered in the `coord_List2D.xml` file. The `GrapheHom.xml` contains the list of valid image pairs (extracted from geotag info). The `Ori-IniCal` folder contains an XML file with the initial calibration information. 92 | 93 | For pymicmac, it will make our lives easier if we create a file with the list of images: 94 | ``` 95 | cd /home/oscar/data/GRONAU/4ms_60m_1000 96 | ls *JPG > images.list 97 | ``` 98 | 99 | Our photogrammetric pipeline with MicMac consists of running Tapioca to extract tie-points, followed by Tapas to perform the bundle block adjustment, and finally Malt, Tawny and Nuage2Ply to get the colored dense point cloud. 100 | 101 | The commands to be executed are configured in pymicmac with XML. During execution, the commands are executed in a different folder from the one where the original data is stored; links are created for the required data. pymicmac monitors the CPU/MEM and disk usage. With pymicmac we can configure any photogrammetric workflow and we can split it in parts in any way we want. In the next subsections, we present a couple of examples of different strategies to execute workflows. 102 | 103 | ### Single XML with entire workflow 104 | In this example we define a single XML to execute the entire workflow.
The pymicmac XML configuration file is called `Workflow.xml` and its content is: 105 | ``` 106 | 107 | 108 | Tapioca 109 | GrapheHom.xml 110 | images.list 111 | mm3d Tapioca File GrapheHom.xml -1 112 | 113 | 114 | Tapas 115 | mm3d Tapas Fraser ".*JPG" InCal=IniCal Out=TapasOut 116 | Ori-IniCal 117 | 118 | 119 | GCPBascule 120 | mm3d GCPBascule ".*JPG" TapasOut GCPBOut gcp_List3D.xml coord_List2D.xml 121 | gcp_List3D.xml coord_List2D.xml 122 | 123 | 124 | Malt 125 | mm3d Malt Ortho ".*JPG" GCPBOut 126 | 127 | 128 | Tawny 129 | mm3d Tawny Ortho-MEC-Malt 130 | 131 | 132 | Nuage2Ply 133 | mm3d Nuage2Ply MEC-Malt/NuageImProf_STD-MALT_Etape_8.xml Attr=Ortho-MEC-Malt/Orthophotomosaic.tif Out=pointcloud.ply 134 | 135 | 136 | ``` 137 | 138 | The workflow is executed with pymicmac with: 139 | ``` 140 | cd /home/oscar/data/GRONAU/4ms_60m_1000/ 141 | micmac-run-workflow -d . -c Workflow.xml -e WorkflowOutput 142 | ``` 143 | 144 | Note that in each command the required files/folders are specified with the tags `` and ``. The specified locations are relative paths to the folder specified in the `-d` option of the `micmac-run-workflow` command, i.e. `.` which is `/home/oscar/data/GRONAU/4ms_60m_1000/`. Also note that there is no need to duplicate required items. For example, all the commands need the images (provided with ``) but it is enough to specify this in the first command. 145 | 146 | Executing `micmac-run-workflow` will first create the `WorkflowOutput` folder, then it will create links inside this folder to all the required data (specified with `` and ``) and finally it will run all the commands. After the execution is finished, for each of the commands we will find in `WorkflowOutput` a log file, a mon file and a mon.disk file. These contain, respectively, the log of the command execution, the CPU/MEM usage and the disk usage. 147 | pycoeman has tools to obtain statistics of the CPU/MEM usage: 148 | ``` 149 | coeman-mon-stats -t Tapioca,Tapas,GCPBascule,Malt,Tawny,Nuage2Ply -f WorkflowOutput 150 | ``` 151 | 152 | 153 | ### Various XMLs to (re-)execute parts of the workflow 154 | While the previous example is useful, one can argue whether having CPU/MEM/disk usage and a clean environment alone is worth the hassle of installing and learning how to operate pymicmac, and that is a completely valid point. However, it is not for cases like the previous one that pymicmac was made. 155 | 156 | In this second example we will see when pymicmac is really beneficial. Now we want to run a workflow similar to the previous one, but with a tie-points reduction step before Tapas. This will make the Tapas execution faster. We also want to compare the workflow with tie-points reduction with the case when no reduction is used. More concretely, we will look at the residuals of Tapas and the errors of GCPBascule. We would also like to see the impact on Tapas if reduction is used (less memory and faster execution). 157 | 158 | Running Tapioca is common to the two workflows we want to test. Thus, we divide the workflows in two parts. The first part is Tapioca and the second part is Tapas and GCPBascule, with and without tie-points reduction. In this example, since we are only interested in Tapas and GCPBascule, we will not run Malt, Tawny and Nuage2Ply. The three `micmac-run-workflow` invocations can also be scripted, as the sketch below shows.
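Because each part is just another `micmac-run-workflow` invocation, the split is easy to drive from a small script. A driver sketch using only the documented command line (XML and execution folder names as introduced in the following subsections):

```python
# Hedged sketch: run the workflow parts one after another via the CLI.
import subprocess

DATA = '/home/oscar/data/GRONAU/4ms_60m_1000'
parts = [('Tapioca.xml', 'TapiocaOutput'),
         ('WorkflowNoTPR.xml', 'NoTPROutput'),
         ('WorkflowTPR.xml', 'TPROutput')]
for config, execution_folder in parts:
    # check=True stops the driver if one part fails
    subprocess.run(['micmac-run-workflow', '-d', '.', '-c', config,
                    '-e', execution_folder], cwd=DATA, check=True)
```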
159 | 160 | The XML to run Tapioca is called `Tapioca.xml` and its content is: 161 | ``` 162 | 163 | 164 | Tapioca 165 | GrapheHom.xml 166 | images.list 167 | mm3d Tapioca File GrapheHom.xml -1 168 | 169 | 170 | ``` 171 | 172 | We execute it with: 173 | ``` 174 | cd /home/oscar/data/GRONAU/4ms_60m_1000/ 175 | micmac-run-workflow -d . -c Tapioca.xml -e TapiocaOutput 176 | ``` 177 | 178 | This will create the `TapiocaOutput` folder, make links to the images and to the `GrapheHom.xml` file, and will execute Tapioca inside `TapiocaOutput`. When the execution is finished, in addition to the `Homol` folder created by Tapioca, we will also have a log file, a mon file and a mon.disk file. We can use `coeman-mon-stats` to get an overview of the CPU/MEM usage of Tapioca: 179 | ``` 180 | coeman-mon-stats -t Tapioca -f TapiocaOutput 181 | ``` 182 | 183 | Next, we define the rest of the workflow when no tie-points reduction is done. The `WorkflowNoTPR.xml` is: 184 | ``` 185 | 186 | 187 | Tapas 188 | Ori-IniCal TapiocaOutput/Homol 189 | images.list 190 | mm3d Tapas Fraser ".*JPG" InCal=IniCal Out=TapasOut 191 | 192 | 193 | GCPBascule 194 | mm3d GCPBascule ".*JPG" TapasOut GCPBOut gcp_List3D.xml coord_List2D.xml 195 | gcp_List3D.xml coord_List2D.xml 196 | 197 | ``` 198 | 199 | Note that in this case in Tapas we need to specify that we require `images.list` (we always do this in the first command in the XML) and that we also require the `Homol` folder. This will be inside the `TapiocaOutput` folder created before (which will be inside `/home/oscar/data/GRONAU/4ms_60m_1000/`, so it is fine to use the relative path). We can now run the workflow with: 200 | ``` 201 | cd /home/oscar/data/GRONAU/4ms_60m_1000 202 | micmac-run-workflow -d . -c WorkflowNoTPR.xml -e NoTPROutput 203 | ``` 204 | 205 | This will create the `NoTPROutput` folder, then it will create links to the images, the `Homol` folder and the rest of the files, and finally it will run Tapas and GCPBascule. Like before, after the execution is finished we will find log files, mon files and mon.disk files in the `NoTPROutput` folder. 206 | 207 | Next, we define the workflow with tie-points reduction. We can reuse the same `Homol` folder as before, so there is no need to rerun Tapioca. The `WorkflowTPR.xml` is: 208 | ``` 209 | 210 | 211 | NO_AllOri2Im 212 | TapiocaOutput/Homol 213 | images.list 214 | mm3d TestLib NO_AllOri2Im ".*JPG" Quick=1 215 | 216 | 217 | RedTieP 218 | mm3d RedTiep ".*JPG" NumPointsX=12 NumPointsY=12 WeightAccGain=0.00; rm Homol; mv Homol-Red Homol 219 | 220 | 221 | Tapas 222 | Ori-IniCal 223 | mm3d Tapas Fraser ".*JPG" InCal=IniCal Out=TapasOut 224 | 225 | 226 | GCPBascule 227 | mm3d GCPBascule ".*JPG" TapasOut GCPBOut gcp_List3D.xml coord_List2D.xml 228 | gcp_List3D.xml coord_List2D.xml 229 | 230 | ``` 231 | 232 | First, NO_AllOri2Im will run (it is required by RedTieP, the tie-points reduction tool) and, since this is the first command, we add the requires for the images and the `Homol` folder. Next, the actual reduction with RedTieP will be done. Note that after the reduction we will replace the `Homol` folder with the one output by the tool. But do not panic: the `rm Homol` only deletes the link to the `Homol` folder. The full set of tie-points is still there. This is one of the benefits of separating the execution of the several parts of the workflow and of using links. Finally, we will run Tapas and GCPBascule exactly as before, but in this case they will use a reduced set of tie-points.
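If you want to convince yourself that the `rm Homol` is harmless, you can check that `Homol` inside the execution folder is indeed a link before the reduction replaces it (a minimal sketch using only the standard library):

```python
# Hedged sketch: the Homol entry in the execution folder should be a
# link to TapiocaOutput/Homol; the original tie-points live elsewhere.
import os

homol = '/home/oscar/data/GRONAU/4ms_60m_1000/TPROutput/Homol'
print(os.path.islink(homol))  # True while it is still the pymicmac-created link
```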
We execute it with: 233 | ``` 234 | cd /home/oscar/data/GRONAU/4ms_60m_1000 235 | micmac-run-workflow -d . -c WorkflowTPR.xml -e TPROutput 236 | ``` 237 | 238 | After the execution is done, we will find log files, mon files and mon.disk files in the `TPROutput` folder. 239 | 240 | #### Comparison of workflows 241 | 242 | We want to compare the results of the two workflows. First, we look at the different CPU/MEM usage of the executed commands in both cases: 243 | ``` 244 | coeman-mon-stats -t Tapioca,NO_AllOri2Im,RedTieP,Tapas,GCPBascule -f TapiocaOutput,NoTPROutput,TPROutput 245 | ``` 246 | The output will look something like: 247 | ``` 248 | ########################## 249 | Time/CPU/MEM tools monitor 250 | ########################## 251 | #Command ExeFolder Time[s] Avail. CPU Max. CPU Mean CPU Avail. MEM[GB] Max. MEM[GB] Mean MEM[GB] 252 | ---------- ----------- --------- ------------ ---------- ---------- ---------------- -------------- -------------- 253 | Tapioca TapiocaOutput 508433 400 400 384.42 13.13 3.44 1.78 254 | NO_AllOri2Im TPROutput 607 400 182.8 38.64 13.13 3.11 2.86 255 | RedTieP TPROutput 91 400 87.1 14.76 13.13 3.05 2.8 256 | Tapas NoTPROutput 65199 400 266.4 100.28 13.13 9.18 8.82 257 | Tapas TPROutput 3154 400 321.9 111.54 13.13 4.73 3.46 258 | GCPBascule NoTPROutput 7 400 101.1 72.01 13.13 0.7 0.68 259 | GCPBascule TPROutput 6 400 104 88.84 13.13 1.61 1.59 260 | ``` 261 | With reduction, Tapas is about 20x faster (3154 vs 65199 seconds) and uses much less RAM. We can also see the actual reduction that has been done to the tie-points set in the second workflow: 262 | ``` 263 | micmac-homol-compare -o TapiocaOutput/Homol -c NoTPROutput/Homol,TPROutput/Homol 264 | ``` 265 | The output will look like: 266 | ``` 267 | ########### 268 | Ratio Homol 269 | ########### 270 | #Name Homol dec 271 | ----------------------- ----------- 272 | NoTPROutput 1.0000 273 | TPROutput 0.0669 274 | ``` 275 | Only 6.7% of the tie-points were used! But were the images correctly oriented with only 6.7% of the tie-points? 276 | Well, let's look at the Tapas residuals. 277 | The tool `micmac-tapas-log-anal` opens the Tapas log files, counts the number of iterations and, for the last one, shows the residuals: 278 | ``` 279 | micmac-tapas-log-anal -f NoTPROutput,TPROutput 280 | ``` 281 | will report something like: 282 | ``` 283 | ########################## 284 | Tapas last residuals/worsts 285 | ########################## 286 | #Name NumIter Res Wor 287 | ----------------- --------- -------- ------- 288 | NoTPROutput 160 0.642464 1.83815 289 | TPROutput 132 0.727202 1.02918 290 | ``` 291 | The residuals are a bit higher if tie-points reduction is applied. This was to be expected. Note that fewer iterations were required with fewer tie-points. Next, we check what really happened with the GCPs. The tool `micmac-gcpbascule-log-anal` reads the GCPBascule logs and computes the errors of the orientation at the GCPs.
292 | ``` 293 | micmac-gcpbascule-log-anal -x gcp_List3D.xml -f NoTPROutput,TPROutput 294 | ``` 295 | will report something like: 296 | ``` 297 | ########################### 298 | GCPBascule Dists statistics 299 | ########################### 300 | KOs 301 | #Name 302 | ----------------- ----------------------- 303 | NoTPROutput - 304 | TPROutput - 305 | 306 | GCPs 307 | #Name Min Max Mean Std Median 308 | ----------------- ------ ------ ------ ------ -------- 309 | NoTPROutput 0.0419 0.1191 0.0799 0.0278 0.0681 310 | TPROutput 0.0583 0.1182 0.096 0.0242 0.1101 311 | 312 | CPs 313 | #Name Min Max Mean Std Median 314 | ----------------- ------ ------ ------ ------ -------- 315 | NoTPROutput 0.0231 0.2374 0.0888 0.051 0.0734 316 | TPROutput 0.0287 0.2662 0.1048 0.0616 0.0785 317 | ``` 318 | 319 | The errors (in meters) when using a reduced set of tie-points increase by less than 2 centimeters. 320 | 321 | The previous example is the sort of case in which pymicmac will make your life much easier, i.e. when you have to rerun parts of the workflow with different parameters (or commands) and then you want to compare the different workflows. 322 | 323 | 324 | 325 | ### What else can pymicmac do for you? Distributed computing 326 | 327 | In the previous example we ran two workflows and we compared them. We saw that by using pymicmac the whole process is a bit less tedious. 328 | In addition to the benefits highlighted in the previous example, there are tools in pymicmac that are crucial when processing large image sets. 329 | 330 | We saw that we can use tie-points reduction to decrease the memory usage and to speed up Tapas (the bundle block adjustment). But what happens with Tapioca and with the dense image matching (Malt, Tawny and Nuage2Ply)? We saw in one of the tables before that while Tapas without tie-points reduction took around 60,000 seconds, Tapioca took around 500,000 seconds. That is almost a week! And the dataset was not even that large, just a few hundred images. Tie-points reduction is useful to run Tapas with large sets faster and with less memory. However, with large image sets issues also arise in Tapioca and later also in the dense image matching. In these cases, a feasible choice is to use distributed computing facilities such as clouds or clusters. pymicmac has tools to run a distributed version of Tapioca and a distributed version of the dense image matching pipeline (Malt, Tawny, Nuage2Ply) in clusters (with the SGE queuing system) and in a bunch of ssh-reachable machines. In order to port these tasks to distributed computing systems, a small modification in the algorithms that perform these tasks is required. The key idea is to divide the processing in chunks that can be processed independently (and in different machines), and to combine the results at the end. 331 | 332 | Next we show how to use these tools in an SGE cluster. 333 | 334 | #### Distributed Tapioca 335 | 336 | In this example, we have transferred the dataset used in the previous examples to an SGE cluster. We have stored the data in the following location: `/var/scratch/orubi/data/medium/4ms_60m_1000`. This is a shared location, so all the nodes of the cluster can access it. The folder contains: 337 | ``` 338 | coord_List2D.xml gcp_List3D.xml GrapheHom.xml Ori-IniCal images.list 339 | IMG_0990.JPG ... 340 | ``` 341 | 342 | In order to run the distributed Tapioca, first we need to divide the processing in chunks.
The tool `micmac-disttapioca-create-config` can be used to define the different chunks and the processing that needs to be done for each chunk: 343 | ``` 344 | micmac-disttapioca-create-config -i GrapheHom.xml -o DistributedTapioca.xml -f ChunksData -n 50 345 | ``` 346 | 347 | The previous command divides the image pairs defined in `GrapheHom.xml` in chunks of 50 image pairs (if you do not have a file like `GrapheHom.xml`, you can use `micmac-disttapioca-create-pairs` to create one with all possible image pairs). Which image pairs are used in each chunk is stored in files in the `ChunksData` folder. For each chunk it defines the commands to execute, and all the commands are stored in `DistributedTapioca.xml`. 348 | 349 | Now we use the tool `coeman-par-sge` in pycoeman to run the list of commands in the cluster and in parallel: 350 | ``` 351 | cd /var/scratch/orubi/data/medium/4ms_60m_1000 352 | coeman-par-sge -d . -c DistributedTapioca.xml -s /home/orubi/sw/export_paths.sh -r /local/orubi/DistributedTapioca -o DistributedTapiocaAllOutputs 353 | ``` 354 | `/var/scratch/orubi/data/medium/4ms_60m_1000` is our shared data location, so all paths in the XML files are relative to this location. The various commands to run in the cluster are specified in `DistributedTapioca.xml`. The file `/home/orubi/sw/export_paths.sh` sets the environment in the nodes: when a job is going to be executed in a certain node, the node needs to have the software available (MicMac, pymicmac and pycoeman). The execution of the jobs in the nodes will be done in the specified location, i.e. `/local/orubi/DistributedTapioca`. For each job, the required data for the chunk will be copied from the shared data location (`/var/scratch/orubi/data/medium/4ms_60m_1000`) to the local disk (`/local/orubi/DistributedTapioca`), so each job has a local copy of the required data for faster access. The chunk will be processed locally in the node and the output data (the partial `Homol` folders) will be copied back to the shared location. 355 | 356 | Once all the jobs have finished (check with `qstat`), we need to combine the partial `Homol` folders: 357 | ``` 358 | micmac-disttapioca-combine -i DistributedTapiocaAllOutputs -o Homol 359 | ``` 360 | Now we have a `Homol` folder that was created in a distributed manner. We can plot the combined CPU/MEM usage for all the nodes of the cluster: 361 | ``` 362 | coeman-mon-plot-cpu-mem -i DistributedTapiocaAllOutputs -r 20 363 | ``` 364 | 365 | #### Distributed dense image matching 366 | 367 | In the previous subsection we ran Tapioca in a distributed system. After Tapioca we need to run Tapas (and maybe GCPBascule or other processes) in order to obtain the image orientations. We saw before that we can use tie-points reduction to speed up Tapas. After we have the image orientation, we are ready to generate the dense colored point cloud. In MicMac this can be done with the dense image matching pipeline, which consists of Malt, Tawny and Nuage2Ply. These processes are very time consuming and will generate a large amount of intermediate data that, for large datasets, will easily fill your disk storage. 368 | 369 | We have developed a distributed tool to run the dense image matching pipeline (Malt, Tawny and Nuage2Ply). Right now the solution only works for aerial images oriented in a cartographic reference system. In this example we show how to run it in an SGE cluster.
We assume that we have the data (images) again in `/var/scratch/orubi/data/medium/4ms_60m_1000`, and in this folder we also have the image orientation in the folder `Ori-Final`. 370 | 371 | In order to run the distributed dense image matching, first we need to divide the processing in chunks. The tool `micmac-distmatching-create-config` can be used to define the different chunks and the processing that needs to be done for each chunk: 372 | ``` 373 | micmac-distmatching-create-config -i TPROutput/Ori-GCPBOut -e JPG -o DistributedMatching.xml -f DistributedMatchingConfigFolder -n 60,60 374 | ``` 375 | The previous command divides the area into 3600 tiles. Which images are processed in each tile is defined in the `DistributedMatchingConfigFolder` folder. For each tile it defines the commands to execute, and all the commands are stored in `DistributedMatching.xml`. 376 | 377 | Now we use the tool `coeman-par-sge` in pycoeman to run the list of commands in the cluster and in parallel: 378 | ``` 379 | cd /var/scratch/orubi/data/medium/4ms_60m_1000 380 | coeman-par-sge -d . -c DistributedMatching.xml -s /home/orubi/sw/export_paths.sh -r /local/orubi/DistributedMatching -o DistributedMatchingAllOutputs 381 | ``` 382 | 383 | `/var/scratch/orubi/data/medium/4ms_60m_1000` is our shared data location, so all paths in the XML files are relative to this location. The various commands to run in the cluster are specified in `DistributedMatching.xml`. The file `/home/orubi/sw/export_paths.sh` sets the environment in the nodes: when a job is going to be executed in a certain node, the node needs to have the software available (MicMac, pymicmac and pycoeman). The execution of the jobs in the nodes will be done in the specified location, i.e. `/local/orubi/DistributedMatching`. For each job, the required data for the tile will be copied from the shared data location (`/var/scratch/orubi/data/medium/4ms_60m_1000`) to the local disk (`/local/orubi/DistributedMatching`), so each job has a local copy of the required data for faster access. The tile will be processed locally in the node and the output data (the point cloud of the tile) will be copied back to the shared location. 384 | 385 | After the execution is finished, we have a `DistributedMatchingAllOutputs` folder that contains subfolders, and each subfolder contains a ply file.
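If a single merged point cloud is needed, the per-tile files can be concatenated. The sketch below assumes ASCII PLY tiles with identical, vertex-only headers (an assumption: Nuage2Ply output may be binary, in which case a proper PLY library should be used instead):

```python
# Hedged sketch: merge vertex-only ASCII PLY tiles into a single file.
import glob

def merge_ascii_ply(tile_pattern, out_name):
    header, vertices = None, []
    for name in sorted(glob.glob(tile_pattern)):
        with open(name) as tile:
            lines = tile.read().splitlines()
        end = lines.index('end_header')
        if header is None:
            header = lines[:end + 1]       # keep the first header
        vertices.extend(lines[end + 1:])   # collect vertex records
    # patch the vertex count in the kept header
    header = ['element vertex %d' % len(vertices)
              if line.startswith('element vertex') else line
              for line in header]
    with open(out_name, 'w') as out:
        out.write('\n'.join(header + vertices) + '\n')

merge_ascii_ply('DistributedMatchingAllOutputs/*/*.ply', 'merged.ply')
```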
We can plot the combined CPU/MEM usage for all the nodes of the cluster: 386 | ``` 387 | coeman-mon-plot-cpu-mem -i DistributedMatchingAllOutputs -r 20 388 | ``` 389 | -------------------------------------------------------------------------------- /docs/distmatching_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/docs/distmatching_example.png -------------------------------------------------------------------------------- /pymicmac/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/__init__.py -------------------------------------------------------------------------------- /pymicmac/logsparser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/logsparser/__init__.py -------------------------------------------------------------------------------- /pymicmac/logsparser/get_campari_nums.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import numpy 4 | import os 5 | import argparse 6 | from tabulate import tabulate 7 | from pymicmac import utils_execution 8 | 9 | 10 | def run(xmlFile, foldersNames): 11 | (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile) 12 | 13 | tableGCPs = [] 14 | tableCPs = [] 15 | tableKOs = [] 16 | 17 | for folderName in foldersNames.split(','): 18 | if folderName.endswith('/'): 19 | folderName = folderName[:-1] 20 | 21 | logFileName = folderName + '/Campari.log' 22 | 23 | if os.path.isfile(logFileName): 24 | lines = open(logFileName, 'r').read().split('\n') 25 | (dsGCPs, _, _, _) = ([], [], [], []) 26 | (dsCPs, _, _, _) = ([], [], [], []) 27 | 28 | eiLinesIndexes = [] 29 | for j in range(len(lines)): 30 | if lines[j].count('End Iter'): 31 | eiLinesIndexes.append(j) 32 | 33 | gcpKOs = [] 34 | 35 | for j in range(eiLinesIndexes[-2], len(lines)): 36 | line = lines[j] 37 | if line.count('Dist'): 38 | gcp = line.split()[1] 39 | d = float(line.split('Dist')[-1].split()[0].split('=')[-1]) 40 | if gcp in gcpsXYZ: 41 | dsGCPs.append(d) 42 | elif gcp in cpsXYZ: 43 | dsCPs.append(d) 44 | else: 45 | print('GCP/CP: ' + gcp + ' not found') 46 | sys.exit(1) 47 | elif line.count('NOT OK'): 48 | gcpKOs.append(line.split(' ')[4]) 49 | 50 | if len(gcpKOs): 51 | tableKOs.append([folderName, ','.join(gcpKOs)]) 52 | else: 53 | tableKOs.append([folderName, '-']) 54 | 55 | pattern = "%0.4f" 56 | if len(dsGCPs): 57 | tableGCPs.append([folderName, pattern % 58 | numpy.min(dsGCPs), pattern % 59 | numpy.max(dsGCPs), pattern % 60 | numpy.mean(dsGCPs), pattern % 61 | numpy.std(dsGCPs), pattern % 62 | numpy.median(dsGCPs)]) 63 | else: 64 | tableGCPs.append([folderName, '-', '-', '-', '-', '-']) 65 | if len(dsCPs): 66 | tableCPs.append([folderName, pattern % 67 | numpy.min(dsCPs), pattern % 68 | numpy.max(dsCPs), pattern % 69 | numpy.mean(dsCPs), pattern % 70 | numpy.std(dsCPs), pattern % 71 | numpy.median(dsCPs)]) 72 | else: 73 | tableCPs.append([folderName, '-', '-', '-', '-', '-']) 74 | else: 75 | tableKOs.append([folderName, '-']) 76 | 77 | tableGCPs.append([folderName, '-', '-', '-', '-', '-']) 78 | tableCPs.append([folderName, '-', '-', '-', '-', '-']) 79 | 80 | 81 | 
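# Print the accumulated per-folder KO lists and GCP/CP distance statistics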
print("########################") 82 | print("Campari Dists statistics") 83 | print("########################") 84 | print('KOs') 85 | print(tabulate(tableKOs, headers=['#Name', '', ])) 86 | print() 87 | 88 | header = ['#Name', 'Min', 'Max', 'Mean', 'Std', 'Median'] 89 | # header = ['#Name', 'MeanDist', 'StdDist', 'MeanXDist', 'StdXDist', 90 | # 'MeanYDist', 'StdYDist', 'MeanZDist', 'StdZDist'] 91 | 92 | print('GCPs') 93 | print(tabulate(tableGCPs, headers=header)) 94 | print() 95 | 96 | print('CPs') 97 | print(tabulate(tableCPs, headers=header)) 98 | print() 99 | 100 | 101 | def argument_parser(): 102 | # define argument menu 103 | description = "Gets statistics of Campari runs in one or more execution folders" 104 | parser = argparse.ArgumentParser(description=description) 105 | parser.add_argument( 106 | '-x', 107 | '--xml', 108 | default='', 109 | help='XML file with the 3D position of the GCPs (and possible CPs)', 110 | type=str, 111 | required=True) 112 | parser.add_argument( 113 | '-f', 114 | '--folders', 115 | default='', 116 | help='Comma-separated list of execution folders where to look for the Campari.log files', 117 | type=str, 118 | required=True) 119 | return parser 120 | 121 | 122 | def main(): 123 | try: 124 | a = utils_execution.apply_argument_parser(argument_parser()) 125 | run(a.xml, a.folders) 126 | except Exception as e: 127 | print(e) 128 | 129 | 130 | if __name__ == "__main__": 131 | main() 132 | -------------------------------------------------------------------------------- /pymicmac/logsparser/get_gcpbascule_nums.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import numpy 4 | import os 5 | import math 6 | import argparse 7 | from tabulate import tabulate 8 | from pymicmac import utils_execution 9 | 10 | 11 | def run(xmlFile, foldersNames): 12 | (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile) 13 | numGCPs = len(gcpsXYZ) 14 | numCPs = len(cpsXYZ) 15 | numPs = numGCPs + numCPs 16 | 17 | tableGCPs = [] 18 | tableCPs = [] 19 | tableKOs = [] 20 | 21 | for folderName in foldersNames.split(','): 22 | if folderName.endswith('/'): 23 | folderName = folderName[:-1] 24 | 25 | logFileName = folderName + '/GCPBascule.log' 26 | 27 | if os.path.isfile(logFileName): 28 | lines = open(logFileName, 'r').read().split('\n') 29 | (dsGCPs, usGCPs, vsGCPs, wsGCPs) = ([], [], [], []) 30 | (dsCPs, usCPs, vsCPs, wsCPs) = ([], [], [], []) 31 | 32 | pKOs = [] 33 | 34 | for line in lines: 35 | if line.count('Dist'): 36 | gcp = line.split()[1] 37 | fields = line.split('[')[-1].split(']')[0].split(',') 38 | # u = math.fabs(float(fields[0])) 39 | # v = math.fabs(float(fields[1])) 40 | # w = math.fabs(float(fields[2])) 41 | d = float(line.split('Dist')[-1].split()[0].split('=')[-1]) 42 | 43 | if gcp in gcpsXYZ: 44 | dsGCPs.append(d) 45 | # usGCPs.append(u) 46 | # vsGCPs.append(v) 47 | # wsGCPs.append(w) 48 | elif gcp in cpsXYZ: 49 | dsCPs.append(d) 50 | # usCPs.append(u) 51 | # vsCPs.append(v) 52 | # wsCPs.append(w) 53 | else: 54 | raise Exception('GCP/CP: ' + gcp + ' not found') 55 | 56 | elif line.count('NOT OK'): 57 | pKOs.append(line.split(' ')[4]) 58 | 59 | numGCPsFile = len(dsGCPs) 60 | numCPsFile = len(dsCPs) 61 | numPKOsFile = len(pKOs) 62 | numPsFile = numGCPsFile + numCPsFile + numPKOsFile 63 | 64 | if numPsFile != numPs: 65 | print( 66 | "WARNING: number of GCPs/CPs (" + 67 | str(numPsFile) + 68 | ") processed in " + 69 | folderName + 70 | ' does not match the number in ' + 71 | xmlFile + 72 | "(" 
+ 73 | str(numPs) + 74 | ")") 75 | 76 | if len(pKOs): 77 | tableKOs.append([folderName, ','.join(pKOs)]) 78 | else: 79 | tableKOs.append([folderName, '-']) 80 | 81 | pattern = "%0.4f" 82 | if len(dsGCPs): 83 | tableGCPs.append([folderName, pattern % 84 | numpy.min(dsGCPs), pattern % 85 | numpy.max(dsGCPs), pattern % 86 | numpy.mean(dsGCPs), pattern % 87 | numpy.std(dsGCPs), pattern % 88 | numpy.median(dsGCPs)]) 89 | #tableGCPs.append([folderName, pattern % numpy.mean(dsGCPs), pattern % numpy.std(dsGCPs), pattern % numpy.mean(usGCPs), pattern % numpy.std(usGCPs), pattern % numpy.mean(vsGCPs), pattern % numpy.std(vsGCPs), pattern % numpy.mean(wsGCPs), pattern % numpy.std(wsGCPs)]) 90 | else: 91 | tableGCPs.append([folderName, '-', '-', '-', '-', '-']) 92 | #tableGCPs.append([folderName, '-', '-', '-', '-', '-', '-', '-', '-']) 93 | if len(dsCPs): 94 | tableCPs.append([folderName, pattern % 95 | numpy.min(dsCPs), pattern % 96 | numpy.max(dsCPs), pattern % 97 | numpy.mean(dsCPs), pattern % 98 | numpy.std(dsCPs), pattern % 99 | numpy.median(dsCPs)]) 100 | #tableCPs.append([folderName, pattern % numpy.mean(dsCPs), pattern % numpy.std(dsCPs), pattern % numpy.mean(usCPs), pattern % numpy.std(usCPs), pattern % numpy.mean(vsCPs), pattern % numpy.std(vsCPs), pattern % numpy.mean(wsCPs), pattern % numpy.std(wsCPs)]) 101 | else: 102 | tableCPs.append([folderName, '-', '-', '-', '-', '-']) 103 | #tableCPs.append([folderName, '-', '-', '-', '-', '-', '-', '-', '-']) 104 | else: 105 | tableKOs.append([folderName, '-']) 106 | 107 | tableGCPs.append([folderName, '-', '-', '-', '-', '-']) 108 | #tableGCPs.append([folderName, '-', '-', '-', '-', '-', '-', '-', '-']) 109 | tableCPs.append([folderName, '-', '-', '-', '-', '-']) 110 | #tableCPs.append([folderName, '-', '-', '-', '-', '-', '-', '-', '-']) 111 | 112 | print("###########################") 113 | print("GCPBascule Dists statistics") 114 | print("###########################") 115 | print('KOs') 116 | print(tabulate(tableKOs, headers=['#Name', '', ])) 117 | print() 118 | 119 | header = ['#Name', 'Min', 'Max', 'Mean', 'Std', 'Median'] 120 | # header = ['#Name', 'MeanDist', 'StdDist', 'MeanXDist', 'StdXDist', 121 | # 'MeanYDist', 'StdYDist', 'MeanZDist', 'StdZDist'] 122 | 123 | print('GCPs') 124 | print(tabulate(tableGCPs, headers=header)) 125 | print() 126 | 127 | print('CPs') 128 | print(tabulate(tableCPs, headers=header)) 129 | print() 130 | 131 | 132 | def argument_parser(): 133 | # define argument menu 134 | description = "Gets statistics of GCPBascule runs in one or more execution folders" 135 | parser = argparse.ArgumentParser(description=description) 136 | parser.add_argument( 137 | '-x', 138 | '--xml', 139 | default='', 140 | help='XML file with the 3D position of the GCPs (and possible CPs)', 141 | type=str, 142 | required=True) 143 | parser.add_argument( 144 | '-f', 145 | '--folders', 146 | default='', 147 | help='Comma-separated list of execution folders where to look for the GCPBascule.log files', 148 | type=str, 149 | required=True) 150 | return parser 151 | 152 | 153 | def main(): 154 | try: 155 | a = utils_execution.apply_argument_parser(argument_parser()) 156 | run(a.xml, a.folders) 157 | except Exception as e: 158 | print(e) 159 | 160 | 161 | if __name__ == "__main__": 162 | main() 163 | -------------------------------------------------------------------------------- /pymicmac/logsparser/get_homol_diffs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import 
numpy 4 | import argparse 5 | from tabulate import tabulate 6 | from pymicmac import utils_execution 7 | 8 | 9 | def run(originalHomol, compareHomols): 10 | table = [] 11 | header = ['#Name', 'Homol dec'] 12 | 13 | rootHomolSize = utils_execution.getSize(originalHomol) 14 | 15 | compareHomols = compareHomols.split(',') 16 | 17 | for compareHomol in compareHomols: 18 | homolSize = utils_execution.getSize(compareHomol) 19 | pattern = "%0.4f" 20 | if homolSize > 0: 21 | table.append([compareHomol, pattern % 22 | (homolSize / rootHomolSize)]) 23 | else: 24 | table.append([compareHomol, '-']) 25 | 26 | print("###########") 27 | print("Ratio Homol") 28 | print("###########") 29 | print(tabulate(table, headers=header)) 30 | print() 31 | 32 | 33 | def argument_parser(): 34 | # define argument menu 35 | description = "Gets statistics of comparing Homol folders in different execution folders" 36 | parser = argparse.ArgumentParser(description=description) 37 | # fill argument groups 38 | parser.add_argument( 39 | '-o', 40 | '--original', 41 | default='', 42 | help='Original Homol folder', 43 | type=str, 44 | required=True) 45 | parser.add_argument( 46 | '-c', 47 | '--compare', 48 | default='', 49 | help='Comma-separated list of Homol folders to compare', 50 | type=str, 51 | required=True) 52 | return parser 53 | 54 | 55 | def main(): 56 | try: 57 | a = utils_execution.apply_argument_parser(argument_parser()) 58 | run(a.original, a.compare) 59 | except Exception as e: 60 | print(e) 61 | 62 | 63 | if __name__ == "__main__": 64 | main() 65 | -------------------------------------------------------------------------------- /pymicmac/logsparser/get_redtiep_nums.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | import argparse 4 | from tabulate import tabulate 5 | from pymicmac import utils_execution 6 | 7 | 8 | def run(foldersNames): 9 | header = ['#Name', 'CInit', 'Ini', 'CEnd', 'End'] 10 | table = [] 11 | for folderName in foldersNames.split(','): 12 | if folderName.endswith('/'): 13 | folderName = folderName[:-1] 14 | logFileName = folderName + '/RedTieP.log' 15 | logFolderName = folderName + '/RedTieP_logs' 16 | if os.path.isdir(logFolderName): 17 | lines = os.popen('cat ' + logFolderName + '/*').read().split('\n') 18 | else: 19 | lines = open(logFileName, 'r').read().split('\n') if os.path.isfile(logFileName) else [] 20 | inits = [] 21 | ends = [] 22 | c1 = 0 23 | c2 = 0 24 | for line in lines: 25 | if line.count('#InitialHomolPoints:'): 26 | c1 += 1 27 | inits.append( 28 | int(line.split(' ')[0].split(':')[-1].replace('.', ''))) 29 | if line.count('#HomolPoints:'): 30 | c2 += 1 31 | ends.append( 32 | int(line.split(' ')[1].split('=>')[-1].split('(')[0])) 33 | 34 | table.append([folderName, str(c1), str( 35 | sum(inits)), str(c2), str(sum(ends))]) 36 | 37 | print("#################") 38 | print("RedTieP reduction") 39 | print("#################") 40 | print(tabulate(table, headers=header)) 41 | 42 | 43 | def argument_parser(): 44 | # define argument menu 45 | description = "Gets statistics of RedTieP runs in one or more execution folders" 46 | parser = argparse.ArgumentParser(description=description) 47 | parser.add_argument( 48 | '-f', 49 | '--folders', 50 | default='', 51 | help='Comma-separated list of execution folders where to look for the RedTieP.log files (or RedTieP_logs folders if RedTieP was executed with Noodles)', 52 | type=str, 53 | required=True) 54 | return parser 55 | 56 | 57 | def main(): 58 | try: 59 | a = 
utils_execution.apply_argument_parser(argument_parser())
60 |         run(a.folders)
61 |     except Exception as e:
62 |         print(e)
63 | 
64 | 
65 | if __name__ == "__main__":
66 |     main()
67 | 
--------------------------------------------------------------------------------
/pymicmac/logsparser/get_tapas_nums.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | import os
4 | import argparse
5 | from tabulate import tabulate
6 | from pymicmac import utils_execution
7 | 
8 | 
9 | def run(foldersNames):
10 |     table = []
11 |     header = ['#Name', 'NumIter', 'Res', 'Wor']
12 | 
13 |     for folderName in foldersNames.split(','):
14 |         if folderName.endswith('/'):
15 |             folderName = folderName[:-1]
16 |         logFileName = folderName + '/Tapas.log'
17 |         if os.path.isfile(logFileName):
18 |             lines = open(logFileName, 'r').read().split('\n')
19 |             residuals = []
20 |             worsts = []
21 |             c1 = 0
22 |             for line in lines:
23 |                 if line.count('Residual = '):
24 |                     c1 += 1
25 |                     residuals.append(
26 |                         line.split(';;')[0].replace(
27 |                             '| | Residual = ', ''))
28 |                 elif line.count(' Worst, Res '):
29 |                     worsts.append(
30 |                         line.split('for')[0].replace(
31 |                             '| | Worst, Res ', ''))
32 |             if len(worsts) and len(residuals):
33 |                 table.append([folderName, str(c1), residuals[-1], worsts[-1]])
34 |             else:
35 |                 table.append([folderName, '-', '-', '-'])
36 |         else:
37 |             table.append([folderName, '-', '-', '-'])
38 | 
39 |     print("##########################")
40 |     print("Tapas last residuals/worsts")
41 |     print("##########################")
42 |     print(tabulate(table, headers=header))
43 |     print()
44 | 
45 | 
46 | def argument_parser():
47 |     # define argument menu
48 |     description = "Gets statistics of Tapas runs in one or more execution folders"
49 |     parser = argparse.ArgumentParser(description=description)
50 |     parser.add_argument(
51 |         '-f',
52 |         '--folders',
53 |         default='',
54 |         help='Comma-separated list of execution folders where to look for the Tapas.log files',
55 |         type=str,
56 |         required=True)
57 |     return parser
58 | 
59 | 
60 | def main():
61 |     try:
62 |         a = utils_execution.apply_argument_parser(argument_parser())
63 |         run(a.folders)
64 |     except Exception as e:
65 |         print(e)
66 | 
67 | 
68 | if __name__ == "__main__":
69 |     main()
70 | 
--------------------------------------------------------------------------------
/pymicmac/logsplotter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/logsplotter/__init__.py
--------------------------------------------------------------------------------
/pymicmac/logsplotter/plot_campari_nums.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import os
3 | import math
4 | import argparse
5 | import matplotlib.pyplot as plt
6 | from tabulate import tabulate
7 | from pymicmac import utils_execution
8 | from mpl_toolkits.mplot3d import axes3d  # noqa: F401 (registers the '3d' projection used below)
9 | 
10 | def run(xmlFile, foldersNames):
11 |     (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile)
12 | 
13 |     fig = plt.figure(figsize=(27, 15))
14 |     fig.subplots_adjust(
15 |         left=0.01,
16 |         bottom=0.01,
17 |         right=0.99,
18 |         top=0.99,
19 |         wspace=0.005,
20 |         hspace=0.005)
21 | 
22 |     gcpsUVW = {}
23 |     cpsUVW = {}
24 | 
25 |     foldersNames = foldersNames.split(',')
26 |     numFolders = len(foldersNames)
27 |     for i in range(numFolders):
28 |         if foldersNames[i].endswith('/'):
29 |             foldersNames[i] = foldersNames[i][:-1]
30 | 
31 |     nY = 
int(math.ceil(math.sqrt(numFolders)))
32 |     nX = int(math.ceil(numFolders / nY))
33 | 
34 | 
35 |     for i in range(numFolders):
36 |         folderName = foldersNames[i]
37 |         if folderName.endswith('/'):
38 |             folderName = folderName[:-1]
39 |         logFileName = folderName + '/Campari.log'
40 | 
41 |         if os.path.isfile(logFileName):
42 |             lines = open(logFileName, 'r').read().split('\n')
43 | 
44 |             gcpsUVW[folderName] = {}
45 |             cpsUVW[folderName] = {}
46 | 
47 |             eiLinesIndexes = []
48 |             for j in range(len(lines)):
49 |                 if lines[j].count('End Iter'):
50 |                     eiLinesIndexes.append(j)
51 | 
52 |             for j in range(eiLinesIndexes[-2], len(lines)):
53 |                 line = lines[j]
54 |                 if line.count('Dist'):
55 |                     gcp = line.split()[1]
56 |                     fields = line.split('[')[-1].split(']')[0].split(',')
57 |                     u = float(fields[0])
58 |                     v = float(fields[1])
59 |                     w = float(fields[2])
60 |                     d = float(line.split('Dist')[-1].split()[0].split('=')[-1])
61 | 
62 |                     if gcp in cpsXYZ:
63 |                         cpsUVW[folderName][gcp] = (u, v, w, d)
64 |                     elif gcp in gcpsXYZ:
65 |                         gcpsUVW[folderName][gcp] = (u, v, w, d)
66 |                     else:
67 |                         raise Exception('GCP/CP: ' + gcp + ' not found')
68 | 
69 |             ax = fig.add_subplot(nX, nY, i + 1, projection='3d')
70 | 
71 |             (xs, ys, zs, us, vs, ws) = ([], [], [], [], [], [])
72 |             for gcp in gcpsUVW[folderName]:
73 |                 (x, y, z) = gcpsXYZ[gcp]
74 |                 (u, v, w, _) = gcpsUVW[folderName][gcp]
75 |                 xs.append(x)
76 |                 ys.append(y)
77 |                 zs.append(z)
78 |                 us.append(u)
79 |                 vs.append(v)
80 |                 ws.append(w)
81 |                 # print(x, y, z, u, v, w)
82 |                 ax.text(x, y, z, gcp, color='blue', fontsize=6)
83 |             ax.scatter(xs, ys, zs, marker='o', c='blue')
84 |             ax.quiver(
85 |                 xs,
86 |                 ys,
87 |                 zs,
88 |                 us,
89 |                 vs,
90 |                 ws,
91 |                 length=1.0,
92 |                 pivot="tail",
93 |                 color='blue')
94 | 
95 |             (xs, ys, zs, us, vs, ws) = ([], [], [], [], [], [])
96 |             for cp in cpsUVW[folderName]:
97 |                 (x, y, z) = cpsXYZ[cp]
98 |                 (u, v, w, _) = cpsUVW[folderName][cp]
99 |                 xs.append(x)
100 |                 ys.append(y)
101 |                 zs.append(z)
102 |                 us.append(u)
103 |                 vs.append(v)
104 |                 ws.append(w)
105 |                 # print(x, y, z, u, v, w)
106 |                 ax.text(x, y, z, cp, color='red', fontsize=6)
107 |             ax.scatter(xs, ys, zs, marker='o', c='red')
108 |             ax.quiver(
109 |                 xs,
110 |                 ys,
111 |                 zs,
112 |                 us,
113 |                 vs,
114 |                 ws,
115 |                 length=1.0,
116 |                 pivot="tail",
117 |                 color='red')
118 | 
119 |             ax.set_xlabel('X', fontsize=8, labelpad=-5)
120 |             ax.set_ylabel('Y', fontsize=8, labelpad=-5)
121 |             ax.set_zlabel('Z', fontsize=8, labelpad=-5)
122 |             ax.set_title(folderName, fontsize=8)
123 |             ax.tick_params(labelsize=6, direction='out', pad=-1)
124 |             ax.tick_params(axis='z', labelsize=0, pad=-3)
125 | 
126 |             blue_proxy = plt.Rectangle((0, 0), 1, 1, fc="b")
127 |             red_proxy = plt.Rectangle((0, 0), 1, 1, fc="r")
128 |             ax.legend([blue_proxy, red_proxy], ['GCPs', 'CPs'],
129 |                       loc='upper right', bbox_to_anchor=(0.9, 0.9), prop={'size': 6})
130 |             ax.view_init(elev=-90., azim=0.)
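            # A hypothetical Campari log line of the kind parsed above
            # (illustrative only): "|  GCP12 [0.0012,-0.0034,0.0050] Dist=0.0062"
            # yields gcp='GCP12', (u, v, w) = (0.0012, -0.0034, 0.0050) and
            # d=0.0062; the quiver then draws each residual vector (u, v, w)
            # anchored at the ground position (x, y, z) of its GCP/CP.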
131 | 132 | # ax.set_zlim(1,6) 133 | 134 | table = [] 135 | for gcp in sorted(gcpsXYZ): 136 | row = [gcp, ] 137 | for i in range(numFolders): 138 | folderName = foldersNames[i] 139 | if folderName in gcpsUVW and gcp in gcpsUVW[folderName]: 140 | row.append(gcpsUVW[folderName][gcp][-1]) 141 | else: 142 | row.append('-') 143 | table.append(row) 144 | 145 | print("########################") 146 | print("Campari Dist per GCP/CP") 147 | print("########################") 148 | 149 | header = ['GCP', ] + foldersNames 150 | print(tabulate(table, headers=header)) 151 | print() 152 | 153 | table = [] 154 | for cp in sorted(cpsXYZ): 155 | row = [cp, ] 156 | for i in range(numFolders): 157 | folderName = foldersNames[i] 158 | if folderName in cpsUVW and cp in cpsUVW[folderName]: 159 | row.append(cpsUVW[folderName][cp][-1]) 160 | else: 161 | row.append('-') 162 | table.append(row) 163 | 164 | header = ['CP', ] + foldersNames 165 | print(tabulate(table, headers=header)) 166 | print() 167 | 168 | plt.show() 169 | 170 | 171 | def argument_parser(): 172 | # define argument menu 173 | description = "Plots a 3D quiver of Campari runs in one or more execution folders" 174 | parser = argparse.ArgumentParser(description=description) 175 | parser.add_argument( 176 | '-x', 177 | '--xml', 178 | default='', 179 | help='XML file with the 3D position of the GCPs (and possible CPs)', 180 | type=str, 181 | required=True) 182 | parser.add_argument( 183 | '-f', 184 | '--folders', 185 | default='', 186 | help='Comma-separated list of execution folders where to look for the Campari.log files', 187 | type=str, 188 | required=True) 189 | return parser 190 | 191 | 192 | def main(): 193 | try: 194 | a = utils_execution.apply_argument_parser(argument_parser()) 195 | run(a.xml, a.folders) 196 | except Exception as e: 197 | print(e) 198 | 199 | 200 | if __name__ == "__main__": 201 | main() 202 | -------------------------------------------------------------------------------- /pymicmac/logsplotter/plot_gcpbascule_nums.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | import math 4 | import argparse 5 | from mpl_toolkits.mplot3d import axes3d 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | from lxml import etree 9 | from tabulate import tabulate 10 | from pymicmac import utils_execution 11 | 12 | 13 | def run(xmlFile, foldersNames): 14 | 15 | (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile) 16 | 17 | fig = plt.figure(figsize=(27, 15)) 18 | fig.subplots_adjust( 19 | left=0.01, 20 | bottom=0.01, 21 | right=0.99, 22 | top=0.99, 23 | wspace=0.005, 24 | hspace=0.005) 25 | 26 | gcpsUVW = {} 27 | cpsUVW = {} 28 | 29 | foldersNames = foldersNames.split(',') 30 | numFolders = len(foldersNames) 31 | 32 | nY = int(math.ceil(math.sqrt(numFolders))) 33 | nX = int(math.ceil(numFolders / nY)) 34 | 35 | numAxis = int(math.ceil(math.sqrt(numFolders))) 36 | 37 | for i in range(numFolders): 38 | if foldersNames[i].endswith('/'): 39 | foldersNames[i] = foldersNames[i][:-1] 40 | 41 | for i in range(numFolders): 42 | folderName = foldersNames[i] 43 | logFileName = folderName + '/GCPBascule.log' 44 | 45 | if os.path.isfile(logFileName): 46 | lines = open(logFileName, 'r').read().split('\n') 47 | dists = [] 48 | 49 | gcpsUVW[folderName] = {} 50 | cpsUVW[folderName] = {} 51 | 52 | for line in lines: 53 | if line.count('Dist'): 54 | gcp = line.split()[1] 55 | fields = line.split('[')[-1].split(']')[0].split(',') 56 | u = float(fields[0]) 57 | v = 
float(fields[1])
58 |                 w = float(fields[2])
59 |                 d = float(line.split('Dist')[-1].split()[0].split('=')[-1])
60 | 
61 |                 if gcp in cpsXYZ:
62 |                     cpsUVW[folderName][gcp] = (u, v, w, d)
63 |                 elif gcp in gcpsXYZ:
64 |                     gcpsUVW[folderName][gcp] = (u, v, w, d)
65 |                 else:
66 |                     raise Exception('GCP/CP: ' + gcp + ' not found')
67 | 
68 |             ax = fig.add_subplot(nX, nY, i + 1, projection='3d')
69 | 
70 |             (xs, ys, zs, us, vs, ws) = ([], [], [], [], [], [])
71 |             for gcp in gcpsUVW[folderName]:
72 |                 (x, y, z) = gcpsXYZ[gcp]
73 |                 (u, v, w, _) = gcpsUVW[folderName][gcp]
74 |                 xs.append(x)
75 |                 ys.append(y)
76 |                 zs.append(z)
77 |                 us.append(u)
78 |                 vs.append(v)
79 |                 ws.append(w)
80 |                 # print(x, y, z, u, v, w)
81 |                 ax.text(x, y, z, gcp, color='blue', fontsize=6)
82 |             ax.scatter(xs, ys, zs, marker='o', c='blue')
83 |             ax.quiver(
84 |                 xs,
85 |                 ys,
86 |                 zs,
87 |                 us,
88 |                 vs,
89 |                 ws,
90 |                 length=1.0,
91 |                 pivot="tail",
92 |                 color='blue')
93 | 
94 |             (xs, ys, zs, us, vs, ws) = ([], [], [], [], [], [])
95 |             for cp in cpsUVW[folderName]:
96 |                 (x, y, z) = cpsXYZ[cp]
97 |                 (u, v, w, _) = cpsUVW[folderName][cp]
98 |                 xs.append(x)
99 |                 ys.append(y)
100 |                 zs.append(z)
101 |                 us.append(u)
102 |                 vs.append(v)
103 |                 ws.append(w)
104 |                 # print(x, y, z, u, v, w)
105 |                 ax.text(x, y, z, cp, color='red', fontsize=6)
106 |             ax.scatter(xs, ys, zs, marker='o', c='red')
107 |             ax.quiver(
108 |                 xs,
109 |                 ys,
110 |                 zs,
111 |                 us,
112 |                 vs,
113 |                 ws,
114 |                 length=1.0,
115 |                 pivot="tail",
116 |                 color='red')
117 | 
118 |             ax.set_xlabel('X', fontsize=8, labelpad=-5)
119 |             ax.set_ylabel('Y', fontsize=8, labelpad=-5)
120 |             ax.set_zlabel('Z', fontsize=8, labelpad=-5)
121 |             ax.set_title(folderName, fontsize=8)
122 |             ax.tick_params(labelsize=6, direction='out', pad=-1)
123 |             # ax.tick_params(axis='z', labelsize=0, pad=-3)
124 | 
125 |             blue_proxy = plt.Rectangle((0, 0), 1, 1, fc="b")
126 |             red_proxy = plt.Rectangle((0, 0), 1, 1, fc="r")
127 |             ax.legend([blue_proxy, red_proxy], ['GCPs', 'CPs'],
128 |                       loc='upper right', bbox_to_anchor=(0.9, 0.9), prop={'size': 6})
129 |             ax.view_init(elev=-90., azim=0.)
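            # The quiver drawn above anchors one arrow per GCP/CP at its ground
            # position (x, y, z) from the XML and orients it along the residual
            # (u, v, w) parsed from GCPBascule.log, so arrow length is a visual
            # proxy for the Dist value listed in the tables printed below.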
130 | 131 | # ax.set_zlim(1,6) 132 | 133 | print("##########################") 134 | print("GCPBascule Dist per GCP/CP") 135 | print("##########################") 136 | 137 | table = [] 138 | for gcp in sorted(gcpsXYZ): 139 | row = [gcp, ] 140 | for i in range(numFolders): 141 | folderName = foldersNames[i] 142 | if folderName in gcpsUVW and gcp in gcpsUVW[folderName]: 143 | row.append(gcpsUVW[folderName][gcp][-1]) 144 | else: 145 | row.append('-') 146 | table.append(row) 147 | 148 | header = ['GCP', ] + foldersNames 149 | print(tabulate(table, headers=header)) 150 | print() 151 | 152 | table = [] 153 | for cp in sorted(cpsXYZ): 154 | row = [cp, ] 155 | for i in range(numFolders): 156 | folderName = foldersNames[i] 157 | if folderName in cpsUVW and cp in cpsUVW[folderName]: 158 | row.append(cpsUVW[folderName][cp][-1]) 159 | else: 160 | row.append('-') 161 | table.append(row) 162 | 163 | header = ['CP', ] + foldersNames 164 | print(tabulate(table, headers=header)) 165 | print() 166 | 167 | plt.show() 168 | 169 | 170 | def argument_parser(): 171 | # define argument menu 172 | description = "Plots a 3D quiver of GCPBascule runs in one or more execution folders" 173 | parser = argparse.ArgumentParser(description=description) 174 | parser.add_argument( 175 | '-x', 176 | '--xml', 177 | default='', 178 | help='XML file with the 3D position of the GCPs (and possible CPs)', 179 | type=str, 180 | required=True) 181 | parser.add_argument( 182 | '-f', 183 | '--folders', 184 | default='', 185 | help='Comma-separated list of execution folders where to look for the GCPBascule.log files', 186 | type=str, 187 | required=True) 188 | return parser 189 | 190 | 191 | def main(): 192 | try: 193 | a = utils_execution.apply_argument_parser(argument_parser()) 194 | run(a.xml, a.folders) 195 | except Exception as e: 196 | print(e) 197 | 198 | 199 | if __name__ == "__main__": 200 | main() 201 | -------------------------------------------------------------------------------- /pymicmac/logsplotter/plot_gcps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import os 4 | import math 5 | import argparse 6 | from mpl_toolkits.mplot3d import axes3d 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | from lxml import etree 10 | from pymicmac import utils_execution 11 | 12 | 13 | def run(xmlFile): 14 | 15 | (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile) 16 | 17 | fig = plt.figure(figsize=(27, 15)) 18 | fig.subplots_adjust( 19 | left=0.01, 20 | bottom=0.01, 21 | right=0.99, 22 | top=0.99, 23 | wspace=0.005, 24 | hspace=0.005) 25 | 26 | ax = fig.add_subplot(1, 1, 1, projection='3d') 27 | 28 | (xs, ys, zs) = ([], [], []) 29 | for gcp in gcpsXYZ: 30 | (x, y, z) = gcpsXYZ[gcp] 31 | xs.append(x) 32 | ys.append(y) 33 | zs.append(z) 34 | ax.text(x, y, z, gcp, color='blue', fontsize=6) 35 | ax.scatter(xs, ys, zs, marker='o', c='blue') 36 | 37 | (xs, ys, zs) = ([], [], []) 38 | for cp in cpsXYZ: 39 | (x, y, z) = cpsXYZ[cp] 40 | xs.append(x) 41 | ys.append(y) 42 | zs.append(z) 43 | ax.text(x, y, z, cp, color='red', fontsize=6) 44 | ax.scatter(xs, ys, zs, marker='o', c='red') 45 | 46 | ax.set_xlabel('X', fontsize=8, labelpad=-5) 47 | ax.set_ylabel('Y', fontsize=8, labelpad=-5) 48 | ax.set_zlabel('Z', fontsize=8, labelpad=-5) 49 | 50 | ax.tick_params(labelsize=6, direction='out', pad=-1) 51 | ax.tick_params(axis='z', labelsize=0, pad=-3) 52 | 53 | blue_proxy = plt.Rectangle((0, 0), 1, 1, fc="b") 54 | red_proxy = plt.Rectangle((0, 0), 1, 
1, fc="r")
55 |     ax.legend([blue_proxy, red_proxy], ['GCPs', 'CPs'],
56 |               loc='upper right', bbox_to_anchor=(0.9, 0.9), prop={'size': 6})
57 |     ax.view_init(elev=-90., azim=0.)
58 | 
59 |     plt.show()
60 | 
61 | 
62 | def argument_parser():
63 |     # define argument menu
64 |     description = "Plots the 3D positions of GCPs/CPs"
65 |     parser = argparse.ArgumentParser(description=description)
66 |     parser.add_argument(
67 |         '-x',
68 |         '--xml',
69 |         default='',
70 |         help='XML file with the 3D position of the GCPs (and possible CPs)',
71 |         type=str,
72 |         required=True)
73 |     return parser
74 | 
75 | 
76 | def main():
77 |     try:
78 |         a = utils_execution.apply_argument_parser(argument_parser())
79 |         run(a.xml)
80 |     except Exception as e:
81 |         print(e)
82 | 
83 | 
84 | if __name__ == "__main__":
85 |     main()
86 | 
--------------------------------------------------------------------------------
/pymicmac/logsplotter/plot_tiep.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import math
4 | import argparse
5 | import matplotlib.pyplot as plt
6 | import matplotlib.gridspec as gridspec
7 | from pymicmac import utils_execution
8 | 
9 | 
10 | def run(inputArgument, maxX, maxY):
11 |     if os.path.isdir(inputArgument):
12 |         inputFiles = os.listdir(inputArgument)
13 |         num = len(inputFiles)
14 |         numCols = int(math.ceil(math.sqrt(num)))
15 |         numRows = int(2 * numCols)
16 | 
17 |         plt.figure(figsize=(18, 10))
18 | 
19 |         gs = gridspec.GridSpec(numRows, numCols)
20 |         gs.update(hspace=0.3)
21 | 
22 |         for i in range(num):
23 | 
24 |             lines = open(
25 |                 inputArgument + '/' + inputFiles[i],
26 |                 'r').read().split('\n')
27 | 
28 |             x1 = []
29 |             y1 = []
30 |             x2 = []
31 |             y2 = []
32 | 
33 |             for line in lines:
34 |                 fields = line.split()
35 |                 if len(fields) == 4:
36 |                     x1.append(float(fields[0]))
37 |                     y1.append(float(fields[1]))
38 |                     x2.append(float(fields[2]))
39 |                     y2.append(float(fields[3]))
40 | 
41 |             ax1 = plt.subplot(gs[int(2 * int(i / numCols)), i % numCols])
42 |             ax2 = plt.subplot(gs[int(1 + (2 * int(i / numCols))), i % numCols])
43 | 
44 |             vis = False
45 | 
46 |             ax1.get_xaxis().set_visible(vis)
47 |             ax1.get_yaxis().set_visible(vis)
48 |             ax2.get_xaxis().set_visible(vis)
49 |             ax2.get_yaxis().set_visible(vis)
50 | 
51 |             ax1.set_xlim([0, maxX])
52 |             ax1.set_ylim([0, maxY])
53 |             ax2.set_xlim([0, maxX])
54 |             ax2.set_ylim([0, maxY])
55 | 
56 |             ax1.plot(x1, y1, 'b.')
57 |             ax1.set_title(inputArgument + '/' + inputFiles[i], fontsize=6)
58 |             ax2.plot(x2, y2, 'r.')
59 |             #ax2.set_title(inputFiles[i], fontsize=6)
60 |     else:
61 |         lines = open(inputArgument, 'r').read().split('\n')
62 | 
63 |         x1 = []
64 |         y1 = []
65 |         x2 = []
66 |         y2 = []
67 | 
68 |         for line in lines:
69 |             fields = line.split()
70 |             if len(fields) == 4:
71 |                 x1.append(float(fields[0]))
72 |                 y1.append(float(fields[1]))
73 |                 x2.append(float(fields[2]))
74 |                 y2.append(float(fields[3]))
75 | 
76 |         plt.subplot(2, 1, 1)
77 |         plt.plot(x1, y1, 'b.')
78 | 
79 |         plt.title(inputArgument)
80 | 
81 |         plt.subplot(2, 1, 2)
82 |         plt.plot(x2, y2, 'r.')
83 | 
84 |         plt.xlim([0, maxX])
85 |         plt.ylim([0, maxY])
86 | 
87 |     plt.show()
88 | 
89 | 
90 | def argument_parser():
91 |     # define argument menu
92 |     description = "Plots the tie-points from a single tie-points file or from the files of an image subfolder of the Homol folder"
93 |     parser = argparse.ArgumentParser(description=description)
94 |     parser.add_argument(
95 |         '-i',
96 |         '--input',
97 |         default='',
98 |         help='Input argument. 
Can be a single tie-points file or a subfolder in Homol folder (tie-points files must be in ASCII format, use ExpTxt=1 when running Tapioca)',
99 |         type=str,
100 |         required=True)
101 |     parser.add_argument(
102 |         '--maxx',
103 |         default='',
104 |         help='Maximum X value',
105 |         type=int,
106 |         required=True)
107 |     parser.add_argument(
108 |         '--maxy',
109 |         default='',
110 |         help='Maximum Y value',
111 |         type=int,
112 |         required=True)
113 |     return parser
114 | 
115 | 
116 | def main():
117 |     try:
118 |         a = utils_execution.apply_argument_parser(argument_parser())
119 |         run(a.input, a.maxx, a.maxy)
120 |     except Exception as e:
121 |         print(e)
122 | 
123 | 
124 | if __name__ == "__main__":
125 |     main()
126 | 
--------------------------------------------------------------------------------
/pymicmac/noodles/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/noodles/__init__.py
--------------------------------------------------------------------------------
/pymicmac/noodles/noodles_exe_parallel.py:
--------------------------------------------------------------------------------
1 | import noodles
2 | from noodles.workflow import (get_workflow)
3 | from noodles.run.connection import (Connection)
4 | from noodles.run.queue import (Queue)
5 | from noodles.run.haploid import (push, branch, patch, sink_map, push_map)
6 | from noodles.run.worker import (worker)
7 | from noodles.run.thread_pool import (thread_pool)
8 | from noodles.run.scheduler import (Scheduler)
9 | from noodles.display import NCDisplay
10 | 
11 | 
12 | import subprocess
13 | import sys
14 | import argparse
15 | import json
16 | import shlex
17 | import os
18 | 
19 | import threading
20 | from itertools import repeat
21 | 
22 | 
23 | @push_map
24 | def log_job_start(key, job):
25 |     return (key, 'start', job, None)
26 | 
27 | 
28 | @push_map
29 | def log_job_schedule(key, job):
30 |     return (key, 'schedule', job, None)
31 | 
32 | 
33 | logFolderAbsPath = ""
34 | 
35 | 
36 | class Job:
37 |     def __init__(self, task, exclude, state, job, key):
38 |         self.task = task
39 |         self.exclude = exclude
40 |         self.state = state
41 |         self.job = job
42 |         self.key = key
43 | 
44 | 
45 | def dynamic_exclusion_worker(display, n_threads):
46 |     """This worker allows mutually exclusive jobs to start safely. The
47 |     user provides the information on which jobs exclude the simultaneous
48 |     execution of other jobs::
49 |         a = task()
50 |         b = task()
51 |         update_hints(a, {'task': '1', 'exclude': ['2']})
52 |         update_hints(b, {'task': '2', 'exclude': ['1']})
53 |         run(gather(a, b))
54 |     Using this worker, when task ``a`` is sent to the underlying worker,
55 |     task ``b`` is blocked until ``a`` completes, and vice versa.
56 |     """
57 |     LogQ = Queue()
58 | 
59 |     Scheduler(error_handler=display.error_handler)
60 | 
61 |     threading.Thread(
62 |         target=patch,
63 |         args=(LogQ.source, sink_map(display)),
64 |         daemon=True).start()
65 | 
66 |     W = Queue() \
67 |         >> branch(log_job_start >> LogQ.sink) \
68 |         >> thread_pool(*repeat(worker, n_threads)) \
69 |         >> branch(LogQ.sink)
70 | 
71 |     result_source, job_sink = W.setup()
72 | 
73 |     jobs = {}
74 |     key_task = {}
75 | 
76 |     @push
77 |     def pass_job():
78 |         """The scheduler sends jobs to this coroutine. If the 'exclude' key
79 |         is found in the hints, it is run in exclusive mode. We keep an internal
80 |         record of these jobs, and whether they are 'waiting', 'running' or 'done'.
81 |         """
82 |         while True:
83 |             key, job = yield
84 | 
85 |             if job.hints and 'exclude' in job.hints:
86 |                 j = Job(task=job.hints['task'],
87 |                         exclude=job.hints['exclude'],
88 |                         state='waiting',
89 |                         job=job,
90 |                         key=key)
91 |                 jobs[j.task] = j
92 |                 key_task[key] = j.task
93 |                 try_to_start(j.task)
94 | 
95 |             else:
96 |                 job_sink.send((key, job))
97 | 
98 |     def is_not_running(task):
99 |         """Checks if a task is not running."""
100 |         return not (task in jobs and jobs[task].state == 'running')
101 | 
102 |     def try_to_start(task):
103 |         """Try to start a task. This only succeeds if the task hasn't already
104 |         run, and no jobs are currently running that are excluded by the task."""
105 |         if jobs[task].state != 'waiting':
106 |             return
107 | 
108 |         if all(is_not_running(i) for i in jobs[task].exclude):
109 |             jobs[task].state = 'running'
110 |             key, job = jobs[task].key, jobs[task].job
111 |             job_sink.send((key, job))
112 | 
113 |     def finish(key):
114 |         """Finish a job. This function is called when we receive a result."""
115 |         task = key_task[key]
116 |         jobs[task].state = 'done'
117 |         for i in jobs[task].exclude:
118 |             try_to_start(i)
119 | 
120 |     def pass_result():
121 |         """Receive a result; finish the task in the register and send the result
122 |         back to the scheduler."""
123 |         for key, status, result, err in result_source:
124 |             if key in key_task:
125 |                 finish(key)
126 | 
127 |             yield (key, status, result, err)
128 | 
129 |     return Connection(pass_result, pass_job)
130 | 
131 | 
132 | def run(wf, *, display, n_threads=1):
133 |     """Run the workflow using the dynamic-exclusion worker."""
134 |     worker = dynamic_exclusion_worker(display, n_threads)
135 |     return noodles.Scheduler(error_handler=display.error_handler)\
136 |         .run(worker, get_workflow(wf))
137 | 
138 | 
139 | @noodles.schedule_hint(display='{cmd}', confirm=True)
140 | def system_command(cmd, task):
141 |     cmd_split = shlex.split(cmd)
142 |     p = subprocess.run(
143 |         cmd_split, stdout=subprocess.PIPE,
144 |         stderr=subprocess.PIPE, universal_newlines=True)
145 |     p.check_returncode()
146 |     oFile = open(os.path.join(logFolderAbsPath, task + '.log'), 'w')
147 |     oFile.write(p.stdout)
148 |     oFile.close()
149 |     return p.stdout
150 | 
151 | 
152 | def make_job(cmd, task_id, exclude):
153 |     j = system_command(cmd, task_id)
154 |     noodles.update_hints(j, {'task': str(task_id),
155 |                              'exclude': [str(x) for x in exclude]})
156 |     return j
157 | 
158 | 
159 | def error_filter(ex_type, ex_value, ex_tb):
160 |     if ex_type is subprocess.CalledProcessError:
161 |         return ex_value.stderr
162 |     else:
163 |         return None
164 | 
165 | 
166 | def runNoodles(jsonFile, logFolder, numThreads):
167 |     global logFolderAbsPath
168 |     logFolderAbsPath = os.path.abspath(logFolder)
169 |     os.makedirs(logFolderAbsPath)
170 |     input = json.load(open(jsonFile, 'r'))
171 |     if input[0].get('task') is None:
172 |         jobs = [make_job(td['command'],
173 |                          td['id'], td['exclude']) for td in input]
174 |     else:
175 |         jobs = [make_job(td['command'],
176 |                          td['task'], td['exclude']) for td in input]
177 |     wf = noodles.gather(*jobs)
178 |     with NCDisplay(error_filter) as display:
179 |         run(wf, display=display, n_threads=numThreads)
180 | 
181 | 
182 | def main():
183 |     parser = argparse.ArgumentParser(
184 |         description="SOBA: Run a non-directional exclusion graph job.")
185 |     parser.add_argument(
186 |         '-j', dest='n_threads', type=int, default=1,
187 |         help='number of threads to run simultaneously.')
188 |     parser.add_argument(
189 |         'target', type=str,
190 |         help='a JSON file specifying the 
graph.')
191 |     parser.add_argument(
192 |         'log', type=str,
193 |         help='a log folder.')
194 |     args = parser.parse_args(sys.argv[1:])
195 | 
196 |     runNoodles(args.target, args.log, args.n_threads)
197 | 
198 | 
199 | if __name__ == "__main__":
200 |     main()
201 | 
--------------------------------------------------------------------------------
/pymicmac/pointcloud/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/pointcloud/__init__.py
--------------------------------------------------------------------------------
/pymicmac/pointcloud/create_parcommands_config_file_convert_ply_laz.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import os
4 | import glob
5 | import json
6 | from pymicmac import utils_execution
7 | from lxml import etree
8 | 
9 | 
10 | def chunks(l, n):
11 |     """Yield successive n-sized chunks from l."""
12 |     for i in range(0, len(l), n):
13 |         yield l[i:i + n]
14 | 
15 | 
16 | def run(inputFolder, outputFile, outputFormat, outputFolder, num):
17 |     # Check user parameters
18 |     if not os.path.isdir(inputFolder):
19 |         raise Exception(inputFolder + ' does not exist')
20 |     # Check output file and folder
21 |     if os.path.isfile(outputFile):
22 |         raise Exception(outputFile + ' already exists!')
23 |     if os.path.isdir(outputFolder):
24 |         raise Exception(outputFolder + ' already exists!')
25 |     # create output folder
26 |     os.makedirs(outputFolder)
27 |     # Check format
28 |     if outputFormat not in ('las', 'laz'):
29 |         raise Exception('output format must be las or laz')
30 | 
31 |     inputFiles = glob.glob(inputFolder + '/*ply')
32 | 
33 |     # Create output file
34 |     oFile = open(outputFile, 'w')
35 |     globalXMLRootElement = etree.Element('ParCommands')
36 | 
37 |     # For each chunk of ply files we create a second-level parallel commands file
38 |     chunkId = 0
39 |     for chunk in chunks(inputFiles, num):
40 |         chunkXMLFileName = str(chunkId) + '.xml'
41 |         chunkXMLRelPath = outputFolder + '/' + chunkXMLFileName
42 |         chunkXMLFile = open(chunkXMLRelPath, 'w')
43 |         chunkXMLRootElement = etree.Element('ParCommands')
44 | 
45 |         chunkFiles = []
46 |         chunkOutputFiles = []
47 | 
48 |         for inputFile in chunk:
49 |             inputFileName = os.path.basename(inputFile)
50 |             convertedFileName = inputFileName.replace('ply', outputFormat)
51 | 
52 |             pdalConfig = {
53 |                 "pipeline": [
54 |                     {
55 |                         "type": "readers.ply",
56 |                         "filename": inputFileName
57 |                     },
58 |                     {
59 |                         "type": "writers.las",
60 |                         "filename": convertedFileName
61 |                     }
62 |                 ]
63 |             }
64 | 
65 |             pdalConfigOutputFileName = inputFileName + '.json'
66 |             pdalConfigOutputFileRelPath = outputFolder + '/' + pdalConfigOutputFileName
67 |             taskName = inputFileName + '_Conversion'
68 | 
69 |             with open(pdalConfigOutputFileRelPath, 'w') as outfile:
70 |                 json.dump(pdalConfig, outfile)
71 | 
72 |             childOutput = etree.SubElement(chunkXMLRootElement, 'Component')
73 | 
74 |             childOutputId = etree.SubElement(childOutput, 'id')
75 |             childOutputId.text = taskName
76 | 
77 |             childOutputRequire = etree.SubElement(childOutput, 'require')
78 |             childOutputRequire.text = pdalConfigOutputFileName + ' ' + inputFileName
79 | 
80 |             childOutputCommand = etree.SubElement(childOutput, 'command')
81 |             childOutputCommand.text = 'pdal pipeline ' + pdalConfigOutputFileName
82 | 
83 |             chunkFiles.append(pdalConfigOutputFileRelPath)
84 |             chunkFiles.append(inputFile)
85 | 
86 |             chunkOutputFiles.append(
87 | 
'outputData/' + taskName + '/' + convertedFileName)
88 | 
89 |         chunkXMLFile.write(
90 |             etree.tostring(
91 |                 chunkXMLRootElement,
92 |                 pretty_print=True,
93 |                 encoding='utf-8').decode('utf-8'))
94 |         chunkXMLFile.close()
95 | 
96 |         childOutput = etree.SubElement(globalXMLRootElement, 'Component')
97 | 
98 |         childOutputId = etree.SubElement(childOutput, 'id')
99 |         childOutputId.text = str(chunkId) + '_Conversion'
100 | 
101 |         childOutputRequire = etree.SubElement(childOutput, 'require')
102 |         childOutputRequire.text = chunkXMLRelPath + ' ' + ' '.join(chunkFiles)
103 | 
104 |         childOutputCommand = etree.SubElement(childOutput, 'command')
105 |         childOutputCommand.text = ' coeman-par-local -d . -c ' + \
106 |             chunkXMLFileName + ' -e outputData -n ' + str(num)
107 | 
108 |         childOutputOutput = etree.SubElement(childOutput, 'output')
109 |         childOutputOutput.text = ' '.join(chunkOutputFiles)
110 | 
111 |         chunkId += 1
112 | 
113 |     oFile.write(
114 |         etree.tostring(
115 |             globalXMLRootElement,
116 |             pretty_print=True,
117 |             encoding='utf-8').decode('utf-8'))
118 |     oFile.close()
119 | 
120 | 
121 | def argument_parser():
122 |     # define argument menu
123 |     description = "Creates a 2-level pycoeman XML parallel commands configuration file to convert a set of ply files into las/laz using PDAL. The second level is executed with coeman-par-local."
124 |     parser = argparse.ArgumentParser(description=description)
125 |     # fill argument groups
126 |     parser.add_argument(
127 |         '-i',
128 |         '--input',
129 |         default='',
130 |         help='Input folder with ply files',
131 |         type=str,
132 |         required=True)
133 |     parser.add_argument(
134 |         '-o',
135 |         '--output',
136 |         default='',
137 |         help='pycoeman parallel commands XML configuration file',
138 |         type=str,
139 |         required=True)
140 |     parser.add_argument(
141 |         '-f',
142 |         '--format',
143 |         default='',
144 |         help='Output format (las/laz)',
145 |         type=str,
146 |         required=True)
147 |     parser.add_argument(
148 |         '-x',
149 |         '--folder',
150 |         default='',
151 |         help='Output parallel configuration folder where to store the created files required by the distributed tool',
152 |         type=str,
153 |         required=True)
154 |     parser.add_argument(
155 |         '-n',
156 |         '--num',
157 |         default='8',
158 |         help='Parallelization at level 2 (default is 8)',
159 |         type=int,
160 |         required=True)
161 |     return parser
162 | 
163 | 
164 | def main():
165 |     try:
166 |         a = utils_execution.apply_argument_parser(argument_parser())
167 |         run(a.input, a.output, a.format, a.folder, a.num)
168 |     except Exception as e:
169 |         print(e)
170 | 
171 | 
172 | if __name__ == "__main__":
173 |     main()
174 | 
--------------------------------------------------------------------------------
/pymicmac/utils_execution.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import os
3 | import subprocess
4 | from lxml import etree
5 | 
6 | 
7 | def readGCPXMLFile(xmlFile):
8 |     gcpsXYZ = {}
9 |     cpsXYZ = {}
10 | 
11 |     if not os.path.isfile(xmlFile):
12 |         raise Exception('ERROR: ' + xmlFile + ' not found')
13 | 
14 |     e = etree.parse(xmlFile).getroot()
15 |     for p in e.getchildren():
16 |         gcp = p.find('NamePt').text
17 |         fields = p.find('Pt').text.split()
18 |         incertitude = p.find('Incertitude').text
19 | 
20 |         x = float(fields[0])
21 |         y = float(fields[1])
22 |         z = float(fields[2])
23 |         if incertitude.count('-1'):
24 |             cpsXYZ[gcp] = (x, y, z)
25 |         else:
26 |             gcpsXYZ[gcp] = (x, y, z)
27 |     return (gcpsXYZ, cpsXYZ)
28 | 
29 | 
30 | def getSize(absPath):
31 |     (out, _) = subprocess.Popen('du -sb ' + absPath, 
shell=True,
32 |                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
33 |     try:
34 |         return int(out.split()[0])
35 |     except BaseException:
36 |         return -1
37 | 
38 | 
39 | def apply_argument_parser(argumentsParser, options=None):
40 |     """ Apply the argument parser. """
41 |     if options is not None:
42 |         args = argumentsParser.parse_args(options)
43 |     else:
44 |         args = argumentsParser.parse_args()
45 |     return args
46 | 
--------------------------------------------------------------------------------
/pymicmac/workflow/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/workflow/__init__.py
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_matching/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/workflow/distributed_matching/__init__.py
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_matching/create_parcommands_config_file.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import os
4 | import glob
5 | import numpy
6 | from pymicmac import utils_execution
7 | from lxml import etree
8 | from scipy import spatial
9 | 
10 | 
11 | def getTileIndex(pX, pY, minX, minY, maxX, maxY, nX, nY):
12 |     xpos = int((pX - minX) * nX / (maxX - minX))
13 |     ypos = int((pY - minY) * nY / (maxY - minY))
14 |     # If a point is on the edge of the box (on the maximum side) we need to
15 |     # put it in the last tile
16 |     if xpos == nX:
17 |         xpos -= 1
18 |     if ypos == nY:
19 |         ypos -= 1
20 |     return (xpos, ypos)
21 | 
22 | 
23 | def run(orientationFolder, homolFolder, imagesFormat,
24 |         numNeighbours, outputFile, outputFolder, num, maltOptions):
25 |     # Check user parameters
26 |     if not os.path.isdir(orientationFolder):
27 |         raise Exception(orientationFolder + ' does not exist')
28 |     includeHomol = homolFolder != ''
29 |     if includeHomol and not os.path.isdir(homolFolder):
30 |         raise Exception(homolFolder + ' does not exist')
31 | 
32 |     if os.path.isfile(outputFile):
33 |         raise Exception(outputFile + ' already exists!')
34 |     if os.path.isdir(outputFolder):
35 |         raise Exception(outputFolder + ' already exists!')
36 |     # create output folder
37 |     os.makedirs(outputFolder)
38 | 
39 |     mmLocalChanDescFile = 'MicMac-LocalChantierDescripteur.xml'
40 |     requireLocalChanDescFile = ''
41 |     if os.path.isfile(mmLocalChanDescFile):
42 |         requireLocalChanDescFile = mmLocalChanDescFile
43 | 
44 |     # Parse number of tiles in X and Y
45 |     nX, nY = [int(e) for e in num.split(',')]
46 | 
47 |     # Initialize the empty lists of images and 2D points with the x,y
48 |     # positions of the cameras
49 |     images = []
50 |     camera2DPoints = []
51 | 
52 |     # For each image we get the x,y position of the camera and we add the
53 |     # image and the point in the lists
54 |     orientationFiles = glob.glob(orientationFolder + '/Orientation*')
55 |     for orientationFile in orientationFiles:
56 |         images.append(
57 |             os.path.basename(orientationFile).replace(
58 |                 "Orientation-",
59 |                 "").replace(
60 |                 ".xml",
61 |                 ""))
62 |         e = etree.parse(orientationFile).getroot()
63 |         (x, y, _) = [float(c)
64 |                      for c in e.xpath("//Externe")[0].find('Centre').text.split()]
65 |         camera2DPoints.append((x, y))
66 | 
67 |     if numNeighbours >= len(images):
68 |         raise Exception("numNeighbours >= len(images)")
69 | 
70 |     # Compute the bounding box of all the camera2DPoints
71 |     minX, minY = numpy.min(camera2DPoints, axis=0)
72 |     maxX, maxY = numpy.max(camera2DPoints, axis=0)
73 | 
74 |     print("Bounding box: " + ','.join([str(e)
75 |                                        for e in [minX, minY, maxX, maxY]]))
76 |     print("Offset bounding box: " +
77 |           ','.join([str(e) for e in [0, 0, maxX - minX, maxY - minY]]))
78 | 
79 |     # Compute the size of the tiles in X and Y
80 |     tileSizeX = (maxX - minX) / nX
81 |     tileSizeY = (maxY - minY) / nY
82 | 
83 |     # Create a KDTree to query nearest neighbours
84 |     kdtree = spatial.KDTree(camera2DPoints)
85 | 
86 |     # Sample nearest-neighbour distances between cameras (collected to eyeball
87 |     # that tiles are small enough; the values are not used further below)
88 |     numSamplePoints = 100
89 |     distances = []
90 |     for camera2DPoint in camera2DPoints[:numSamplePoints]:
91 |         distances.append(kdtree.query(camera2DPoint, 2)[0][1])
92 | 
93 |     # For each tile first we get a list of images whose camera XY position lies within the tile
94 |     # note: there may be empty tiles
95 |     tilesImages = {}
96 |     for i, camera2DPoint in enumerate(camera2DPoints):
97 |         pX, pY = camera2DPoint
98 |         tileIndex = getTileIndex(pX, pY, minX, minY, maxX, maxY, nX, nY)
99 |         if tileIndex not in tilesImages:
100 |             tilesImages[tileIndex] = [images[i], ]
101 |         else:
102 |             tilesImages[tileIndex].append(images[i])
103 | 
104 |     # Create output file
105 |     oFile = open(outputFile, 'w')
106 |     rootOutput = etree.Element('ParCommands')
107 | 
108 |     # For each tile we extend the tilesImages list with the nearest neighbours
109 |     for i in range(nX):
110 |         for j in range(nY):
111 |             k = (i, j)
112 |             (tMinX, tMinY) = (minX + (i * tileSizeX), minY + (j * tileSizeY))
113 |             (tMaxX, tMaxY) = (tMinX + tileSizeX, tMinY + tileSizeY)
114 |             tCenterX = tMinX + ((tMaxX - tMinX) / 2.)
115 |             tCenterY = tMinY + ((tMaxY - tMinY) / 2.)
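            # Worked sketch of the augmentation below (hypothetical numbers):
            # with numNeighbours=6, kdtree.query((tCenterX, tCenterY), 6)
            # returns a (distances, indexes) pair, and images[indexes[m]] are
            # the 6 images whose cameras lie closest to the tile centre; the
            # same query is then repeated for the four corners of the tile.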
116 |             if k in tilesImages:
117 |                 imagesTile = tilesImages[k]
118 |             else:
119 |                 imagesTile = []
120 |             imagesTileSet = set(imagesTile)
121 | 
122 |             imagesTileSet.update([images[nni] for nni in kdtree.query(
123 |                 (tCenterX, tCenterY), numNeighbours)[1]])
124 |             imagesTileSet.update(
125 |                 [images[nni] for nni in kdtree.query((tMinX, tMinY), numNeighbours)[1]])
126 |             imagesTileSet.update(
127 |                 [images[nni] for nni in kdtree.query((tMinX, tMaxY), numNeighbours)[1]])
128 |             imagesTileSet.update(
129 |                 [images[nni] for nni in kdtree.query((tMaxX, tMinY), numNeighbours)[1]])
130 |             imagesTileSet.update(
131 |                 [images[nni] for nni in kdtree.query((tMaxX, tMaxY), numNeighbours)[1]])
132 | 
133 |             if includeHomol:
134 |                 imagesTileSetFinal = imagesTileSet.copy()
135 |                 # Add to the images for this tile, other images that have
136 |                 # tie-points with the current images in the tile
137 |                 for image in imagesTileSet:
138 |                     imagesTileSetFinal.update(
139 |                         [e.replace('.dat', '') for e in os.listdir(homolFolder + '/Pastis' + image)])
140 |                 imagesTileSet = imagesTileSetFinal
141 | 
142 |             if len(imagesTileSet) == 0:
143 |                 raise Exception('EMPTY TILE!')
144 | 
145 |             tileName = 'tile_' + str(i) + '_' + str(j)
146 | 
147 |             # Dump the list of images for this tile
148 |             tileImageListOutputFileName = outputFolder + '/' + tileName + '.list'
149 |             tileImageListOutputFile = open(tileImageListOutputFileName, 'w')
150 |             tileImageListOutputFile.write('\n'.join(sorted(imagesTileSet)))
151 |             tileImageListOutputFile.close()
152 | 
153 |             childOutput = etree.SubElement(rootOutput, 'Component')
154 | 
155 |             childOutputId = etree.SubElement(childOutput, 'id')
156 |             childOutputId.text = tileName + '_Matching'
157 | 
158 |             childOutputImages = etree.SubElement(childOutput, 'requirelist')
159 |             childOutputImages.text = outputFolder + '/' + \
160 |                 os.path.basename(tileImageListOutputFileName)
161 | 
162 |             childOutputRequire = etree.SubElement(childOutput, 'require')
163 |             childOutputRequire.text = orientationFolder + " " + requireLocalChanDescFile
164 | 
165 |             childOutputCommand = etree.SubElement(childOutput, 'command')
166 |             command = 'echo -e "\n" | mm3d Malt Ortho ".*' + imagesFormat + '" ' + os.path.basename(
167 |                 orientationFolder) + ' ' + maltOptions + ' "BoxTerrain=[' + ','.join([str(e) for e in (tMinX, tMinY, tMaxX, tMaxY)]) + ']"'
168 |             command += '; echo -e "\n" | mm3d Tawny Ortho-MEC-Malt'
169 |             command += '; echo -e "\n" | mm3d Nuage2Ply MEC-Malt/NuageImProf_STD-MALT_Etape_8.xml Attr=Ortho-MEC-Malt/Orthophotomosaic.tif Out=' + \
170 |                 tileName + '.ply Offs=[' + str(minX) + ',' + str(minY) + ',0]'
171 |             childOutputCommand.text = command
172 | 
173 |             childOutputOutput = etree.SubElement(childOutput, 'output')
174 |             childOutputOutput.text = tileName + '.ply'
175 | 
176 |     oFile.write(
177 |         etree.tostring(
178 |             rootOutput,
179 |             pretty_print=True,
180 |             encoding='utf-8').decode('utf-8'))
181 |     oFile.close()
182 | 
183 | 
184 | def argument_parser():
185 |     # define argument menu
186 |     description = "Distributed solution for matching, i.e. point cloud generation from images and orientation. Splits the matching of a large area into the matching of many tiles. IMPORTANT: only use for images oriented in cartographic reference systems (tiling is done assuming Z is zenith), ideally for aerial images."
187 |     parser = argparse.ArgumentParser(description=description)
188 |     # fill argument groups
189 |     parser.add_argument(
190 |         '-i',
191 |         '--inputOrientation',
192 |         default='',
193 |         help='Orientation folder. 
Orientation must be in cartographic reference systems',
194 |         type=str,
195 |         required=True)
196 |     parser.add_argument(
197 |         '-e',
198 |         '--format',
199 |         default='',
200 |         help='Images format (example jpg or tif)',
201 |         type=str,
202 |         required=True)
203 |     parser.add_argument(
204 |         '--neighbours',
205 |         default=6,
206 |         help='For each tile we consider the images whose XY camera position is in the tile and the K nearest images (default is 6) to each vertex of the tile',
207 |         type=int,
208 |         required=False)
209 |     parser.add_argument(
210 |         '-o',
211 |         '--output',
212 |         default='',
213 |         help='pycoeman parallel commands XML configuration file',
214 |         type=str,
215 |         required=True)
216 |     parser.add_argument(
217 |         '-f',
218 |         '--folder',
219 |         default='',
220 |         help='Output parallel configuration folder where to store the created files required by the distributed tool',
221 |         type=str,
222 |         required=True)
223 |     parser.add_argument(
224 |         '-n',
225 |         '--num',
226 |         default='',
227 |         help='Number of tiles in which the XY extent is divided, specified as numX,numY',
228 |         type=str,
229 |         required=True)
230 |     parser.add_argument(
231 |         '-t',
232 |         '--inputHomol',
233 |         default='',
234 |         help='(Optional) Homol folder with the tie-points. If specified, for each tile we also consider the homol images of the images in the tile [default is disabled]',
235 |         type=str,
236 |         required=False)
237 |     parser.add_argument(
238 |         '--maltOptions',
239 |         default='',
240 |         help='Extra options to pass to Malt (example "SzW=1 Regul=0.01 ZoomF=1"; default is "")',
241 |         type=str)
242 | 
243 |     return parser
244 | 
245 | 
246 | def main():
247 |     try:
248 |         a = utils_execution.apply_argument_parser(argument_parser())
249 |         run(a.inputOrientation, a.inputHomol, a.format,
250 |             a.neighbours, a.output, a.folder, a.num, a.maltOptions)
251 |     except Exception as e:
252 |         print(e)
253 | 
254 | 
255 | if __name__ == "__main__":
256 |     main()
257 | 
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_tapioca/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImproPhoto/pymicmac/75e8dfbc90aeff4301b144ba76fc627a780ec8ac/pymicmac/workflow/distributed_tapioca/__init__.py
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_tapioca/combine_distributed_tapioca_output.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import os
4 | from pymicmac import utils_execution
5 | 
6 | 
7 | def run(inputFolder, outputFolder):
8 |     # Check and create the output folder
9 |     if os.path.isdir(outputFolder):
10 |         raise Exception(outputFolder + ' already exists!')
11 |     os.makedirs(outputFolder)
12 | 
13 |     for tapiocaCommandId in os.listdir(inputFolder):
14 |         tapiocaCommandIdAbsPath = inputFolder + '/' + tapiocaCommandId
15 |         tapiocaCommandIdHomolAbsPath = tapiocaCommandIdAbsPath + '/Homol'
16 |         if os.path.isdir(tapiocaCommandIdHomolAbsPath) and len(
17 |                 os.listdir(tapiocaCommandIdHomolAbsPath)):
18 |             os.system(
19 |                 'cp -r ' +
20 |                 tapiocaCommandIdHomolAbsPath +
21 |                 '/* ' +
22 |                 outputFolder)
23 |         else:
24 |             print(
25 |                 'WARNING: could not find tie-points in ' +
26 |                 tapiocaCommandIdHomolAbsPath)
27 | 
28 | 
29 | def argument_parser():
30 |     # define argument menu
31 |     description = "Combines Homol folders into a single one. 
To be run after a distributed Tapioca"
32 |     parser = argparse.ArgumentParser(description=description)
33 |     # fill argument groups
34 |     parser.add_argument(
35 |         '-i',
36 |         '--inputFolder',
37 |         default='',
38 |         help='Input folder with the subfolders for each distributed Tapioca command. This folder contains subfolders _Tapioca and each subfolder contains a Homol folder',
39 |         type=str,
40 |         required=True)
41 |     parser.add_argument(
42 |         '-o',
43 |         '--outputFolder',
44 |         default='',
45 |         help='Output folder',
46 |         type=str,
47 |         required=True)
48 |     return parser
49 | 
50 | 
51 | def main():
52 |     try:
53 |         a = utils_execution.apply_argument_parser(argument_parser())
54 |         run(a.inputFolder, a.outputFolder)
55 |     except Exception as e:
56 |         print(e)
57 | 
58 | 
59 | if __name__ == "__main__":
60 |     main()
61 | 
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_tapioca/create_all_image_pairs_file.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import os
4 | from pymicmac import utils_execution
5 | 
6 | 
7 | def run(inputFolder, imageFormat, outputFile):
8 |     # Check user parameters
9 |     if not os.path.isdir(inputFolder):
10 |         raise Exception(inputFolder + " does not exist! (or is not a folder)")
11 |     # Create lists of images that have the correct format
12 |     images = sorted(os.listdir(inputFolder))
13 |     imagesFormat = []
14 |     for image in images:
15 |         if image.endswith(imageFormat):
16 |             imagesFormat.append(image)
17 | 
18 |     if os.path.isfile(outputFile):
19 |         raise Exception(outputFile + ' already exists!')
20 |     ofile = open(outputFile, 'w')
21 |     ofile.write('<?xml version="1.0" ?>\n')
22 |     ofile.write('<SauvegardeNamedRel>\n')
23 |     for i in range(len(imagesFormat)):
24 |         for j in range(len(imagesFormat)):
25 |             if i < j:
26 |                 ofile.write(
27 |                     '     <Cple>' +
28 |                     imagesFormat[i] +
29 |                     ' ' +
30 |                     imagesFormat[j] +
31 |                     '</Cple>\n')
32 |                 ofile.write(
33 |                     '     <Cple>' +
34 |                     imagesFormat[j] +
35 |                     ' ' +
36 |                     imagesFormat[i] +
37 |                     '</Cple>\n')
38 |     ofile.write('</SauvegardeNamedRel>\n')
39 |     ofile.close()
40 | 
41 | 
42 | def argument_parser():
43 |     # define argument menu
44 |     description = "Creates a valid image pairs file suitable for Tapioca (to run with option File). 
Every possible image pair is added"
45 |     parser = argparse.ArgumentParser(description=description)
46 |     # fill argument groups
47 |     parser.add_argument(
48 |         '-i',
49 |         '--input',
50 |         default='',
51 |         help='Input folder with the images',
52 |         type=str,
53 |         required=True)
54 |     parser.add_argument(
55 |         '-f',
56 |         '--format',
57 |         default='',
58 |         help='File format of the images (only files with this format are considered for the pairs)',
59 |         type=str,
60 |         required=True)
61 |     parser.add_argument(
62 |         '-o',
63 |         '--output',
64 |         default='',
65 |         help='Output valid image pairs file',
66 |         type=str,
67 |         required=True)
68 |     return parser
69 | 
70 | 
71 | def main():
72 |     try:
73 |         a = utils_execution.apply_argument_parser(argument_parser())
74 |         run(a.input, a.format, a.output)
75 |     except Exception as e:
76 |         print(e)
77 | 
78 | 
79 | if __name__ == "__main__":
80 |     main()
81 | 
--------------------------------------------------------------------------------
/pymicmac/workflow/distributed_tapioca/create_parcommands_config_file.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import shutil
4 | import os
5 | from lxml import etree
6 | from pymicmac import utils_execution
7 | 
8 | 
9 | def run(inputFile, outputFile, outputFolder, num):
10 |     # Check user parameters
11 |     if not os.path.isfile(inputFile):
12 |         raise Exception(inputFile + ' does not exist')
13 |     if os.path.isfile(outputFile):
14 |         raise Exception(outputFile + ' already exists!')
15 |     outputFileAbsPath = os.path.abspath(outputFile)
16 |     if os.path.isdir(outputFolder):
17 |         raise Exception(outputFolder + ' already exists!')
18 |     if outputFolder[-1] == '/':
19 |         outputFolder = outputFolder[:-1]
20 |     outputFolderAbsPath = os.path.abspath(outputFolder)
21 |     outputFolderName = os.path.basename(outputFolderAbsPath)
22 |     if num % 2:
23 |         raise Exception('num must be an even number!')
24 | 
25 |     mmLocalChanDescFile = 'MicMac-LocalChantierDescripteur.xml'
26 |     requireLocalChanDescFile = ''
27 |     if os.path.isfile(mmLocalChanDescFile):
28 |         requireLocalChanDescFile = mmLocalChanDescFile
29 | 
30 |     # Read input XML with valid image pairs
31 |     e = etree.parse(inputFile).getroot()
32 |     # Create output folder
33 |     os.makedirs(outputFolderAbsPath)
34 | 
35 |     # Create output file
36 |     oFile = open(outputFileAbsPath, 'w')
37 |     rootOutput = etree.Element('ParCommands')
38 | 
39 |     pairs = e.findall('Cple')
40 |     numPairs = len(pairs)
41 |     # Create list of pairs and set of images for a chunk
42 |     pairsChunk = []
43 |     imagesSetChunk = set([])
44 |     chunkId = 0
45 |     errorImagesSet = set([])
46 |     for i in range(numPairs):
47 |         # Add pair and image to current chunk
48 |         pair = pairs[i].text
49 |         pairsChunk.append(pair)
50 |         (image1, image2) = pair.split()
51 | 
52 |         # image1AbsPath = os.path.abspath(image1)
53 |         # image2AbsPath = os.path.abspath(image2)
54 |         #
55 |         # if not os.path.isfile(image1AbsPath):
56 |         #     errorImagesSet.add(image1AbsPath)
57 |         # if not os.path.isfile(image2AbsPath):
58 |         #     errorImagesSet.add(image2AbsPath)
59 |         #
60 |         # imagesSetChunk.add(image1AbsPath)
61 |         # imagesSetChunk.add(image2AbsPath)
62 | 
63 |         if not os.path.isfile(image1):
64 |             errorImagesSet.add(image1)
65 |         if not os.path.isfile(image2):
66 |             errorImagesSet.add(image2)
67 |         imagesSetChunk.add(image1)
68 |         imagesSetChunk.add(image2)
69 | 
70 |         if (((i + 1) % num) == 0) or (i == (numPairs - 1)
71 |                                       ):  # if the current chunk is full or we just added the last pair, we need to store the chunk data
72 |             # Define 
output files names and absolute paths for this chunk 73 | chunkXMLFileName = str(chunkId) + '_' + os.path.basename(inputFile) 74 | chunkImagesListFileName = chunkXMLFileName + '.list' 75 | chunkXMLFileAbsPath = outputFolderAbsPath + '/' + chunkXMLFileName 76 | chunkImagesListFileAbsPath = outputFolderAbsPath + '/' + chunkImagesListFileName 77 | # Open the output files for this chunk 78 | chunkXMLFile = open(chunkXMLFileAbsPath, 'w') 79 | chunkImagesListFile = open(chunkImagesListFileAbsPath, 'w') 80 | # Dump the XML file with image pairs for this chunk 81 | rootChunk = etree.Element('SauvegardeNamedRel') 82 | for pairChunk in pairsChunk: 83 | childChunk = etree.Element('Cple') 84 | childChunk.text = pairChunk 85 | rootChunk.append(childChunk) 86 | chunkXMLFile.write( 87 | etree.tostring( 88 | rootChunk, 89 | pretty_print=True, 90 | encoding='utf-8').decode('utf-8')) 91 | chunkXMLFile.close() 92 | # Dump the .list file with the images in this chunk 93 | for image in imagesSetChunk: 94 | chunkImagesListFile.write(image + '\n') 95 | chunkImagesListFile.close() 96 | 97 | # Add XML component in MicMac XML distributed computing file 98 | childOutput = etree.Element('Component') 99 | 100 | childOutputId = etree.Element('id') 101 | childOutputId.text = str(chunkId) + '_Tapioca' 102 | childOutput.append(childOutputId) 103 | 104 | childOutputImages = etree.Element('requirelist') 105 | childOutputImages.text = outputFolderName + '/' + chunkImagesListFileName 106 | childOutput.append(childOutputImages) 107 | 108 | childOutputRequire = etree.Element('require') 109 | childOutputRequire.text = outputFolderName + '/' + \ 110 | chunkXMLFileName + " " + requireLocalChanDescFile 111 | childOutput.append(childOutputRequire) 112 | 113 | childOutputCommand = etree.Element('command') 114 | childOutputCommand.text = 'mm3d Tapioca File ' + chunkXMLFileName + ' -1' 115 | childOutput.append(childOutputCommand) 116 | 117 | childOutputOutput = etree.Element('output') 118 | childOutputOutput.text = "Homol" 119 | childOutput.append(childOutputOutput) 120 | 121 | rootOutput.append(childOutput) 122 | # 123 | # Empty the chunk 124 | pairsChunk = [] 125 | imagesSetChunk = set([]) 126 | chunkId += 1 127 | 128 | oFile.write( 129 | etree.tostring( 130 | rootOutput, 131 | pretty_print=True, 132 | encoding='utf-8').decode('utf-8')) 133 | oFile.close() 134 | 135 | # for imageAbsPath in errorImagesSet: 136 | # print("WARNING: " + os.path.basename(imageAbsPath) + " is not located in " + imageAbsPath + '. If you use relative paths or just the image names, be careful to put the XML in the same folder with the images') 137 | 138 | for image in errorImagesSet: 139 | print( 140 | "WARNING: " + 141 | image + 142 | " could not be found. The XML with the valid image pairs must be located in the same folder with the images") 143 | 144 | 145 | def argument_parser(): 146 | # define argument menu 147 | description = "Splits a valid image pairs file suitable for Tapioca into chunks. 
For each chunk, it adds a component in a pycoeman parallel commands XML configuration file, and it stores in a parallel configuration folder the information of each chunk"
148 |     parser = argparse.ArgumentParser(description=description)
149 |     # fill argument groups
150 |     parser.add_argument(
151 |         '-i',
152 |         '--input',
153 |         default='',
154 |         help='Input XML valid image pair file',
155 |         type=str,
156 |         required=True)
157 |     parser.add_argument(
158 |         '-o',
159 |         '--output',
160 |         default='',
161 |         help='pycoeman parallel commands XML configuration file',
162 |         type=str,
163 |         required=True)
164 |     parser.add_argument(
165 |         '-f',
166 |         '--folder',
167 |         default='',
168 |         help='Output parallel configuration folder where to store the created files. For each chunk there will be an XML file with image pairs and a .list file with a list of files',
169 |         type=str,
170 |         required=True)
171 |     parser.add_argument(
172 |         '-n',
173 |         '--num',
174 |         default='',
175 |         help='Number of image pairs per chunk (must be even number)',
176 |         type=int,
177 |         required=True)
178 |     return parser
179 | 
180 | 
181 | def main():
182 |     try:
183 |         a = utils_execution.apply_argument_parser(argument_parser())
184 |         run(a.input, a.output, a.folder, a.num)
185 |     except Exception as e:
186 |         print(e)
187 | 
188 | 
189 | if __name__ == "__main__":
190 |     main()
191 | 
--------------------------------------------------------------------------------
/pymicmac/workflow/run_workflow.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | from pycoeman.seqcommands import run_seqcommands_local
4 | from pymicmac import utils_execution
5 | 
6 | 
7 | def argument_parser():
8 |     # define argument menu
9 |     description = "Run a set of MicMac commands sequentially (one after the other). The commands are specified by a Workflow XML configuration file. During the execution of each command the CPU/MEM/disk usage of the system is monitored."
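    # A sketch of the intended invocation (it mirrors tests/run_workflow_test.sh);
    # the parsed arguments are presumably forwarded to pycoeman's
    # run_seqcommands_local, imported above:
    #   micmac-run-workflow -d . -e tie-point-detection -c tie-point-detection.xml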
10 | parser = argparse.ArgumentParser(description=description) 11 | parser.add_argument( 12 | '-d', 13 | '--dataDir', 14 | default='', 15 | help='Data directory that contains all the required data (if using relative paths in and 2 | 3 | 4 | 5 | Aquaris E5 HD 6 | 2.5 4.4 7 | Aquaris E5 HD 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /tests/Ori-IniCal/AutoCal_Foc-3500_Cam-Aquaris_E5_HD.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | eConvApero_DistM2C 4 | 1584 2112 5 | 3151.4322860094735 6 | 3168 4224 7 | 8 | 9 | 10 | 1584 2112 11 | true 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /tests/list.txt: -------------------------------------------------------------------------------- 1 | 1.jpg 2 | 2.jpg 3 | 3.jpg 4 | 4.jpg 5 | 5.jpg 6 | 6.jpg 7 | -------------------------------------------------------------------------------- /tests/matching.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | Malt 4 | list.txt 5 | mm3d Malt GeomImage ".*jpg" TapasOut "Master=1.jpg" "DirMEC=Results" UseTA=1 ZoomF=1 ZoomI=32 Purge=true 6 | param-estimation/Ori-TapasOut MicMac-LocalChantierDescripteur.xml 7 | 8 | 9 | Nuage2Ply 10 | mm3d Nuage2Ply "./Results/NuageImProf_STD-MALT_Etape_8.xml" Attr="1.jpg" Out=1.ply 11 | 12 | 13 | -------------------------------------------------------------------------------- /tests/param-estimation.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | Tapas 4 | list.txt 5 | tie-point-detection/Homol MicMac-LocalChantierDescripteur.xml 6 | mm3d Tapas Fraser ".*jpg" Out=TapasOut 7 | 8 | 9 | -------------------------------------------------------------------------------- /tests/param-estimation_orireduction.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | Martini 4 | list.txt 5 | mm3d Martini ".*jpg" OriCalib=IniCal 6 | tie-point-detection/Homol Ori-IniCal MicMac-LocalChantierDescripteur.xml 7 | 8 | 9 | OriRedTieP 10 | mm3d OriRedTieP ".*jpg" OriCalib=IniCal; rm Homol; mv HomolTiePRed Homol 11 | 12 | 13 | Tapas 14 | mm3d Tapas Fraser ".*jpg" Out=TapasOut InCal=IniCal 15 | 16 | 21 | 22 | -------------------------------------------------------------------------------- /tests/param-estimation_reduction.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | NO_AllOri2Im 4 | list.txt 5 | mm3d TestLib NO_AllOri2Im ".*jpg" Quick=1 6 | tie-point-detection/Homol MicMac-LocalChantierDescripteur.xml 7 | 8 | 9 | RedTieP 10 | 11 | 12 | mm3d RedTieP ".*jpg" ExpSubCom=1; micmac-noodles subcommands.json RedTieP_logs -j 4; rm Homol; mv Homol-Red Homol 13 | 14 | 15 | Tapas 16 | mm3d Tapas Fraser ".*jpg" Out=TapasOut 17 | 18 | 23 | 24 | -------------------------------------------------------------------------------- /tests/run_distributed_tapioca_local_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Before running the test, add the following in include/XML_User/DicoCamera.xml: 4 | # 5 | # Aquaris E5 HD 6 | # 3.52 4.69 7 | # Aquaris E5 HD 8 | # 9 | set -e 10 | 11 | micmac-disttapioca-create-pairs -i . -f jpg -o ImagePairs.xml 12 | micmac-disttapioca-create-config -i ImagePairs.xml -o DistributedTapioca.xml -f DistributedTapioca -n 6 13 | coeman-par-local -d . 
-e DistributedTapiocaExe -c DistributedTapioca.xml -n 2
14 | micmac-disttapioca-combine -i DistributedTapiocaExe -o Homol
15 | 
16 | # Test if we produced expected output files
17 | _match_count=$(ls Homol/Pastis?.jpg/*.dat |wc -w)
18 | if [ $_match_count -ne 30 ]
19 | then
20 |     exit 1
21 | fi
22 | 
--------------------------------------------------------------------------------
/tests/run_workflow_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | echo "Checking dependencies..."
4 | echo "ImageMagick:"
5 | convert --version
6 | echo "exiv2:"
7 | exiv2 --version
8 | echo "exiftool:"
9 | exiftool -ver
10 | 
11 | echo "micmac-run-workflow -d . -e tie-point-detection -c tie-point-detection.xml"
12 | micmac-run-workflow -d . -e tie-point-detection -c tie-point-detection.xml
13 | echo "micmac-run-workflow -d . -e param-estimation -c param-estimation.xml"
14 | micmac-run-workflow -d . -e param-estimation -c param-estimation.xml
15 | 
16 | #The following commands are not executed because of the issue "run_workflow_test.sh test fails #36"
17 | #echo "micmac-run-workflow -d . -e param-estimation-red -c param-estimation_reduction.xml"
18 | #micmac-run-workflow -d . -e param-estimation-red -c param-estimation_reduction.xml
19 | #echo "micmac-run-workflow -d . -e param-estimation-orired -c param-estimation_orireduction.xml"
20 | #micmac-run-workflow -d . -e param-estimation-orired -c param-estimation_orireduction.xml
21 | 
22 | echo "micmac-run-workflow -d . -e matching -c matching.xml"
23 | micmac-run-workflow -d . -e matching -c matching.xml
24 | 
25 | # Test if we produced expected ply model
26 | _match_count=$(ls matching/1.ply |wc -w)
27 | if [ $_match_count -ne 1 ]
28 | then
29 |     echo "Test failed, output ply file wasn't produced."
30 |     exit 1
31 | fi
32 | 
33 | echo "done."
34 | 
--------------------------------------------------------------------------------
/tests/tie-point-detection.xml:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | Tapioca
4 | list.txt
5 | mm3d Tapioca All ".*jpg" -1
6 | MicMac-LocalChantierDescripteur.xml
7 | 
8 | 
9 | 
10 | 
--------------------------------------------------------------------------------
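For reference, a parallel-commands configuration of the kind generated by micmac-disttapioca-create-config (and consumed by coeman-par-local in run_distributed_tapioca_local_test.sh above) plausibly looks like the sketch below. The element names are taken from the etree-building code in pymicmac/workflow/distributed_tapioca/create_parcommands_config_file.py; the concrete values are illustrative, assuming chunk 0 of the test's ImagePairs.xml and the DistributedTapioca output folder:

<ParCommands>
  <Component>
    <id>0_Tapioca</id>
    <requirelist>DistributedTapioca/0_ImagePairs.xml.list</requirelist>
    <require>DistributedTapioca/0_ImagePairs.xml MicMac-LocalChantierDescripteur.xml</require>
    <command>mm3d Tapioca File 0_ImagePairs.xml -1</command>
    <output>Homol</output>
  </Component>
</ParCommands>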