├── .gitignore ├── CITATION.cff ├── Dockerfile ├── LICENSE ├── README.md ├── Readme_UK.md ├── data ├── Potree_UK1.PNG └── Potree_UK_midmerge1.PNG ├── figures └── 2D-simplification-of-the-various-steps.png ├── pympc ├── __init__.py ├── create_pycoeman_config_run_massive_potree_converter.py ├── fill_db_extents.py ├── fill_db_extents_potree.py ├── generate_tiles.py ├── get_info.py ├── get_wkt.py ├── merge_potree.py ├── merge_potree_all.py ├── sort_index.py ├── utils.py └── validate_potree.py ├── requirements.txt └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | .project 2 | .pydevproject 3 | pympc/__pycache__/ 4 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # YAML 1.2 2 | # Metadata for citation of this software according to the CFF format (https://citation-file-format.github.io/) 3 | cff-version: 1.0.3 4 | message: If you use this software, please cite it as below. 5 | title: Massive-PotreeConverter 6 | doi: 10.5281/zenodo.910906 7 | authors: 8 | - given-names: Oscar 9 | family-names: Rubi 10 | name-particle: Martinez 11 | affiliation: Netherlands eScience Center 12 | - given-names: Stefan 13 | family-names: Verhoeven 14 | affiliation: Nederlands eScience Center 15 | - given-names: Yifat 16 | family-names: Dzigan 17 | affiliation: Netherlands eScience Center 18 | - given-names: Gijs 19 | family-names: van Oord 20 | affiliation: Netherlands eScience Center 21 | - given-names: Romulo 22 | family-names: Goncalves 23 | affiliation: Netherlands eScience Center 24 | version: 1.1.0 25 | date-released: 2017-09-21 26 | repository-code: https://github.com/NLeSC/Massive-PotreeConverter 27 | license: Apache-2.0 28 | references: 29 | - type: conference-paper 30 | doi: 10.13140/RG.2.1.1731.4326/1 31 | title: 'Taming the beast: Free and open-source massive point cloud web visualization' 32 | authors: 33 | - given-names: Oscar 34 | family-names: Martinez-Rubi 35 | - given-names: Stefan 36 | family-names: Verhoeven 37 | - given-names: Maarten Van 38 | family-names: Meersbergen 39 | - given-names: Markus 40 | family-names: Schütz 41 | - given-names: Peter Van 42 | family-names: Oosterom 43 | - given-names: Romulo 44 | family-names: Goncalves 45 | - given-names: Theo 46 | family-names: Tijssen 47 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # DockertFile for the Massive-PotreeConverter 2 | FROM ubuntu:16.04 3 | MAINTAINER Oscar Martinez Rubi 4 | RUN apt-get update -y 5 | 6 | # INSTALL compilers and build toold 7 | RUN apt-get install -y wget git cmake build-essential gcc g++ 8 | 9 | # INSTALL PDAL 10 | RUN apt-get install -y libgeos-dev libproj-dev libtiff-dev libgeotiff-dev 11 | RUN apt-get install -y libgdal-dev 12 | WORKDIR /opt 13 | RUN wget http://download.osgeo.org/laszip/laszip-2.1.0.tar.gz 14 | RUN tar xvfz laszip-2.1.0.tar.gz 15 | WORKDIR /opt/laszip-2.1.0 16 | RUN mkdir makefiles 17 | WORKDIR /opt/laszip-2.1.0/makefiles/ 18 | RUN cmake .. 19 | RUN make; make install 20 | WORKDIR /opt 21 | RUN wget http://download.osgeo.org/pdal/PDAL-1.3.0-src.tar.gz 22 | RUN tar xvzf PDAL-1.3.0-src.tar.gz 23 | WORKDIR /opt/PDAL-1.3.0-src 24 | RUN mkdir makefiles 25 | WORKDIR /opt/PDAL-1.3.0-src/makefiles 26 | RUN apt-get install -y libjsoncpp-dev 27 | RUN cmake -G "Unix Makefiles" ../ 28 | RUN make ; make install 29 | 30 | # INSTALL PotreeConverter 31 | WORKDIR /opt 32 | RUN git clone https://github.com/m-schuetz/LAStools.git LAStools-PC 33 | WORKDIR /opt/LAStools-PC/LASzip 34 | RUN mkdir build 35 | WORKDIR /opt/LAStools-PC/LASzip/build 36 | RUN cmake -DCMAKE_BUILD_TYPE=Release .. 37 | RUN make 38 | WORKDIR /opt 39 | RUN git clone https://github.com/potree/PotreeConverter.git 40 | WORKDIR /opt/PotreeConverter 41 | RUN mkdir build 42 | WORKDIR /opt/PotreeConverter/build 43 | RUN apt-get install -y libboost-all-dev 44 | RUN cmake -DCMAKE_BUILD_TYPE=Release -DLASZIP_INCLUDE_DIRS=/opt/LAStools-PC/LASzip/dll -DLASZIP_LIBRARY=/opt/LAStools-PC/LASzip/build/src/liblaszip.so .. 45 | RUN make ; make install 46 | #RUN ln -s /opt/PotreeConverter/build/PotreeConverter/PotreeConverter /usr/local/bin/PotreeConverter 47 | 48 | # INSTALL LAStools 49 | WORKDIR /opt 50 | RUN wget http://www.cs.unc.edu/~isenburg/lastools/download/lastools.zip 51 | RUN apt-get install -y unzip 52 | RUN unzip lastools.zip 53 | WORKDIR /opt/LAStools/ 54 | RUN make 55 | RUN ln -s /opt/LAStools/bin/lasinfo /usr/local/sbin/lasinfo 56 | RUN ln -s /opt/LAStools/bin/lasmerge /usr/local/sbin/lasmerge 57 | 58 | 59 | # INSTALL pycoeman 60 | RUN apt-get install -y python-pip python-dev build-essential libfreetype6-dev libssl-dev libffi-dev 61 | RUN pip install git+https://github.com/NLeSC/pycoeman 62 | 63 | # INSTALL Massive-PotreeConverter 64 | RUN pip install git+https://github.com/NLeSC/Massive-PotreeConverter 65 | 66 | # Create 3 volumes to be used when running the script. Ideally each run must be mounted to a different physical device 67 | VOLUME ["/data1"] 68 | VOLUME ["/data2"] 69 | VOLUME ["/data3"] 70 | 71 | WORKDIR /data1 72 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Massive-PotreeConverter 2 | 3 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.910906.svg)](https://doi.org/10.5281/zenodo.910906) 4 | 5 | The [PotreeConverter](https://github.com/potree/PotreeConverter) builds potree octree from laz files. 6 | When you have very many or big laz files then running a single PotreeConverter job will take a very long time. 7 | The Massive-PotreeConverter reduces the wallclock time of creating the octree by a divide and conquer approach. 8 | It does this by creating octree's in parallel and merging the octree's into a single octree. 9 | 10 | This repository extends the [PotreeConverter](https://github.com/potree/PotreeConverter) 11 | through a bunch of Python scripts to make it able to convert massive 12 | point clouds to the Potree-OctTree format. 13 | 14 | Massive-PotreeConverter consists of four steps, all executable through command-line tools. 15 | The steps to convert a massive point cloud into the Potree-OctTree are: 16 | - Determine the bounding cube of the massive point cloud. 17 | - Split the point cloud in tiles following a special tiling schema. 18 | - For all the tiles run PotreeConverter to create Potree-OctTrees. We use pycoeman (https://github.com/NLeSC/pycoeman). 19 | - Merge the multiple Potree-OctTrees into a single massive Potree-OctTree. 20 | 21 | All these steps are summarized in the following scheme: ![](figures/2D-simplification-of-the-various-steps.png). 22 | 23 | For a detailed description of each step the user should read [**Taming the beast: Free and open-source massive point cloud web visualization**](http://dx.doi.org/10.13140/RG.2.1.1731.4326/1). 24 | 25 | In addition, this repository also contains tools to: 26 | - Sort and index a bunch of LAS/LAZ files in parallel. 27 | - Dump the extents of a bunch of LAS/LAZ files into a PostGIS database. This is useful for LAStools as a pre-filter step when dealing with large number of files. 28 | - Dump the extents of the nodes of a Potree-OctTree into a PostGIS database. Each node of the tree is stored in a separate file. 29 | 30 | These additional tools can be used to make rectangular selections on the raw data or in the different levels of the Potree-OctTree offering a multi-level selection tool. This is for example done in https://github.com/NLeSC/ahn-pointcloud-viewer-ws/blob/master/src/main/python/create_user_file.py. In this example a LAS/LAZ file is created from the selected data. 31 | 32 | Massive-PotreeConverter has been used for the Dutch [AHN2](http://ahn2.pointclouds.nl) with 640 Billion points. 33 | 34 | ## Requirements 35 | 36 | The following libraries/packages are required for the basic components of Massive-PotreeConverter: 37 | 38 | - [PDAL](http://www.pdal.io/) 39 | - [PotreeConverter](https://github.com/potree/PotreeConverter) 40 | - [pycoeman](https://github.com/NLeSC/pycoeman) 41 | - [LAStools](http://rapidlasso.com/lastools/) (the open-source license is enough). The */bin* should be added to the Linux **PATH**. 42 | 43 | Concretely the following command-line tools must be available: pdal, PotreeConverter, coeman-par-local (or coeman-par-sge or coeman-par-ssh), lasinfo and lasmerge 44 | 45 | For now Massive-PotreeConverter works only in Linux systems. Requires Python 3.5. 46 | 47 | There is a Dockerfile available and a image build in [Docker Hub](https://registry.hub.docker.com/u/oscarmartinezrubi/massive-potreeconverter/). See end of page for information on how to use it. 48 | 49 | ## Installation 50 | 51 | Clone this repository and install it with pip (using a virtualenv is recommended): 52 | 53 | ``` 54 | git clone https://github.com/NLeSC/Massive-PotreeConverter 55 | cd Massive-PotreeConverter 56 | pip install . 57 | ``` 58 | 59 | or install directly with: 60 | 61 | ``` 62 | pip install git+https://github.com/NLeSC/Massive-PotreeConverter 63 | ``` 64 | 65 | ### Installation for additional steps 66 | 67 | In order to perform the additional components some additional libraries/packages have to be installed: 68 | 69 | - To insert extents LAS/LAZ files a Potree-OctTrees in a PostGIS database, the additional requirements are: 70 | - PostgreSQL, PostGIS 71 | - Python modules: psycopg2 72 | 73 | - To sort/index LAS/LAZ files in parallel (allowing faster selection), the additional requirements are: 74 | - LAStools with license. For the licensed-part of LAStools to run in Linux environments wine (https://www.winehq.org/) needs to be installed 75 | 76 | ### Installation tips 77 | 78 | For the installation of PotreeConverter look at https://github.com/potree/PotreeConverter. You will need to add the build executable manually to the PATH. 79 | 80 | Look at the web page of [PDAL](http://www.pdal.io/compilation/unix.html) to install it. You will need to install also GDAL, Geos, GeoTiff and LASzip. Note that for Massive-PotreeConverter there is no need to build PDAL with PostgreSQL support. 81 | 82 | 83 | ## Method 84 | 85 | More detailed steps: 86 | 87 | - mpc-info: We get the bounding cube, the number of points and the average density of the massive point cloud. 88 | First argument is the input folder with all the input data. Second argument is the number of processes we want to use to get the information. 89 | The tool also computes suggested values for the number of tiles and for the Cubic Axis Aligned Bounding Box (CAABB), the spacing, the number of levels and suggested potreeconverter command. These values must be used in the next steps! Assuming [laz input directory] is a folder with a bunch of LAS or LAZ files, run: 90 | ``` 91 | mpc-info -i [laz input directory] -c [number processes] 92 | ``` 93 | 94 | - We use `mpc-tiling` to create tiles and we use the previous computed (by `mpc-info`) 95 | number of tiles and CAABB (only X and Y coordinates). Note that the number of tiles 96 | must be power of 4, in this way and thanks to the used 97 | bounding box, the extents of the tiles will match the extent of the OctTree 98 | nodes at a certain level (and thus the future merging will be done faster) 99 | ``` 100 | mpc-tiling -i input -o tiles -t temp -e "[minX] [minY] [maxX] [maxY]" -n [number tiles] -p [number processes] 101 | ``` 102 | 103 | - Run the individual PotreeConverters for each tile using ALWAYS the same 104 | previously computed CAABB, spacing and number of levels. 105 | Use `mpc-create-config-pycoeman` to create a XML with the list of PotreeConverter commands that have to be executed. 106 | The format used is the parallel commands XML configuration file format for pycoeman. 107 | Then run any of the pycoeman tools to execute the commands. There are options to run them locally, in a SGE cluster or in a bunch of ssh-reachable hosts. 108 | In all cases it is not recommended to use more than 8 cores per machine since the processing is quite IO-bound. The example is using pycoeman locally in which case `.` must be the parent folder of `tiles`. For other pycoeman parallel commands execution modes visit https://github.com/NLeSC/pycoeman. 109 | ``` 110 | mpc-create-config-pycoeman -i tiles -o ParallelPotreeConverter.xml -f [format LAS or LAZ] -l [levels] -s [spacing] -e "[minX] [minY] [minZ] [maxX] [maxY] [maxZ]" 111 | coeman-par-local -d . -c ParallelPotreeConverter.xml -e ParallelExecution -n [number processes] 112 | ``` 113 | 114 | - After the various Potree-OctTrees are created (one per tile) we need to merge them 115 | into a single one. For this you need to use the `mpc-merge` tool which 116 | joins two Potree-OctTrees into one. 117 | You need to run different iterations until there is only one Potree-OctTree 118 | The script `mpc-merge-all` can be used to merged all the Potree-OctTrees into one 119 | but this has to be used carefully. The final Potree-Octree will be the folder in `Potree-OctTrees-merged` with the highest merging value. 120 | ``` 121 | mkdir Potree-OctTrees 122 | mv ParallelExecution/*/* Potree-OctTrees 123 | mpc-merge-all -i Potree-OctTrees -o Potree-OctTrees-merged -m 124 | ``` 125 | 126 | See an example in [AHN2](http://ahn2.pointclouds.nl). 127 | For this web also the following repositories where used: 128 | 129 | - https://github.com/NLeSC/ahn-pointcloud-viewer 130 | - https://github.com/NLeSC/ahn-pointcloud-viewer-ws 131 | 132 | ### Optional steps 133 | 134 | - Index and sort the raw data (we consider raw data the data before the 2D tiling). Since we are running it on a Linux system we need [wine](https://www.winehq.org/) to run *lassort.exe*. Hence, before the user runs `mpc-sort-index` (s)he should set the environment variable *LASSORT*. 135 | ``` 136 | export LASSORT="wine /bin/lassort.exe" 137 | ``` 138 | 139 | - Fill a DB with the extents of the files in the raw data. Before running `mpc-db-extents`, first create an user, a DB and add the postgis extension: 140 | ``` 141 | #login into postgres 142 | sudo -u postgres psql 143 | 144 | > create user with password ''; 145 | > create database pc_extents owner ; 146 | > \connect pc_extents 147 | > create extension postgis 148 | > \q 149 | ``` 150 | 151 | - Fill a DB with the extents of the files in the potree octree. Run the `mpc-db-extents-potree` 152 | 153 | ## Docker 154 | 155 | We have created a Dockerfile to use the basic tools of Massive-PotreeConverter. 156 | It is meant to help you when running `mpc-info`, `mpc-tiling`, `mpc-create-config-pycoeman`, `coeman-par-local` and `mpc-merge` (or `mpc-merge-all`) 157 | 158 | Don't know about Docker? See [Docker](https://www.docker.com/) 159 | 160 | There is also an image build in [Docker Hub](https://registry.hub.docker.com/u/oscarmartinezrubi/massive-potreeconverter/) that can be directly pulled and worked with! 161 | 162 | In addition to installing all the required software it also creates three volumnes (/data1, /data2, /data3) which are meant to be mounted from different devices when executing docker run. Ideally always try to run in a way that the input data is in one device and the output in another (we actually have 3 volumes because of temp data folder required by `mpc-tiling`) 163 | 164 | An example of using Massive-PotreeConverter through docker: 165 | 166 | - Build the Massive-PotreeConverter docker image from the Dockerfile in this GitHub repository or pull the image from Docker Hub. The following instructions assume that the first option was used. If you pulled the image from Docker you will need to replace the image name. 167 | ``` 168 | cd /path/to/Massive-PotreeConverter 169 | docker build -t oscar/mpc:v1 . 170 | # OR 171 | docker pull oscarmartinezrubi/massive-potreeconverter 172 | ``` 173 | 174 | - Assuming that our LAZ/LAS files are in `/media/data/big/sample`, run `mpc-info` to know the point cloud details: 175 | ``` 176 | docker run -v /media/data/big/sample:/data1 oscar/mpc:v1 mpc-info -i /data1 -c 4 177 | ``` 178 | 179 | - Run `mpc-tiling` to generate tiles (use the number of tiles and X,Y values of the CAABB suggested in the previous step). Note that we specify 3 different local folders which will be available in the docker container, one for the input data, one for the output and one for the temporal data. Also note that a local file in `/media/data/big/sample/myfile` is accessed as `/data1/myfile` in the container. 180 | ``` 181 | docker run -v /media/data/big/sample:/data1 -v /media/data/big/sample_tiles:/data2 -v /media/data/big/sample_tiles_temp:/data3 oscar/mpc:v1 mpc-tiling -i /data1/ -o /data2/ -t /data3/ -e "1555 1749 21659 21853" -n 4 -p 4 182 | ``` 183 | 184 | - Run `mpc-create-config-pycoeman` to create the XML configuration file for the different PotreeConverters. Then run them in parallel in the local machine with `coeman-par-local`. Note that we use the details suggested by `mpc-info` for the PotreeConverters. Note that pycoeman can also be used to run the various PotreeConverters in a SGE cluster or in a bunch of ssh-reachable machines. However, the docker instance is only meant for local executions. To use SGE clusters or a bunch of ssh-reachable machines you need to install Massive-PotreeConverter and dependencies in all the involved machines. 185 | ``` 186 | mkdir /media/data/big/sample_distpotree 187 | docker run -v /media/data/big/sample_distpotree:/data1 -v /media/data/big/sample_tiles:/data2 oscar/mpc:v1 mpc-create-config-pycoeman -i /data2 -o /data1/ParallelPotreeConverter.xml -f LAZ -l 9 -s 83 -e "1555 1749 -94 21659 21853 20010" 188 | docker run -v /media/data/big/sample_distpotree:/data1 -v /media/data/big/sample_tiles:/data2 oscar/mpc:v1 coeman-par-local -d / -c /data1/ParallelPotreeConverter.xml -e /data1/execution -n 4 189 | ``` 190 | 191 | - Run the script to merge all the Potree-OctTrees into one. Note that in this case we only mount and use one volume. For this specific script it is better to have the same device for both input and output. 192 | ``` 193 | sudo mv sample_distpotree/execution/*/* sample_distpotree/poctrees/ 194 | docker run -v /media/data/big/sample_distpotree:/data1 oscar/mpc:v1 mpc-merge-all -i /data1/poctrees -o /data1/poctrees_merge -m 195 | ``` 196 | -------------------------------------------------------------------------------- /Readme_UK.md: -------------------------------------------------------------------------------- 1 | # Massive PotreeConverter test run for UK LIDAR point cloud (free) 2 | A test run of the Massive-PotreeConverter for the UK LIDAR point cloud. 3 | Massive-PotreeConverter consists of four steps, all executable through command-line tools. 4 | We examine each step seperatly and skip the tiling since it loads all the input laz files into memory, which may slow the computer considerbly. 5 | ## Steps: 6 | * Download UK laz files from the [UK survey](http://environment.data.gov.uk/ds/survey). 7 | Each tile is divided into NW,SE,NE,SW (zipped files), and some laz files might be duplicated. 8 | ` ` should contain unique laz files. 9 | * mkdir ` ` 10 | * cd ` ` 11 | * put laz files in ` ` 12 | 13 | * RUN 14 | 15 | `python3 pympc/get_info.py -i ` 16 | 17 | Output contains information about suggested Potree-OctTree CAABB, spacing, number of levels and a suggeted potreeconverter command. 18 | 19 | For example: 20 | ``` 21 | Average density [pts / m2]: 2.1390596605415118 22 | Suggested number of tiles: 1. For this number of points Massive-PotreeConverter is not really required! 23 | Suggested Potree-OctTree CAABB: 340000 130000 -1 349999 139999 9998 24 | Suggested Potree-OctTree spacing: 41 25 | Suggested Potree-OctTree number of levels: 7 26 | Suggested potreeconverter command: 27 | $(which PotreeConverter) -o -l 7 -s 41 --aabb "340000 130000 -1 349999 139999 9998" --output-format LAZ -i 28 | ``` 29 | 30 | ## Option 1: run a potree for each laz seperatly in order to then merge them 31 | * RUN (use full path for the directories) 32 | 33 | `for d in 'ls uk_merge_rawlaz_tiles/uk_st43_flat'` 34 | 35 | 36 | `do` 37 | 38 | 39 | `$(which PotreeConverter) -o $$d -l 7 -s 41 --aabb "340000 130000 -1 349999 139999 9998" --output-format LAZ -i $$d` 40 | 41 | 42 | `done ` 43 | 44 | * Merge the octrees: RUN 45 | 46 | ` python3 pympc/merge_potree_all.py -i -o ` 47 | 48 | ## Visualization 49 | * View in potree-viewer. 50 | 51 | *For demonstration purposes we first merged only part of the octree tiles, presented in this figure. When merging all the laz files we will get Figure 2 (see below).* 52 | 53 | ![alt tag](https://github.com/NLeSC/Massive-PotreeConverter/blob/master/data/Potree_UK_midmerge1.PNG) 54 | 55 | ## Option 2: use PotreeConverter to build potree octree from the laz files . 56 | * RUN 57 | 58 | `$(which PotreeConverter) -o -l 7 -s 41 --aabb "340000 130000 -1 349999 139999 9998" --output-format LAZ -i ` 59 | 60 | ## Visualization 61 | * View in potree-viewer 62 | 63 | ![alt tag](https://github.com/NLeSC/Massive-PotreeConverter/blob/master/data/Potree_UK1.PNG) 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /data/Potree_UK1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NLeSC/Massive-PotreeConverter/7ec0fdd60698beca55ceda948cb58f5e5fae1117/data/Potree_UK1.PNG -------------------------------------------------------------------------------- /data/Potree_UK_midmerge1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NLeSC/Massive-PotreeConverter/7ec0fdd60698beca55ceda948cb58f5e5fae1117/data/Potree_UK_midmerge1.PNG -------------------------------------------------------------------------------- /figures/2D-simplification-of-the-various-steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NLeSC/Massive-PotreeConverter/7ec0fdd60698beca55ceda948cb58f5e5fae1117/figures/2D-simplification-of-the-various-steps.png -------------------------------------------------------------------------------- /pympc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NLeSC/Massive-PotreeConverter/7ec0fdd60698beca55ceda948cb58f5e5fae1117/pympc/__init__.py -------------------------------------------------------------------------------- /pympc/create_pycoeman_config_run_massive_potree_converter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import argparse, os 3 | from lxml import etree 4 | 5 | def run(inputFolder, outputFile, outputFormat, levels, spacing, extent): 6 | # Check user parameters 7 | if not os.path.isdir(inputFolder): 8 | raise Exception(inputFolder + ' does not exist') 9 | if os.path.isfile(outputFile): 10 | raise Exception(outputFile + ' already exists!') 11 | outputFileAbsPath = os.path.abspath(outputFile) 12 | 13 | # Create output file 14 | oFile = open(outputFileAbsPath, 'w') 15 | xmlRootElement = etree.Element('ParCommands') 16 | 17 | for tile in os.listdir(inputFolder): 18 | 19 | if tile != 'tiles.js': 20 | 21 | tileRelPath = inputFolder + '/' + tile 22 | 23 | xmlComponentElement = etree.SubElement(xmlRootElement, 'Component') 24 | 25 | xmlIdElement = etree.SubElement(xmlComponentElement, 'id') 26 | xmlIdElement.text = tile + '_potree_converter' 27 | 28 | xmlRequireElement = etree.SubElement(xmlComponentElement, 'require') 29 | xmlRequireElement.text = tileRelPath 30 | 31 | localOutputFolder = tile + '_potree' 32 | 33 | xmlCommandElement = etree.SubElement(xmlComponentElement, 'command') 34 | xmlCommandElement.text = 'PotreeConverter --outdir ' + localOutputFolder + ' --levels ' + str(levels) + ' --output-format ' + str(outputFormat).upper() + ' --source ' + tile + ' --spacing ' + str(spacing) + ' --aabb "' + extent + '"' 35 | 36 | xmlOutputElement = etree.SubElement(xmlComponentElement, 'output') 37 | xmlOutputElement.text = localOutputFolder 38 | 39 | oFile.write(etree.tostring(xmlRootElement, pretty_print=True, encoding='utf-8').decode('utf-8')) 40 | oFile.close() 41 | 42 | 43 | def argument_parser(): 44 | # define argument menu 45 | parser = argparse.ArgumentParser( 46 | description="Creates a parallel commands XML configuration file. This XML file can be used with pycoeman to run the tasks in a SGE cluster, in a bunch of ssh-reachable hosts or in the local machine") 47 | parser.add_argument('-i','--input',default='',help='Input folder with the tiles. This folder must contain subfolders, one for each tile. Each tile subfolder must contain the LAS/LAZ files in the tile',type=str, required=True) 48 | parser.add_argument('-o','--output',default='',help='Output parallel commands XML configuration file',type=str, required=True) 49 | parser.add_argument('-f','--format',default='',help='Format (LAS or LAZ)',type=str, required=True) 50 | parser.add_argument('-l','--levels',default='',help='Number of levels for OctTree',type=int, required=True) 51 | parser.add_argument('-s','--spacing',default='',help='Spacing at root level',type=int, required=True) 52 | parser.add_argument('-e','--extent',default='',help='Extent to be used for all the OctTree, specify as "minX minY minZ maxX maxY maxZ"',type=str, required=True) 53 | return parser 54 | 55 | def main(): 56 | args = argument_parser().parse_args() 57 | run(args.input, args.output, args.format, args.levels, args.spacing, args.extent) 58 | 59 | if __name__ == "__main__": 60 | main() 61 | -------------------------------------------------------------------------------- /pympc/fill_db_extents.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse, traceback, time, os, multiprocessing, psycopg2 3 | from pympc import utils 4 | 5 | USERNAME = utils.getUserName() 6 | 7 | def runProcess(processIndex, tasksQueue, resultsQueue, connectionString, srid, tableName): 8 | connection = psycopg2.connect(connectionString) 9 | cursor = connection.cursor() 10 | kill_received = False 11 | while not kill_received: 12 | fileAbsPath = None 13 | try: 14 | # This call will patiently wait until new job is available 15 | fileAbsPath = tasksQueue.get() 16 | except: 17 | # if there is an error we will quit 18 | kill_received = True 19 | if fileAbsPath == None: 20 | # If we receive a None job, it means we can stop 21 | kill_received = True 22 | else: 23 | (count, minX, minY, minZ, maxX, maxY, maxZ, _, _, _, _, _, _) = utils.getPCFileDetails(fileAbsPath) 24 | insertStatement = "INSERT INTO " + tableName + "(filepath,numberpoints,minz,maxz,geom) VALUES (%s, %s, %s, %s, ST_MakeEnvelope(%s, %s, %s, %s, %s))" 25 | insertArgs = [fileAbsPath, int(count), float(minZ), float(maxZ), float(minX), float(minY), float(maxX), float(maxY), int(srid)] 26 | cursor.execute(insertStatement, insertArgs) 27 | cursor.connection.commit() 28 | resultsQueue.put((processIndex, fileAbsPath)) 29 | connection.close() 30 | 31 | def run(inputFolder, srid, dbName, dbPass, dbUser, dbHost, dbPort, tableName, numberProcs, addToExisting): 32 | # Make connection 33 | connectionString = utils.getConnectString(dbName, dbUser, dbPass, dbHost, dbPort) 34 | 35 | # Make it absolute path 36 | inputFolder = os.path.abspath(inputFolder) 37 | 38 | if addToExisting == False: 39 | # Create table if it does not exist 40 | connection = psycopg2.connect(connectionString) 41 | cursor = connection.cursor() 42 | cursor.execute('CREATE TABLE ' + tableName + ' (filepath text, numberpoints integer, minz double precision, maxz double precision, geom public.geometry(Geometry, %s))', [srid, ]) 43 | connection.commit() 44 | connection.close() 45 | 46 | # Create queues for the distributed processing 47 | tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles) 48 | resultsQueue = multiprocessing.Queue() # The queue of results 49 | 50 | inputFiles = utils.getFiles(inputFolder, recursive=True) 51 | numFiles = len(inputFiles) 52 | 53 | # Add tasks/inputFiles 54 | for i in range(numFiles): 55 | tasksQueue.put(inputFiles[i]) 56 | for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO) 57 | tasksQueue.put(None) 58 | 59 | processes = [] 60 | # We start numberProcs users processes 61 | for i in range(numberProcs): 62 | processes.append(multiprocessing.Process(target=runProcess, 63 | args=(i, tasksQueue, resultsQueue, connectionString, srid, tableName))) 64 | processes[-1].start() 65 | 66 | # Get all the results (actually we do not need the returned values) 67 | for i in range(numFiles): 68 | resultsQueue.get() 69 | print ('Completed %d of %d (%.02f%%)' % (i+1, numFiles, 100. * float(i+1) / float(numFiles))) 70 | # wait for all users to finish their execution 71 | for i in range(numberProcs): 72 | processes[i].join() 73 | 74 | # Create an index for the geometries 75 | if addToExisting == False: 76 | connection = psycopg2.connect(connectionString) 77 | cursor = connection.cursor() 78 | cursor.execute('CREATE INDEX ' + tableName + '_geom ON ' + tableName + ' USING GIST ( geom )') 79 | connection.commit() 80 | connection.close() 81 | 82 | def argument_parser(): 83 | """ Define the arguments and return the parser object""" 84 | parser = argparse.ArgumentParser( 85 | description="""Creates a table in a PostgreSQL/PostGIS DB with the extent information of the LAS/LAZ files in a folder and creates index. This requires that the DB has PostGIS extension installed""") 86 | parser.add_argument('-d','--dbname',default=utils.DB_NAME,help='Postgres DB name [default ' + utils.DB_NAME + ']',type=str) 87 | parser.add_argument('-u','--dbuser',default=USERNAME,help='DB user [default ' + USERNAME + ']',type=str) 88 | parser.add_argument('-p','--dbpass',default='',help='DB pass',type=str) 89 | parser.add_argument('-b','--dbhost',default='',help='DB host',type=str) 90 | parser.add_argument('-r','--dbport',default='',help='DB port',type=str) 91 | parser.add_argument('-c','--proc',default=1,help='Number of processes [default is 1].',type=int) 92 | parser.add_argument('-a','--add',default=False,help='Adds the extents to an existing table [default is False]. In this case no index is created',action='store_true') 93 | requiredArgs = parser.add_argument_group('required arguments') 94 | requiredArgs.add_argument('-i','--input',default='',help='Input folder with the Potree OctTree (must contain the cloud.js file and the data folder)',type=str, required=True) 95 | requiredArgs.add_argument('-s','--srid',default='',help='SRID',type=int, required=True) 96 | return parser 97 | return parser 98 | 99 | def main(): 100 | args = argument_parser().parse_args() 101 | print ('Input folder: ', args.input) 102 | print ('SRID: ', args.srid) 103 | print ('DB name: ', args.dbname) 104 | print ('DB user: ', args.dbuser) 105 | print ('DB pass: ', '*'*len(args.dbpass)) 106 | print ('DB host: ', args.dbhost) 107 | print ('DB port: ', args.dbport) 108 | 109 | try: 110 | t0 = time.time() 111 | print ('Starting ' + os.path.basename(__file__) + '...') 112 | run(args.input, args.srid, args.dbname, args.dbpass, args.dbuser, args.dbhost, args.dbport, utils.DB_TABLE_RAW, args.proc, args.add) 113 | print ('Finished in %.2f seconds' % (time.time() - t0)) 114 | except: 115 | print ('Execution failed!') 116 | print (traceback.format_exc()) 117 | 118 | if __name__ == "__main__": 119 | main() 120 | -------------------------------------------------------------------------------- /pympc/fill_db_extents_potree.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse, traceback, time, os, psycopg2, json 3 | from pympc import utils 4 | 5 | USERNAME = utils.getUserName() 6 | COMMIT_INTERVAL = 1000 7 | 8 | counter = 0 9 | 10 | def getChildBC(minX,minY,minZ,maxX,maxY,maxZ,childIndex): 11 | rX = (maxX - minX) / 2. 12 | rY = (maxY - minY) / 2. 13 | rZ = (maxZ - minZ) / 2. 14 | if childIndex == 0: 15 | return (minX, minY, minZ, minX + rX, minY + rY, minZ + rZ) 16 | elif childIndex == 1: 17 | return (minX, minY, minZ + rZ, minX + rX, minY + rY, maxZ) 18 | elif childIndex == 2: 19 | return (minX, minY + rY, minZ, minX + rX, maxY, minZ + rZ) 20 | elif childIndex == 3: 21 | return (minX, minY + rY, minZ + rZ, minX + rX, maxY, maxZ) 22 | elif childIndex == 4: 23 | return (minX + rX, minY, minZ, maxX, minY + rY, minZ + rZ) 24 | elif childIndex == 5: 25 | return (minX + rX, minY, minZ + rZ, maxX, minY + rY, maxZ) 26 | elif childIndex == 6: 27 | return (minX + rX, minY + rY, minZ, maxX, maxY, minZ + rZ) 28 | elif childIndex == 7: 29 | return (minX + rX, minY + rY, minZ + rZ, maxX, maxY, maxZ) 30 | else: 31 | raise Exception('Child index must be [0,7]!') 32 | 33 | def addNodeFolder(cursor, node, nodeAbsPath, hierarchyStepSize, extension, minX, minY, minZ, maxX, maxY, maxZ, srid, tableName): 34 | hrcFile = node + '.hrc' 35 | hrc = None 36 | if os.path.isfile(nodeAbsPath + '/' + hrcFile): 37 | # Check if there is data in this node in Octtree A (we check if the HRC file for this node exist) 38 | hrc = utils.readHRC(nodeAbsPath + '/' + hrcFile, hierarchyStepSize) 39 | for level in range(hierarchyStepSize+1): 40 | for i in range(len(hrc[level])): 41 | if hrc[level][i]: 42 | (childNode, isFile) = utils.getNodeName(level, i, node, hierarchyStepSize, extension) 43 | relativeNode = childNode.replace(node,'').replace('.' + extension, '') 44 | (lminX, lminY, lminZ, lmaxX, lmaxY, lmaxZ) = (minX, minY, minZ, maxX, maxY, maxZ) 45 | for pNode in relativeNode: 46 | (lminX, lminY, lminZ, lmaxX, lmaxY, lmaxZ) = getChildBC(lminX, lminY, lminZ, lmaxX, lmaxY, lmaxZ, int(pNode)) 47 | if isFile: 48 | addNodeFile(cursor, [nodeAbsPath + '/' + childNode, len(childNode) - 5, int(hrc[level][i]), lminZ, lmaxZ, lminX, lminY, lmaxX, lmaxY, int(srid)], tableName) 49 | else: 50 | addNodeFolder(cursor, node + childNode, nodeAbsPath + '/' + childNode, hierarchyStepSize, extension, lminX, lminY, lminZ, lmaxX, lmaxY, lmaxZ, srid, tableName) 51 | 52 | def addNodeFile(cursor, insertArgs, tableName): 53 | global counter 54 | insertStatement = "INSERT INTO " + tableName + "(filepath,level,numberpoints,minz,maxz,geom) VALUES (%s, %s, %s, %s, %s, ST_MakeEnvelope(%s, %s, %s, %s, %s))" 55 | cursor.execute(insertStatement, insertArgs) 56 | counter += 1 57 | if counter == COMMIT_INTERVAL: 58 | cursor.connection.commit() 59 | counter = 0 60 | 61 | 62 | def run(inputFolder, srid, dbName, dbPass, dbUser, dbHost, dbPort, tableName): 63 | # Make connection 64 | connectionString = utils.getConnectString(dbName, dbUser, dbPass, dbHost, dbPort) 65 | connection = psycopg2.connect(connectionString) 66 | cursor = connection.cursor() 67 | 68 | # Make it absolute path 69 | inputFolder = os.path.abspath(inputFolder) 70 | cloudJSAbsPath = inputFolder + '/cloud.js' 71 | if not os.path.isfile(cloudJSAbsPath): 72 | raise Exception('Error: ' + cloudJSAbsPath + ' is not found!') 73 | 74 | cloudJSData = json.loads(open(cloudJSAbsPath, 'r').read()) 75 | hierarchyStepSize = cloudJSData['hierarchyStepSize'] 76 | cloudJSBBox = cloudJSData['boundingBox'] 77 | (minX,minY,minZ,maxX,maxY,maxZ) = (cloudJSBBox['lx'],cloudJSBBox['ly'],cloudJSBBox['lz'],cloudJSBBox['ux'],cloudJSBBox['uy'],cloudJSBBox['uz']) 78 | 79 | cursor.execute('CREATE TABLE ' + tableName + ' (filepath text, level integer, numberpoints integer, minz double precision, maxz double precision, geom public.geometry(Geometry, %s))', [srid, ]) 80 | connection.commit() 81 | dataAbsPath = inputFolder + '/data' 82 | if len(os.listdir(dataAbsPath)): 83 | listFileRootA = os.listdir(dataAbsPath + '/r') 84 | if 'r.las' in listFileRootA: 85 | extension = 'las' 86 | elif 'r.laz' in listFileRootA: 87 | extension = 'laz' 88 | else: 89 | raise Exception('Error: ' + __file__ + ' only compatible with las/laz format') 90 | addNodeFolder(cursor, 'r', dataAbsPath + '/r', hierarchyStepSize, extension, minX, minY, minZ, maxX, maxY, maxZ, srid, tableName) 91 | else: 92 | raise Exception('Error: ' + dataAbsPath + ' is empty!') 93 | 94 | # Commit last uncommited inserts 95 | connection.commit() 96 | 97 | # Create an index for the geometries 98 | cursor.execute('CREATE INDEX ' + tableName + '_geom ON ' + tableName + ' USING GIST ( geom )') 99 | cursor.execute('CREATE INDEX ' + tableName + '_level ON ' + tableName + ' (level)') 100 | connection.commit() 101 | 102 | # cursor.execute('CREATE TABLE ' + utils.DB_TABLE_POTREE_DIST + ' as SELECT level, count(numberpoints) as numberfiles, sum(numberpoints) as numberpoints, ((sum(numberpoints) :: float) / (SELECT sum(numberpoints) FROM ' + utils.DB_TABLE_RAW + ') :: float) as ratio FROM ' + utils.DB_TABLE_POTREE + ' GROUP BY level ORDER BY level') 103 | # connection.commit() 104 | 105 | connection.close() 106 | 107 | def argument_parser(): 108 | """ Define the arguments and return the parser object""" 109 | parser = argparse.ArgumentParser( 110 | description="""Creates a DB table with the extent information of the files in a Potree OctTree. 111 | The files are not opened, instead we get the information from the HRC files and the 112 | known extent of the OctTree nodes""") 113 | parser.add_argument('-d','--dbname',default=utils.DB_NAME,help='Postgres DB name [default ' + utils.DB_NAME + ']',type=str) 114 | parser.add_argument('-u','--dbuser',default=USERNAME,help='DB user [default ' + USERNAME + ']',type=str) 115 | parser.add_argument('-p','--dbpass',default='',help='DB pass',type=str) 116 | parser.add_argument('-b','--dbhost',default='',help='DB host',type=str) 117 | parser.add_argument('-r','--dbport',default='',help='DB port',type=str) 118 | requiredArgs = parser.add_argument_group('required arguments') 119 | requiredArgs.add_argument('-i','--input',default='',help='Input folder with the Potree OctTree (must contain the cloud.js file and the data folder)',type=str, required=True) 120 | requiredArgs.add_argument('-s','--srid',default='',help='SRID',type=int, required=True) 121 | return parser 122 | 123 | def main(): 124 | args = argument_parser().parse_args() 125 | print ('Input Potree OctTree: ', args.input) 126 | print ('SRID: ', args.srid) 127 | print ('DB name: ', args.dbname) 128 | print ('DB user: ', args.dbuser) 129 | print ('DB pass: ', '*'*len(args.dbpass)) 130 | print ('DB host: ', args.dbhost) 131 | print ('DB port: ', args.dbport) 132 | 133 | try: 134 | t0 = time.time() 135 | print ('Starting ' + os.path.basename(__file__) + '...') 136 | run(args.input, args.srid, args.dbname, args.dbpass, args.dbuser, args.dbhost, args.dbport, utils.DB_TABLE_POTREE) 137 | print ('Finished in %.2f seconds' % (time.time() - t0)) 138 | except: 139 | print ('Execution failed!') 140 | print (traceback.format_exc()) 141 | 142 | if __name__ == "__main__": 143 | main() 144 | -------------------------------------------------------------------------------- /pympc/generate_tiles.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | This script is used to distribute the points of a bunch of LAS/LAZ files in 4 | different tiles. The XY extent of the different tiles match the XY extent of the 5 | nodes of a certain level of a octree defined by the provided bounding box (z 6 | is not required by the XY tiling). Which level of the octree is matched 7 | depends on specified number of tiles: 8 | - 4 (2X2) means matching with level 1 of octree 9 | - 16 (4x4) means matching with level 2 of octree 10 | and so on. 11 | """ 12 | 13 | import argparse, traceback, time, os, math, multiprocessing, json 14 | from pympc import utils 15 | 16 | def getTileIndex(pX, pY, minX, minY, maxX, maxY, axisTiles): 17 | xpos = int((pX - minX) * axisTiles / (maxX - minX)) 18 | ypos = int((pY - minY) * axisTiles / (maxY - minY)) 19 | if xpos == axisTiles: # If it is in the edge of the box (in the maximum side) we need to put in the last tile 20 | xpos -= 1 21 | if ypos == axisTiles: 22 | ypos -= 1 23 | return (xpos, ypos) 24 | 25 | def getTileName(xIndex, yIndex): 26 | return 'tile_%d_%d' % (int(xIndex), int(yIndex)) 27 | 28 | def runProcess(processIndex, tasksQueue, resultsQueue, minX, minY, maxX, maxY, outputFolder, tempFolder, axisTiles): 29 | kill_received = False 30 | while not kill_received: 31 | inputFile = None 32 | try: 33 | # This call will patiently wait until new job is available 34 | inputFile = tasksQueue.get() 35 | except: 36 | # if there is an error we will quit 37 | kill_received = True 38 | if inputFile == None: 39 | # If we receive a None job, it means we can stop 40 | kill_received = True 41 | else: 42 | # Get number of points and BBOX of this file 43 | (fCount, fMinX, fMinY, _, fMaxX, fMaxY, _, _, _, _, _, _, _) = utils.getPCFileDetails(inputFile) 44 | print ('Processing', os.path.basename(inputFile), fCount, fMinX, fMinY, fMaxX, fMaxY) 45 | # For the four vertices of the BBOX we get in which tile they should go 46 | posMinXMinY = getTileIndex(fMinX, fMinY, minX, minY, maxX, maxY, axisTiles) 47 | posMinXMaxY = getTileIndex(fMinX, fMaxY, minX, minY, maxX, maxY, axisTiles) 48 | posMaxXMinY = getTileIndex(fMaxX, fMinY, minX, minY, maxX, maxY, axisTiles) 49 | posMaxXMaxY = getTileIndex(fMaxX, fMaxY, minX, minY, maxX, maxY, axisTiles) 50 | 51 | if (posMinXMinY == posMinXMaxY) and (posMinXMinY == posMaxXMinY) and (posMinXMinY == posMaxXMaxY): 52 | # If they are the same the whole file can be directly copied to the tile 53 | tileFolder = outputFolder + '/' + getTileName(*posMinXMinY) 54 | if not os.path.isdir(tileFolder): 55 | utils.shellExecute('mkdir -p ' + tileFolder) 56 | utils.shellExecute('cp ' + inputFile + ' ' + tileFolder) 57 | else: 58 | # If not, we run PDAL gridder to split the file in pieces that can go to the tiles 59 | tGCount = runPDALSplitter(processIndex, inputFile, outputFolder, tempFolder, minX, minY, maxX, maxY, axisTiles) 60 | if tGCount != fCount: 61 | print ('WARNING: split version of ', inputFile, ' does not have same number of points (', tGCount, 'expected', fCount, ')') 62 | resultsQueue.put((processIndex, inputFile, fCount)) 63 | 64 | def runPDALSplitter(processIndex, inputFile, outputFolder, tempFolder, minX, minY, maxX, maxY, axisTiles): 65 | pTempFolder = tempFolder + '/' + str(processIndex) 66 | if not os.path.isdir(pTempFolder): 67 | utils.shellExecute('mkdir -p ' + pTempFolder) 68 | 69 | # Get the lenght required by the PDAL split filter in order to get "squared" tiles 70 | lengthPDAL = (maxX - minX) / float(axisTiles) 71 | 72 | utils.shellExecute('pdal split -i ' + inputFile + ' -o ' + pTempFolder + '/' + os.path.basename(inputFile) + ' --origin_x=' + str(minX) + ' --origin_y=' + str(minY) + ' --length ' + str(lengthPDAL)) 73 | tGCount = 0 74 | for gFile in os.listdir(pTempFolder): 75 | (gCount, gFileMinX, gFileMinY, _, gFileMaxX, gFileMaxY, _, _, _, _, _, _, _) = utils.getPCFileDetails(pTempFolder + '/' + gFile) 76 | # This tile should match with some tile. Let's use the central point to see which one 77 | pX = gFileMinX + ((gFileMaxX - gFileMinX) / 2.) 78 | pY = gFileMinY + ((gFileMaxY - gFileMinY) / 2.) 79 | tileFolder = outputFolder + '/' + getTileName(*getTileIndex(pX, pY, minX, minY, maxX, maxY, axisTiles)) 80 | if not os.path.isdir(tileFolder): 81 | utils.shellExecute('mkdir -p ' + tileFolder) 82 | utils.shellExecute('mv ' + pTempFolder + '/' + gFile + ' ' + tileFolder + '/' + gFile) 83 | tGCount += gCount 84 | return tGCount 85 | 86 | 87 | def run(inputFolder, outputFolder, tempFolder, extent, numberTiles, numberProcs): 88 | # Check input parameters 89 | if not os.path.isdir(inputFolder) and not os.path.isfile(inputFolder): 90 | raise Exception('Error: Input folder does not exist!') 91 | if os.path.isfile(outputFolder): 92 | raise Exception('Error: There is a file with the same name as the output folder. Please, delete it!') 93 | elif os.path.isdir(outputFolder) and os.listdir(outputFolder): 94 | raise Exception('Error: Output folder exists and it is not empty. Please, delete the data in the output folder!') 95 | # Get the number of tiles per dimension (x and y) 96 | axisTiles = math.sqrt(numberTiles) 97 | if (not axisTiles.is_integer()) or (int(axisTiles) % 2): 98 | raise Exception('Error: Number of tiles must be the square of number which is power of 2!') 99 | axisTiles = int(axisTiles) 100 | 101 | # Create output and temporal folder 102 | utils.shellExecute('mkdir -p ' + outputFolder) 103 | utils.shellExecute('mkdir -p ' + tempFolder) 104 | 105 | (minX, minY, maxX, maxY) = extent.split(' ') 106 | minX = float(minX) 107 | minY = float(minY) 108 | maxX = float(maxX) 109 | maxY = float(maxY) 110 | 111 | if (maxX - minX) != (maxY - minY): 112 | raise Exception('Error: Tiling requires that maxX-minX must be equal to maxY-minY!') 113 | 114 | inputFiles = utils.getFiles(inputFolder, recursive=True) 115 | numInputFiles = len(inputFiles) 116 | print ('%s contains %d files' % (inputFolder, numInputFiles)) 117 | 118 | # Create queues for the distributed processing 119 | tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles) 120 | resultsQueue = multiprocessing.Queue() # The queue of results 121 | 122 | # Add tasks/inputFiles 123 | for i in range(numInputFiles): 124 | tasksQueue.put(inputFiles[i]) 125 | for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO) 126 | tasksQueue.put(None) 127 | 128 | processes = [] 129 | # We start numberProcs users processes 130 | for i in range(numberProcs): 131 | processes.append(multiprocessing.Process(target=runProcess, 132 | args=(i, tasksQueue, resultsQueue, minX, minY, maxX, maxY, outputFolder, tempFolder, axisTiles))) 133 | processes[-1].start() 134 | 135 | # Get all the results (actually we do not need the returned values) 136 | numPoints = 0 137 | for i in range(numInputFiles): 138 | (processIndex, inputFile, inputFileNumPoints) = resultsQueue.get() 139 | numPoints += inputFileNumPoints 140 | print ('Completed %d of %d (%.02f%%)' % (i+1, numInputFiles, 100. * float(i+1) / float(numInputFiles))) 141 | # wait for all users to finish their execution 142 | for i in range(numberProcs): 143 | processes[i].join() 144 | 145 | # Write the tile.js file with information about the tiles 146 | cFile = open(outputFolder + '/tiles.js', 'w') 147 | d = {} 148 | d["NumberPoints"] = numPoints 149 | d["numXTiles"] = axisTiles 150 | d["numYTiles"] = axisTiles 151 | d["boundingBox"] = {'lx':minX,'ly':minY,'ux':maxX,'uy':maxY} 152 | cFile.write(json.dumps(d,indent=4,sort_keys=True)) 153 | cFile.close() 154 | 155 | 156 | def argument_parser(): 157 | """ Define the arguments and return the parser object""" 158 | parser = argparse.ArgumentParser( 159 | description="""This script is used to distribute the points of a bunch of LAS/LAZ files in 160 | different tiles. The XY extent of the different tiles match the XY extent of the 161 | nodes of a certain level of a octree defined by the provided bounding box (z 162 | is not required by the XY tiling). Which level of the octree is matched 163 | depends on specified number of tiles: 164 | - 4 (2X2) means matching with level 1 of octree 165 | - 16 (4x4) means matching with level 2 of octree 166 | and so on. """) 167 | parser.add_argument('-i','--input',default='',help='Input data folder (with LAS/LAZ files)',type=str, required=True) 168 | parser.add_argument('-o','--output',default='',help='Output data folder for the different tiles',type=str, required=True) 169 | parser.add_argument('-t','--temp',default='',help='Temporal folder where required processing is done',type=str, required=True) 170 | parser.add_argument('-e','--extent',default='',help='XY extent to be used for the tiling, specify as "minX minY maxX maxY". maxX-minX must be equal to maxY-minY. This is required to have a good extent matching with the octree',type=str, required=True) 171 | parser.add_argument('-n','--number',default='',help='Number of tiles (must be the power of 4. Example: 4, 16, 64, 256, 1024, etc.)',type=int, required=True) 172 | parser.add_argument('-p','--proc',default=1,help='Number of processes [default is 1]',type=int) 173 | return parser 174 | 175 | def main(): 176 | args = argument_parser().parse_args() 177 | print ('Input folder: ', args.input) 178 | print ('Output folder: ', args.output) 179 | print ('Temporal folder: ', args.temp) 180 | print ('Extent: ', args.extent) 181 | print ('Number of tiles: ', args.number) 182 | print ('Number of processes: ', args.proc) 183 | 184 | try: 185 | t0 = time.time() 186 | print ('Starting ' + os.path.basename(__file__) + '...') 187 | run(args.input, args.output, args.temp, args.extent, args.number, args.proc) 188 | print( 'Finished in %.2f seconds' % (time.time() - t0)) 189 | except: 190 | print ('Execution failed!') 191 | print( traceback.format_exc()) 192 | 193 | if __name__ == "__main__": 194 | main() 195 | -------------------------------------------------------------------------------- /pympc/get_info.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """Gets the CAABB, number of points and average density for a point cloud. Also gets a suggested potreeconverter command""" 4 | 5 | import argparse, traceback, sys, math, time, os 6 | from pympc import utils 7 | 8 | def run(inputFolder, numberProcs, targetTile, targetSize): 9 | (_, tcount, tminx, tminy, tminz, tmaxx, tmaxy, tmaxz, _, _, _) = utils.getPCFolderDetails(inputFolder, numberProcs) 10 | #convert to integers 11 | tminx = int(math.ceil(tminx)) 12 | tminy = int(math.ceil(tminy)) 13 | tminz = int(math.ceil(tminz)) 14 | tmaxx = int(math.floor(tmaxx)) 15 | tmaxy = int(math.floor(tmaxy)) 16 | tmaxz = int(math.floor(tmaxz)) 17 | 18 | tRangeX = tmaxx - tminx 19 | tRangeY = tmaxy - tminy 20 | tRangeZ = tmaxz - tminz 21 | 22 | density2 = float(tcount) / (tRangeX*tRangeY) 23 | #density3 = float(tcount) / (tRangeX*tRangeY*tRangeZ) 24 | 25 | maxRange = max((tRangeX, tRangeY, tRangeZ)) 26 | 27 | (minX,minY,minZ,maxX,maxY,maxZ) = (tminx, tminy, tminz, tminx + maxRange, tminy + maxRange, tminz + maxRange) 28 | 29 | print('AABB: ', tminx, tminy, tminz, tmaxx, tmaxy, tmaxz) 30 | print('#Points:' , tcount) 31 | print('Average density [pts / m2]:' , density2) 32 | #print('Average density [pts / m3]:' , density3) 33 | 34 | if tcount < targetTile: 35 | print('Suggested number of tiles: 1. For this number of points Massive-PotreeConverter is not really required!') 36 | else: 37 | c = 1 38 | numpertile = None 39 | while True: 40 | numtiles = math.pow(math.pow(2,c),2) 41 | numpertile = float(tcount) / numtiles 42 | if numpertile < targetTile: 43 | break 44 | else: 45 | c+=1 46 | print('Suggested number of tiles: ', numtiles) 47 | 48 | deepSpacing = 1 / math.sqrt(density2) 49 | spacing = math.ceil(maxRange / math.sqrt(targetSize)) 50 | 51 | numlevels = 0 52 | lspacing = spacing 53 | while lspacing > deepSpacing: 54 | numlevels+=1 55 | lspacing = lspacing / 2 56 | numlevels+=1 57 | 58 | print('Suggested Potree-OctTree CAABB: ', minX,minY,minZ,maxX,maxY,maxZ) 59 | print('Suggested Potree-OctTree spacing: ', spacing) 60 | print('Suggested Potree-OctTree number of levels: ', numlevels) 61 | print('Suggested potreeconverter command:') 62 | print('$(which PotreeConverter) -o -l %i -s %i --CAABB "%i %i %i %i %i %i" --output-format LAZ -i ' % (numlevels, spacing, minX,minY,minZ,maxX,maxY,maxZ)) 63 | 64 | def argument_parser(): 65 | """ Define the arguments and return the parser object""" 66 | parser = argparse.ArgumentParser( 67 | description="Gets the bounding box of the points in the files of the input folder. Also computes the number of points and the density. It also suggests spacing and number of levels to use for PotreeConverter") 68 | parser.add_argument('-i','--input',default='',help='Input folder with the point cloud files',type=str, required=True) 69 | parser.add_argument('-c','--proc',default=1,help='Number of processes [default is 1]',type=int) 70 | parser.add_argument('-m','--avgtile',default=5000000000,help='Target average number of points per tile [default is 5000000000]',type=int) 71 | parser.add_argument('-t','--avgnode',default=60000,help='Target average number of points per OctTree node [default is 60000]',type=int) 72 | return parser 73 | 74 | def main(): 75 | args = argument_parser().parse_args() 76 | print('Input folder: ' , args.input) 77 | print('Number of processes: ' , args.proc) 78 | print('Target tile number of points: ' , args.avgtile) 79 | print('Target OctTree node number of points: ' , args.avgnode) 80 | 81 | try: 82 | t0 = time.time() 83 | print('Starting ' + os.path.basename(__file__) + '...') 84 | run(args.input, args.proc, args.avgtile, args.avgnode) 85 | print('Finished in %.2f seconds' % (time.time() - t0)) 86 | except: 87 | print('Execution failed!') 88 | print(traceback.format_exc()) 89 | 90 | if __name__ == "__main__": 91 | main() 92 | -------------------------------------------------------------------------------- /pympc/get_wkt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os, argparse, traceback, time, multiprocessing, glob 3 | from pympc import utils 4 | 5 | def runProcess(processIndex, tasksQueue, resultsQueue, outputFolder, useApprox): 6 | kill_received = False 7 | while not kill_received: 8 | tileAbsPath = None 9 | try: 10 | # This call will patiently wait until new job is available 11 | tileAbsPath = tasksQueue.get() 12 | except: 13 | # if there is an error we will quit 14 | kill_received = True 15 | if tileAbsPath == None: 16 | # If we receive a None job, it means we can stop 17 | kill_received = True 18 | else: 19 | tFile = open(outputFolder + '/' + os.path.basename(tileAbsPath) + '.wkt', 'w') 20 | (tMinX,tMinY,tMaxX,tMaxY) = (None, None, None, None) 21 | 22 | if os.path.isfile(tileAbsPath): 23 | tilefiles = [tileAbsPath,] 24 | else: 25 | tilefiles = glob.glob(tileAbsPath + '/*') 26 | 27 | for tilefile in tilefiles: 28 | (_, fMinX, fMinY, _, fMaxX, fMaxY, _, _, _, _, _, _, _) = utils.getPCFileDetails(tilefile) 29 | if useApprox: 30 | if tMinX == None or tMinX > fMinX: 31 | tMinX = fMinX 32 | if tMinY == None or tMinY > fMinY: 33 | tMinY = fMinY 34 | if tMaxX == None or tMaxX < fMaxX: 35 | tMaxX = fMaxX 36 | if tMaxY == None or tMaxY < fMaxY: 37 | tMaxY = fMaxY 38 | else: 39 | tFile.write('POLYGON ((%f %f, %f %f, %f %f, %f %f, %f %f))\n' % (fMinX, fMaxY, fMinX, fMinY, fMaxX, fMinY, fMaxX, fMaxY, fMinX, fMaxY)) 40 | if useApprox and tMinX != None: 41 | tFile.write('POLYGON ((%f %f, %f %f, %f %f, %f %f, %f %f))\n' % (tMinX, tMaxY, tMinX, tMinY, tMaxX, tMinY, tMaxX, tMaxY, tMinX, tMaxY)) 42 | tFile.close() 43 | resultsQueue.put((processIndex, tileAbsPath)) 44 | 45 | def run(inputFolder, outputFolder, numberProcs, useApprox): 46 | # Check input parameters 47 | if not os.path.isdir(inputFolder): 48 | raise Exception('Error: Input folder does not exist!') 49 | if os.path.isfile(outputFolder): 50 | raise Exception('Error: There is a file with the same name as the output folder. Please, delete it!') 51 | elif os.path.isdir(outputFolder) and os.listdir(outputFolder): 52 | raise Exception('Error: Output folder exists and it is not empty. Please, delete the data in the output folder!') 53 | 54 | # Create queues for the distributed processing 55 | tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles) 56 | resultsQueue = multiprocessing.Queue() # The queue of results 57 | 58 | os.system('mkdir -p ' + outputFolder) 59 | 60 | tilesNames = os.listdir(inputFolder) 61 | if 'tiles.js' in tilesNames: 62 | tilesNames.remove('tiles.js') 63 | numTiles = len(tilesNames) 64 | 65 | # Add tasks/inputFiles 66 | for i in range(numTiles): 67 | tasksQueue.put(inputFolder + '/' + tilesNames[i]) 68 | for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO) 69 | tasksQueue.put(None) 70 | 71 | processes = [] 72 | # We start numberProcs users processes 73 | for i in range(numberProcs): 74 | processes.append(multiprocessing.Process(target=runProcess, 75 | args=(i, tasksQueue, resultsQueue, outputFolder, useApprox))) 76 | processes[-1].start() 77 | 78 | # Get all the results (actually we do not need the returned values) 79 | for i in range(numTiles): 80 | resultsQueue.get() 81 | print ('Completed %d of %d (%.02f%%)' % (i+1, numTiles, 100. * float(i+1) / float(numTiles))) 82 | # wait for all users to finish their execution 83 | for i in range(numberProcs): 84 | processes[i].join() 85 | 86 | def argument_parser(): 87 | """ Define the arguments and return the parser object""" 88 | parser = argparse.ArgumentParser( 89 | description="From a folder full of point cloud elements (LAS/LAZ files or subfolders containing LAS/LAZ files), it create a WKT file with extent/s of the elements") 90 | parser.add_argument('-i','--input',default='',help='Input folder with the point cloud elements (a element can be a single LAS/LAZ file or a folder with LAS/LAZ files)',type=str, required=True) 91 | parser.add_argument('-o','--output',default='',help='Output folder for the WKT files',type=str, required=True) 92 | parser.add_argument('-c','--proc',default=1,help='Number of processes [default is 1]',type=int) 93 | parser.add_argument('-a','--approx',help='Only outputs one a BBOX per element instead of a BBOX per file in element (only applies if an element is a folder). [default False]',default=False,action='store_true') 94 | return parser 95 | 96 | def main(): 97 | args = argument_parser().parse_args() 98 | print ('Input folder: ', args.input) 99 | print ('Output folder: ', args.output) 100 | print ('Number of processes: ', args.proc) 101 | print ('Approximation: ', args.approx) 102 | try: 103 | t0 = time.time() 104 | print ('Starting ' + os.path.basename(__file__) + '...') 105 | run(args.input, args.output, args.proc, args.approx) 106 | print ('Finished in %.2f seconds' % (time.time() - t0)) 107 | except: 108 | print ('Execution failed!') 109 | print (traceback.format_exc()) 110 | 111 | if __name__ == "__main__": 112 | main() 113 | -------------------------------------------------------------------------------- /pympc/merge_potree.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Merge two Potree OctTrees into a single one.""" 3 | 4 | import argparse, traceback, time, os, json, numpy 5 | from pympc import utils 6 | 7 | 8 | def fixHeader(inputFile, outputFile): 9 | (_, minX, minY, minZ, maxX, maxY, maxZ, _, _, _, _, _, _) = utils.getPCFileDetails(inputFile) 10 | utils.shellExecute('lasinfo -i %s -nc -nv -nco -set_bounding_box %f %f %f %f %f %f' % (outputFile, minX, minY, minZ, maxX, maxY, maxZ)) 11 | 12 | 13 | def joinNode(node, nodeAbsPathA, nodeAbsPathB, nodeAbsPathO, hierarchyStepSize, extension, cmcommand): 14 | hrcFile = node + '.hrc' 15 | hrcA = None 16 | if os.path.isfile(nodeAbsPathA + '/' + hrcFile): 17 | # Check if there is data in this node in Octtree A (we check if the HRC file for this node exist) 18 | hrcA = utils.readHRC(nodeAbsPathA + '/' + hrcFile, hierarchyStepSize) 19 | if len(os.listdir(nodeAbsPathA)) == 2: 20 | hrcA[0][0] = utils.getPCFileDetails(nodeAbsPathA + '/' + node + extension)[0] 21 | hrcB = None 22 | if os.path.isfile(nodeAbsPathB + '/' + hrcFile): 23 | # Check if there is data in this node in Octtree B (we check if the HRC file for this node exist) 24 | hrcB = utils.readHRC(nodeAbsPathB + '/' + hrcFile, hierarchyStepSize) 25 | if len(os.listdir(nodeAbsPathB)) == 2: 26 | hrcB[0][0] = utils.getPCFileDetails(nodeAbsPathB + '/' + node + extension)[0] 27 | 28 | if hrcA != None and hrcB != None: 29 | utils.shellExecute('mkdir -p ' + nodeAbsPathO) 30 | # If both Octtrees A and B have data in this node we have to merge them 31 | hrcO = utils.initHRC(hierarchyStepSize) 32 | for level in range(hierarchyStepSize+2): 33 | numChildrenA = len(hrcA[level]) 34 | numChildrenB = len(hrcB[level]) 35 | numChildrenO = max((numChildrenA, numChildrenB)) 36 | if level < (hierarchyStepSize+1): 37 | for i in range(numChildrenO): 38 | hasNodeA = (i < numChildrenA) and (hrcA[level][i] > 0) 39 | hasNodeB = (i < numChildrenB) and (hrcB[level][i] > 0) 40 | (childNode, isFile) = utils.getNodeName(level, i, node, hierarchyStepSize, extension) 41 | if hasNodeA and hasNodeB: 42 | hrcO[level].append(hrcA[level][i] + hrcB[level][i]) 43 | #merge lAZ or folder (iteratively) 44 | if isFile: 45 | utils.shellExecute('lasmerge -i ' + nodeAbsPathA + '/' + childNode + ' ' + nodeAbsPathB + '/' + childNode + ' -o ' + nodeAbsPathO + '/' + childNode) 46 | #We now need to set the header of the output file as the input files (lasmerge will have shrink it and we do not want that 47 | fixHeader(nodeAbsPathA + '/' + childNode, nodeAbsPathO + '/' + childNode) 48 | else: 49 | joinNode(node + childNode, nodeAbsPathA + '/' + childNode, nodeAbsPathB + '/' + childNode, nodeAbsPathO + '/' + childNode, hierarchyStepSize, extension, cmcommand) 50 | elif hasNodeA: 51 | #mv / cp 52 | hrcO[level].append(hrcA[level][i]) 53 | utils.shellExecute(cmcommand + nodeAbsPathA + '/' + childNode + ' ' + nodeAbsPathO + '/' + childNode) 54 | elif hasNodeB: 55 | #mv / cp 56 | hrcO[level].append(hrcB[level][i]) 57 | utils.shellExecute(cmcommand + nodeAbsPathB + '/' + childNode + ' ' + nodeAbsPathO + '/' + childNode) 58 | else: 59 | hrcO[level].append(0) 60 | else: 61 | hrcO[level] = list(numpy.array(hrcA[level] + ([0]*(numChildrenO - numChildrenA))) + numpy.array(hrcB[level] + ([0]*(numChildrenO - numChildrenB)))) 62 | # Write the HRC file 63 | utils.writeHRC(nodeAbsPathO + '/' + hrcFile, hierarchyStepSize, hrcO) 64 | elif hrcA != None: 65 | # Only Octtree A has data in this node. We can directly copy it to the output Octtree 66 | utils.shellExecute(cmcommand + nodeAbsPathA + ' ' + nodeAbsPathO) 67 | elif hrcB != None: 68 | # Only Octtree B has data in this node. We can directly copy it to the output Octtree 69 | utils.shellExecute(cmcommand + nodeAbsPathB + ' ' + nodeAbsPathO) 70 | 71 | def createCloudJS(cloudJSA, cloudJSB, cloudJSO): 72 | result = False # Is the data properly merged 73 | 74 | cloudJSDataA = json.loads(open(cloudJSA, 'r').read()) 75 | cloudJSDataB = json.loads(open(cloudJSB, 'r').read()) 76 | 77 | cloudJSDataO = {} 78 | # Compare fields in the input cloud.js's that should be equal 79 | # We also write the fields in the output cloud.js 80 | for equalField in ["version", "octreeDir", "boundingBox", "pointAttributes", "spacing", "scale", "hierarchyStepSize"]: 81 | if cloudJSDataA[equalField] == cloudJSDataB[equalField]: 82 | cloudJSDataO[equalField] = cloudJSDataA[equalField] 83 | else: 84 | raise Exception('Error: Can not join cloud.js. Distinct ' + equalField + '!') 85 | 86 | # For the field "tightBoundingBox" we need to merge them since they can be different 87 | tbbA = cloudJSDataA["tightBoundingBox"] 88 | tbbB = cloudJSDataB["tightBoundingBox"] 89 | 90 | tbbO = {} 91 | tbbO["lx"] = min([tbbA["lx"], tbbB["lx"]]) 92 | tbbO["ly"] = min([tbbA["ly"], tbbB["ly"]]) 93 | tbbO["lz"] = min([tbbA["lz"], tbbB["lz"]]) 94 | tbbO["ux"] = max([tbbA["ux"], tbbB["ux"]]) 95 | tbbO["uy"] = max([tbbA["uy"], tbbB["uy"]]) 96 | tbbO["uz"] = max([tbbA["uz"], tbbB["uz"]]) 97 | cloudJSDataO["tightBoundingBox"] = tbbO 98 | 99 | hierarchyStepSize = cloudJSDataA['hierarchyStepSize'] 100 | 101 | cloudJSOFile = open(cloudJSO, 'w') 102 | cloudJSOFile.write(json.dumps(cloudJSDataO, indent=4)) 103 | cloudJSOFile.close() 104 | 105 | return hierarchyStepSize 106 | 107 | def run(inputFolderA, inputFolderB, outputFolder, moveFiles): 108 | # Check input parameters 109 | if (not os.path.isdir(inputFolderA)) or (not os.path.isdir(inputFolderB)): 110 | raise Exception('Error: Some of the input folder does not exist!') 111 | if os.path.isfile(outputFolder): 112 | raise Exception('Error: There is a file with the same name as the output folder. Please, delete it!') 113 | elif os.path.isdir(outputFolder) and os.listdir(outputFolder): 114 | raise Exception('Error: Output folder exists and it is not empty. Please, delete the data in the output folder!') 115 | 116 | # Make the paths absolute path 117 | inputFolderA = os.path.abspath(inputFolderA) 118 | inputFolderB = os.path.abspath(inputFolderB) 119 | outputFolder = os.path.abspath(outputFolder) 120 | 121 | if moveFiles: 122 | cmcommand = 'mv ' 123 | else: 124 | cmcommand = 'cp -r ' 125 | 126 | dataA = inputFolderA + '/data' 127 | dataB = inputFolderB + '/data' 128 | dataO = outputFolder + '/data' 129 | 130 | # Check if the octtrees have actual data (i.e. one folder with the root node) 131 | hasNodeA = os.listdir(dataA) == ['r',] 132 | hasNodeB = os.listdir(dataB) == ['r',] 133 | 134 | if hasNodeA or hasNodeB: 135 | utils.shellExecute('mkdir -p ' + outputFolder) 136 | if hasNodeA and hasNodeB: 137 | # If both Octrees have data we need to merge them 138 | # Create output cloud.js from joining the two input ones 139 | cloudJSA = inputFolderA + '/cloud.js' 140 | cloudJSB = inputFolderB + '/cloud.js' 141 | if not (os.path.isfile(cloudJSA)) or not (os.path.isfile(cloudJSB)): 142 | raise Exception('Error: Some cloud.js is missing!') 143 | # We also get the hierarchyStepSize 144 | hierarchyStepSize = createCloudJS(cloudJSA, cloudJSB, outputFolder + '/cloud.js') 145 | listFileRootA = os.listdir(dataA + '/r') 146 | if 'r.las' in listFileRootA: 147 | extension = 'las' 148 | elif 'r.laz' in listFileRootA: 149 | extension = 'laz' 150 | else: 151 | raise Exception('Error: ' + __file__ + ' only compatible with las/laz format') 152 | joinNode('r', dataA + '/r', dataB + '/r', dataO + '/r', hierarchyStepSize, extension, cmcommand) 153 | elif hasA: 154 | utils.shellExecute(cmcommand + inputFolderA + '/* ' + outputFolder) 155 | else: 156 | utils.shellExecute(cmcommand + inputFolderB + '/* ' + outputFolder) 157 | else: 158 | print ('Nothing to merge: both Octtrees are empty!') 159 | 160 | def argument_parser(): 161 | """ Define the arguments and return the parser object""" 162 | parser = argparse.ArgumentParser( 163 | description="Merge two Potree OctTrees into a single one") 164 | parser.add_argument('-a','--inputa',default='',help='Input Potree-OctTree A',type=str, required=True) 165 | parser.add_argument('-b','--inputb',default='',help='Input Potree-OctTree B',type=str, required=True) 166 | parser.add_argument('-o','--output',default='',help='Output Potree-OctTree',type=str, required=True) 167 | parser.add_argument('-m','--move',help='Use mv instead of cp when merging Potree-OctTrees. In this case the input data is partially dropped (but process will be faster due to less required IO) [default False]',default=False,action='store_true') 168 | return parser 169 | 170 | def main(): 171 | args = argument_parser().parse_args() 172 | print ('Input Potree Octtree A: ', args.inputa) 173 | print ('Input Potree Octtree B: ', args.inputb) 174 | print ('Output Potree Octtree: ', args.output) 175 | print ('Move: ', args.move) 176 | 177 | try: 178 | t0 = time.time() 179 | print ('Starting ' + os.path.basename(__file__) + '...') 180 | run(args.inputa, args.inputb, args.output, args.move) 181 | print ('Finished in %.2f seconds' % (time.time() - t0)) 182 | except: 183 | print ('Execution failed!') 184 | print (traceback.format_exc()) 185 | 186 | if __name__ == "__main__": 187 | main() 188 | -------------------------------------------------------------------------------- /pympc/merge_potree_all.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os, sys, time, argparse, traceback 3 | from pympc import utils 4 | from pympc import merge_potree 5 | 6 | def run(inputFolder, outputFolder, moveFiles): 7 | 8 | # List the elements in the input folder 9 | inputOctTrees = os.listdir(inputFolder) 10 | # Consider as valid Octrees the folder that have a data subdirectory 11 | validInputOctTrees = [] 12 | for inputOctTree in inputOctTrees: 13 | dataAbsPath = inputFolder + '/' + inputOctTree + '/data' 14 | if os.path.isdir(dataAbsPath) and len(os.listdir(dataAbsPath)): 15 | validInputOctTrees.append(inputOctTree) 16 | else: 17 | print ('Ignoring ' + inputOctTree) 18 | print (','.join(validInputOctTrees)) 19 | 20 | stdout = sys.stdout 21 | stderr = sys.stderr 22 | octTreeOutputFolder = None 23 | for mIndex in range(1, len(validInputOctTrees)): 24 | if mIndex == 1: 25 | octTreeAInputFolder = inputFolder + '/' + validInputOctTrees[0] 26 | else: 27 | octTreeAInputFolder = outputFolder + '/' + 'merged_%d' % (mIndex-1) 28 | octTreeBInputFolder = inputFolder + '/' + validInputOctTrees[mIndex] 29 | 30 | octTreeOutputFolder = outputFolder + '/' + 'merged_%d' % mIndex 31 | 32 | if os.path.isdir(octTreeOutputFolder): 33 | raise Exception(octTreeOutputFolder + ' already exists!') 34 | 35 | os.system('mkdir -p ' + octTreeOutputFolder) 36 | ofile = open(octTreeOutputFolder + '.log', 'w') 37 | efile = open(octTreeOutputFolder + '.err', 'w') 38 | sys.stdout = ofile 39 | sys.stderr = efile 40 | print ('Input Potree Octtree A: ', octTreeAInputFolder) 41 | print ('Input Potree Octtree B: ', octTreeBInputFolder) 42 | print ('Output Potree Octtree: ', octTreeOutputFolder) 43 | t0 = time.time() 44 | merge_potree.run(octTreeAInputFolder, octTreeBInputFolder, octTreeOutputFolder, moveFiles) 45 | print ('Finished in %.2f seconds' % (time.time() - t0)) 46 | ofile.close() 47 | efile.close() 48 | sys.stdout = stdout 49 | sys.stderr = stderr 50 | print("Final merged Potree-OctTree is in ", octTreeOutputFolder) 51 | 52 | def argument_parser(): 53 | """ Define the arguments and return the parser object""" 54 | parser = argparse.ArgumentParser( 55 | description="Merge all Potree-OctTrees in the input folder into a single one") 56 | parser.add_argument('-i','--input',default='',help='Input folder with different Potree-OctTrees',type=str, required=True) 57 | parser.add_argument('-o','--output',default='',help='Output/Execution folder for the various merging processes. The final merged Potree-OctTree will be a folder called merged_X with X the highest value',type=str, required=True) 58 | parser.add_argument('-m','--move',help='Use mv instead of cp when merging Potree-OctTrees. In this case the input data is partially dropped (but process will be faster due to less required IO) [default False]',default=False,action='store_true') 59 | return parser 60 | 61 | def main(): 62 | args = argument_parser().parse_args() 63 | print ('Input folder with Potree-OctTrees: ', args.input) 64 | print ('Output Potree OctTree: ', args.output) 65 | print ('Move: ', args.move) 66 | 67 | try: 68 | t0 = time.time() 69 | print ('Starting ' + os.path.basename(__file__) + '...') 70 | run(args.input, args.output, args.move) 71 | print ('Finished in %.2f seconds' % (time.time() - t0)) 72 | except: 73 | print ('Execution failed!') 74 | print (traceback.format_exc()) 75 | 76 | if __name__ == "__main__": 77 | main() 78 | -------------------------------------------------------------------------------- /pympc/sort_index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse, traceback, time, os, multiprocessing 3 | from pympc import utils 4 | 5 | def runProcess(processIndex, tasksQueue, resultsQueue, outputFolder, runMode, useLink): 6 | kill_received = False 7 | while not kill_received: 8 | tileAbsPath = None 9 | try: 10 | # This call will patiently wait until new job is available 11 | tileAbsPath = tasksQueue.get() 12 | except: 13 | # if there is an error we will quit 14 | kill_received = True 15 | if tileAbsPath == None: 16 | # If we receive a None job, it means we can stop 17 | kill_received = True 18 | else: 19 | tileOutputFolder = outputFolder + '/' + os.path.basename(tileAbsPath) 20 | tileFilesAbsPaths = utils.getFiles(tileAbsPath, recursive = True) 21 | for i,tileFileAbsPath in enumerate(tileFilesAbsPaths): 22 | if (len(tileFilesAbsPaths) == 1) and os.path.isfile(tileAbsPath): 23 | outputAbsPath = tileOutputFolder 24 | else: 25 | if i == 0: 26 | os.system('mkdir -p ' + tileOutputFolder) 27 | outputAbsPath = outputFolder + '/' + os.path.basename(tileAbsPath) + '/' + os.path.basename(tileFileAbsPath) 28 | commands = [] 29 | if 's' in runMode: 30 | cmd = os.environ["LASSORT"] 31 | commands.append(cmd + ' -i ' + tileFileAbsPath + ' -o ' + outputAbsPath) 32 | else: 33 | if useLink: 34 | commands.append('ln -s ' + tileFileAbsPath + ' ' + outputAbsPath) 35 | else: 36 | commands.append('cp ' + tileFileAbsPath + ' ' + outputAbsPath) 37 | if 'i' in runMode: 38 | commands.append('lasindex -i ' + outputAbsPath) 39 | for command in commands: 40 | utils.shellExecute(command, True) 41 | resultsQueue.put((processIndex, tileAbsPath)) 42 | 43 | def run(inputFolder, outputFolder, runMode, useLink, numberProcs): 44 | # Check input parameters 45 | if not os.path.isdir(inputFolder): 46 | raise Exception('Error: Input folder does not exist!') 47 | if os.path.isfile(outputFolder): 48 | raise Exception('Error: There is a file with the same name as the output folder. Please, delete it!') 49 | elif os.path.isdir(outputFolder) and os.listdir(outputFolder): 50 | raise Exception('Error: Output folder exists and it is not empty. Please, delete the data in the output folder!') 51 | if runMode not in ('s','i','si','is'): 52 | raise Exception('Error: running mode must be s, i, or si') 53 | 54 | 55 | # Make it absolute path 56 | inputFolder = os.path.abspath(inputFolder) 57 | 58 | utils.shellExecute('mkdir -p ' + outputFolder) 59 | 60 | # Create queues for the distributed processing 61 | tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles) 62 | resultsQueue = multiprocessing.Queue() # The queue of results 63 | 64 | tilesNames = os.listdir(inputFolder) 65 | if 'tiles.js' in tilesNames: 66 | tilesNames.remove('tiles.js') 67 | utils.shellExecute('cp ' + inputFolder + '/tiles.js ' + outputFolder+ '/tiles.js') 68 | 69 | numTiles = len(tilesNames) 70 | 71 | # Add tasks/inputFiles 72 | for i in range(numTiles): 73 | tasksQueue.put(inputFolder + '/' + tilesNames[i]) 74 | for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO) 75 | tasksQueue.put(None) 76 | 77 | processes = [] 78 | # We start numberProcs users processes 79 | for i in range(numberProcs): 80 | processes.append(multiprocessing.Process(target=runProcess, 81 | args=(i, tasksQueue, resultsQueue, outputFolder, runMode, useLink))) 82 | processes[-1].start() 83 | 84 | # Get all the results (actually we do not need the returned values) 85 | for i in range(numTiles): 86 | resultsQueue.get() 87 | print ('Completed %d of %d (%.02f%%)' % (i+1, numTiles, 100. * float(i+1) / float(numTiles))) 88 | # wait for all users to finish their execution 89 | for i in range(numberProcs): 90 | processes[i].join() 91 | 92 | 93 | 94 | def argument_parser(): 95 | """ Define the arguments and return the parser object""" 96 | parser = argparse.ArgumentParser( 97 | description="Sort and index with LAStools a folder with point cloud elements (LAS/LAZ files or subfolders containing LAS/LAZ files). The sorted/index data is copied in the output folder. To enable ") 98 | parser.add_argument('-i','--input',default='',help='Input folder with the point cloud elements (a element can be a single LAS/LAZ file or a folder with LAS/LAZ files)',type=str, required=True) 99 | parser.add_argument('-o','--output',default='',help='Output folder for the sorted and indexed elements',type=str, required=True) 100 | parser.add_argument('-m','--mode',default='si',help='Running mode. Specify s to run only the lassort, i to run only the lasindex and si to run both. [default is si, i.e. to run both lassort and lasindex].',type=str) 101 | parser.add_argument('-l','--link',help='Use ln -s instead of cp when filling in the output folder. This only applies if mode is i, i.e. no sorting [default False]',default=False,action='store_true') 102 | parser.add_argument('-c','--proc',default=1,help='Number of processes [default is 1]',type=int) 103 | return parser 104 | 105 | def main(): 106 | args = argument_parser().parse_args() 107 | print ('Input folder: ', args.input) 108 | print ('Output folder: ', args.output) 109 | print ('Mode: ', args.mode) 110 | print ('Use Link: ', args.link) 111 | print ('Number of processes: ', args.proc) 112 | 113 | # Check the LAStools lassort.exe is installed 114 | if ('s' in args.mode): 115 | cmd = os.environ.get("LASSORT",None) 116 | if(not cmd or utils.shellExecute(cmd + ' -version').count('LAStools') == 0): 117 | raise Exception("LAStools lassort.exe is not found!. Please define LASSORT environment variable!") 118 | try: 119 | t0 = time.time() 120 | print ('Starting ' + os.path.basename(__file__) + '...') 121 | run(args.input, args.output, args.mode, args.link, args.proc) 122 | print ('Finished in %.2f seconds' % (time.time() - t0)) 123 | except: 124 | print ('Execution failed!') 125 | print (traceback.format_exc()) 126 | 127 | if __name__ == "__main__": 128 | main() 129 | -------------------------------------------------------------------------------- /pympc/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Various methods reused in main scripts""" 3 | import sys, os, glob2, subprocess, struct, numpy, math, multiprocessing 4 | 5 | PC_FILE_FORMATS = ['las','laz'] 6 | OCTTREE_NODE_NUM_CHILDREN = 8 7 | 8 | DB_NAME = 'pc_extents' 9 | DB_TABLE_RAW = 'extent_raw' 10 | DB_TABLE_POTREE = 'extent_potree' 11 | DB_TABLE_POTREE_DIST = 'potree_dist' 12 | 13 | def shellExecute(command, showOutErr = False): 14 | """ Execute the command in the SHELL and shows both stdout and stderr""" 15 | print(command) 16 | (out,err) = subprocess.Popen(command, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() 17 | r = '\n'.join((out.decode("utf-8") , err.decode("utf-8"))) 18 | if showOutErr: 19 | print(r) 20 | return r 21 | 22 | def getUserName(): 23 | return os.popen('whoami').read().replace('\n','') 24 | 25 | def getConnectString(dbName = None, userName= None, password = None, dbHost = None, dbPort = None, cline = False): 26 | """ Gets the connection string to be used by psycopg2 (if cline is False) 27 | or by psql (if cline is True)""" 28 | connString='' 29 | if cline: 30 | if dbName != None and dbName != '': 31 | connString += " " + dbName 32 | if userName != None and userName != '': 33 | connString += " -U " + userName 34 | if password != None and password != '': 35 | os.environ['PGPASSWORD'] = password 36 | if dbHost != None and dbHost != '': 37 | connString += " -h " + dbHost 38 | if dbPort != None and dbPort != '': 39 | connString += " -p " + dbPort 40 | else: 41 | if dbName != None and dbName != '': 42 | connString += " dbname=" + dbName 43 | if userName != None and userName != '': 44 | connString += " user=" + userName 45 | if password != None and password != '': 46 | connString += " password=" + password 47 | if dbHost != None and dbHost != '': 48 | connString += " host=" + dbHost 49 | if dbPort != None and dbPort != '': 50 | connString += " port=" + dbPort 51 | return connString 52 | 53 | def getFiles(inputElement, extensions = PC_FILE_FORMATS, recursive = False): 54 | """ Get the list of files with certain extensions contained in the folder (and possible 55 | subfolders) given by inputElement. If inputElement is directly a file it 56 | returns a list with only one element, the given file """ 57 | # If extensions is not a list but a string we converted to a list 58 | if type(extensions) == str: 59 | extensions = [extensions,] 60 | # If input element is file, return it 61 | if(os.path.isfile(inputElement)): 62 | fname,fext = os.path.splitext(inputElement) 63 | return [inputElement] if fext.lower() in extensions else [] 64 | # Else, use recursive globbing 65 | files = [] 66 | globpath = os.path.join(inputElement,'**') if recursive else inputElement 67 | for ext in extensions: 68 | files.extend(glob2.glob(os.path.join(globpath,'*.' + ext))) 69 | files.extend(glob2.glob(os.path.join(globpath,'*.' + ext.upper()))) 70 | return list(set(files)) 71 | 72 | 73 | def getPCFileDetails(absPath): 74 | """ Get the details (count numPoints and extent) of a LAS/LAZ file (using LAStools, hence it is fast)""" 75 | count = None 76 | (minX, minY, minZ, maxX, maxY, maxZ) = (None, None, None, None, None, None) 77 | (scaleX, scaleY, scaleZ) = (None, None, None) 78 | (offsetX, offsetY, offsetZ) = (None, None, None) 79 | 80 | command = 'lasinfo ' + absPath + ' -nc -nv -nco' 81 | for line in shellExecute(command).split('\n'): 82 | if line.count('min x y z:'): 83 | [minX, minY, minZ] = line.split(':')[-1].strip().split(' ') 84 | minX = float(minX) 85 | minY = float(minY) 86 | minZ = float(minZ) 87 | elif line.count('max x y z:'): 88 | [maxX, maxY, maxZ] = line.split(':')[-1].strip().split(' ') 89 | maxX = float(maxX) 90 | maxY = float(maxY) 91 | maxZ = float(maxZ) 92 | elif line.count('number of point records:'): 93 | count = int(line.split(':')[-1].strip()) 94 | elif line.count('scale factor x y z:'): 95 | [scaleX, scaleY, scaleZ] = line.split(':')[-1].strip().split(' ') 96 | scaleX = float(scaleX) 97 | scaleY = float(scaleY) 98 | scaleZ = float(scaleZ) 99 | elif line.count('offset x y z:'): 100 | [offsetX, offsetY, offsetZ] = line.split(':')[-1].strip().split(' ') 101 | offsetX = float(offsetX) 102 | offsetY = float(offsetY) 103 | offsetZ = float(offsetZ) 104 | return (count, minX, minY, minZ, maxX, maxY, maxZ, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ) 105 | 106 | def getPCFolderDetails(absPath, numProc = 1): 107 | """ Get the details (count numPoints and extent) of a folder with LAS/LAZ files (using LAStools)""" 108 | tcount = 0 109 | (tminx, tminy, tminz, tmaxx, tmaxy, tmaxz) = (None, None, None, None, None, None) 110 | (tscalex, tscaley, tscalez) = (None, None, None) 111 | 112 | if os.path.isdir(absPath): 113 | inputFiles = getFiles(absPath, recursive=True) 114 | else: 115 | inputFiles = [absPath,] 116 | 117 | numInputFiles = len(inputFiles) 118 | 119 | tasksQueue = multiprocessing.Queue() # The queue of tasks 120 | detailsQueue = multiprocessing.Queue() # The queue of results/details 121 | 122 | for i in range(numInputFiles): 123 | tasksQueue.put(inputFiles[i]) 124 | for i in range(numProc): #we add as many None jobs as numProc to tell them to terminate (queue is FIFO) 125 | tasksQueue.put(None) 126 | 127 | workers = [] 128 | # We start numProc users workers 129 | for i in range(numProc): 130 | workers.append(multiprocessing.Process(target=runProcGetPCFolderDetailsWorker, 131 | args=(tasksQueue, detailsQueue))) 132 | workers[-1].start() 133 | 134 | for i in range(numInputFiles): 135 | sys.stdout.write('\r') 136 | (count, minx, miny, minz, maxx, maxy, maxz, scalex, scaley, scalez, _, _, _) = detailsQueue.get() 137 | if i == 0: 138 | (tscalex, tscaley, tscalez) = (scalex, scaley, scalez) 139 | 140 | tcount += count 141 | if count: 142 | if tminx == None or minx < tminx: 143 | tminx = minx 144 | if tminy == None or miny < tminy: 145 | tminy = miny 146 | if tminz == None or minz < tminz: 147 | tminz = minz 148 | if tmaxx == None or maxx > tmaxx: 149 | tmaxx = maxx 150 | if tmaxy == None or maxy > tmaxy: 151 | tmaxy = maxy 152 | if tmaxz == None or maxz > tmaxz: 153 | tmaxz = maxz 154 | sys.stdout.write("\rCompleted %.02f%%" % (100. * float(i) / float(numInputFiles))) 155 | sys.stdout.flush() 156 | sys.stdout.write('\r') 157 | sys.stdout.write('\rCompleted 100.00%!') 158 | 159 | # wait for all users to finish their execution 160 | for i in range(numProc): 161 | workers[i].join() 162 | 163 | print() 164 | return (inputFiles, tcount, tminx, tminy, tminz, tmaxx, tmaxy, tmaxz, tscalex, tscaley, tscalez) 165 | 166 | def runProcGetPCFolderDetailsWorker(tasksQueue, detailsQueue): 167 | kill_received = False 168 | while not kill_received: 169 | job = None 170 | try: 171 | # This call will patiently wait until new job is available 172 | job = tasksQueue.get() 173 | except: 174 | # if there is an error we will quit the loop 175 | kill_received = True 176 | if job == None: 177 | kill_received = True 178 | else: 179 | detailsQueue.put(getPCFileDetails(job)) 180 | 181 | 182 | def getNode(binaryFile, level, data, lastInLevel, hierarchyStepSize): 183 | # Read a node from the binary file 184 | b = struct.unpack('B', binaryFile.read(1))[0] 185 | n = struct.unpack('I', binaryFile.read(4))[0] 186 | 187 | for i in range(OCTTREE_NODE_NUM_CHILDREN): 188 | # We will store a positive number if the child i exists, 0 otherwise 189 | data[level].append((1< 0)[::-1].index(True) # We get the index of the last node in the next level which has data 194 | for j in range(lastInNextLevel + 1): # For all the nodes until the one with data, we read the node in the next level 195 | if data[level][j]: 196 | data[level][j] = getNode(binaryFile, level+1, data, j == lastInNextLevel, hierarchyStepSize) 197 | else: 198 | data[level+1].extend([0] * OCTTREE_NODE_NUM_CHILDREN) # If there is no data we still fill 0s to have consistent trees 199 | return n 200 | 201 | def initHRC(hierarchyStepSize): 202 | data = {} 203 | for i in range(hierarchyStepSize+2): #In each HRC file we have info about the current level, hierarchyStepSize more, and only existence info in hierarchyStepSize+1, so total of hierarchyStepSize+2 204 | data[i] = [] 205 | return data 206 | 207 | def readHRC(hrcFileAbsPath, hierarchyStepSize): 208 | data = initHRC(hierarchyStepSize) 209 | data[0].append(getNode(open(hrcFileAbsPath, "rb"), 1, data, True, hierarchyStepSize)) 210 | return data 211 | 212 | def writeHRC(hrcFileAbsPath, hierarchyStepSize, data): 213 | oFile = open(hrcFileAbsPath, "wb") 214 | for i in range(hierarchyStepSize+1): 215 | for j in range(len(data[i])): 216 | if data[i][j]: 217 | m = data[i+1][OCTTREE_NODE_NUM_CHILDREN*j:OCTTREE_NODE_NUM_CHILDREN*(j+1)] 218 | mask= 0 219 | for k in range(OCTTREE_NODE_NUM_CHILDREN): 220 | if k < len(m) and m[k]: 221 | mask += 1<