├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── conf ├── lopocs.sample.yml └── lopocs.uwsgi.sample.yml ├── docs ├── advanced_usage.rst ├── airport.png ├── api.png ├── api_3dtiles.png ├── api_greyhound.png ├── api_infos.png ├── grandlyon.png ├── itowns_montreal1_header.png ├── lopocs.png ├── lopocs.svg ├── potree_schema_scale_001.sql ├── potree_schema_scale_01.sql └── stsulpice.png ├── lopocs ├── __init__.py ├── app.py ├── cesium.py ├── cli.py ├── conf.py ├── database.py ├── greyhound.py ├── potreeschema.py ├── stats.py ├── threedtiles.py ├── utils.py └── wsgi.py ├── setup.py └── tests ├── __init__.py └── test_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | lopocs.egg-info/ 3 | dist/ 4 | venv/ 5 | conf/lopocs.uwsgi.yml 6 | conf/lopocs.yml 7 | outdir 8 | *.las 9 | *.pyc 10 | *.e57 11 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | 4 | language: python 5 | python: 6 | - "3.5" 7 | 8 | before_install: 9 | - sudo apt-get install libgdal-dev 10 | 11 | install: 12 | - pip install --upgrade pip 13 | - pip install -e .[dev] 14 | 15 | script: py.test 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 2.1, February 1999 3 | 4 | Copyright (C) 1991, 1999 Free Software Foundation, Inc. 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | (This is the first released version of the Lesser GPL. It also counts 10 | as the successor of the GNU Library Public License, version 2, hence 11 | the version number 2.1.) 12 | 13 | Preamble 14 | 15 | The licenses for most software are designed to take away your 16 | freedom to share and change it. By contrast, the GNU General Public 17 | Licenses are intended to guarantee your freedom to share and change 18 | free software--to make sure the software is free for all its users. 19 | 20 | This license, the Lesser General Public License, applies to some 21 | specially designated software packages--typically libraries--of the 22 | Free Software Foundation and other authors who decide to use it. You 23 | can use it too, but we suggest you first think carefully about whether 24 | this license or the ordinary General Public License is the better 25 | strategy to use in any particular case, based on the explanations below. 26 | 27 | When we speak of free software, we are referring to freedom of use, 28 | not price. Our General Public Licenses are designed to make sure that 29 | you have the freedom to distribute copies of free software (and charge 30 | for this service if you wish); that you receive source code or can get 31 | it if you want it; that you can change the software and use pieces of 32 | it in new free programs; and that you are informed that you can do 33 | these things. 34 | 35 | To protect your rights, we need to make restrictions that forbid 36 | distributors to deny you these rights or to ask you to surrender these 37 | rights. These restrictions translate to certain responsibilities for 38 | you if you distribute copies of the library or if you modify it. 
39 | 40 | For example, if you distribute copies of the library, whether gratis 41 | or for a fee, you must give the recipients all the rights that we gave 42 | you. You must make sure that they, too, receive or can get the source 43 | code. If you link other code with the library, you must provide 44 | complete object files to the recipients, so that they can relink them 45 | with the library after making changes to the library and recompiling 46 | it. And you must show them these terms so they know their rights. 47 | 48 | We protect your rights with a two-step method: (1) we copyright the 49 | library, and (2) we offer you this license, which gives you legal 50 | permission to copy, distribute and/or modify the library. 51 | 52 | To protect each distributor, we want to make it very clear that 53 | there is no warranty for the free library. Also, if the library is 54 | modified by someone else and passed on, the recipients should know 55 | that what they have is not the original version, so that the original 56 | author's reputation will not be affected by problems that might be 57 | introduced by others. 58 | 59 | Finally, software patents pose a constant threat to the existence of 60 | any free program. We wish to make sure that a company cannot 61 | effectively restrict the users of a free program by obtaining a 62 | restrictive license from a patent holder. Therefore, we insist that 63 | any patent license obtained for a version of the library must be 64 | consistent with the full freedom of use specified in this license. 65 | 66 | Most GNU software, including some libraries, is covered by the 67 | ordinary GNU General Public License. This license, the GNU Lesser 68 | General Public License, applies to certain designated libraries, and 69 | is quite different from the ordinary General Public License. We use 70 | this license for certain libraries in order to permit linking those 71 | libraries into non-free programs. 72 | 73 | When a program is linked with a library, whether statically or using 74 | a shared library, the combination of the two is legally speaking a 75 | combined work, a derivative of the original library. The ordinary 76 | General Public License therefore permits such linking only if the 77 | entire combination fits its criteria of freedom. The Lesser General 78 | Public License permits more lax criteria for linking other code with 79 | the library. 80 | 81 | We call this license the "Lesser" General Public License because it 82 | does Less to protect the user's freedom than the ordinary General 83 | Public License. It also provides other free software developers Less 84 | of an advantage over competing non-free programs. These disadvantages 85 | are the reason we use the ordinary General Public License for many 86 | libraries. However, the Lesser license provides advantages in certain 87 | special circumstances. 88 | 89 | For example, on rare occasions, there may be a special need to 90 | encourage the widest possible use of a certain library, so that it becomes 91 | a de-facto standard. To achieve this, non-free programs must be 92 | allowed to use the library. A more frequent case is that a free 93 | library does the same job as widely used non-free libraries. In this 94 | case, there is little to gain by limiting the free library to free 95 | software only, so we use the Lesser General Public License. 
96 | 97 | In other cases, permission to use a particular library in non-free 98 | programs enables a greater number of people to use a large body of 99 | free software. For example, permission to use the GNU C Library in 100 | non-free programs enables many more people to use the whole GNU 101 | operating system, as well as its variant, the GNU/Linux operating 102 | system. 103 | 104 | Although the Lesser General Public License is Less protective of the 105 | users' freedom, it does ensure that the user of a program that is 106 | linked with the Library has the freedom and the wherewithal to run 107 | that program using a modified version of the Library. 108 | 109 | The precise terms and conditions for copying, distribution and 110 | modification follow. Pay close attention to the difference between a 111 | "work based on the library" and a "work that uses the library". The 112 | former contains code derived from the library, whereas the latter must 113 | be combined with the library in order to run. 114 | 115 | GNU LESSER GENERAL PUBLIC LICENSE 116 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 117 | 118 | 0. This License Agreement applies to any software library or other 119 | program which contains a notice placed by the copyright holder or 120 | other authorized party saying it may be distributed under the terms of 121 | this Lesser General Public License (also called "this License"). 122 | Each licensee is addressed as "you". 123 | 124 | A "library" means a collection of software functions and/or data 125 | prepared so as to be conveniently linked with application programs 126 | (which use some of those functions and data) to form executables. 127 | 128 | The "Library", below, refers to any such software library or work 129 | which has been distributed under these terms. A "work based on the 130 | Library" means either the Library or any derivative work under 131 | copyright law: that is to say, a work containing the Library or a 132 | portion of it, either verbatim or with modifications and/or translated 133 | straightforwardly into another language. (Hereinafter, translation is 134 | included without limitation in the term "modification".) 135 | 136 | "Source code" for a work means the preferred form of the work for 137 | making modifications to it. For a library, complete source code means 138 | all the source code for all modules it contains, plus any associated 139 | interface definition files, plus the scripts used to control compilation 140 | and installation of the library. 141 | 142 | Activities other than copying, distribution and modification are not 143 | covered by this License; they are outside its scope. The act of 144 | running a program using the Library is not restricted, and output from 145 | such a program is covered only if its contents constitute a work based 146 | on the Library (independent of the use of the Library in a tool for 147 | writing it). Whether that is true depends on what the Library does 148 | and what the program that uses the Library does. 149 | 150 | 1. You may copy and distribute verbatim copies of the Library's 151 | complete source code as you receive it, in any medium, provided that 152 | you conspicuously and appropriately publish on each copy an 153 | appropriate copyright notice and disclaimer of warranty; keep intact 154 | all the notices that refer to this License and to the absence of any 155 | warranty; and distribute a copy of this License along with the 156 | Library. 
157 | 158 | You may charge a fee for the physical act of transferring a copy, 159 | and you may at your option offer warranty protection in exchange for a 160 | fee. 161 | 162 | 2. You may modify your copy or copies of the Library or any portion 163 | of it, thus forming a work based on the Library, and copy and 164 | distribute such modifications or work under the terms of Section 1 165 | above, provided that you also meet all of these conditions: 166 | 167 | a) The modified work must itself be a software library. 168 | 169 | b) You must cause the files modified to carry prominent notices 170 | stating that you changed the files and the date of any change. 171 | 172 | c) You must cause the whole of the work to be licensed at no 173 | charge to all third parties under the terms of this License. 174 | 175 | d) If a facility in the modified Library refers to a function or a 176 | table of data to be supplied by an application program that uses 177 | the facility, other than as an argument passed when the facility 178 | is invoked, then you must make a good faith effort to ensure that, 179 | in the event an application does not supply such function or 180 | table, the facility still operates, and performs whatever part of 181 | its purpose remains meaningful. 182 | 183 | (For example, a function in a library to compute square roots has 184 | a purpose that is entirely well-defined independent of the 185 | application. Therefore, Subsection 2d requires that any 186 | application-supplied function or table used by this function must 187 | be optional: if the application does not supply it, the square 188 | root function must still compute square roots.) 189 | 190 | These requirements apply to the modified work as a whole. If 191 | identifiable sections of that work are not derived from the Library, 192 | and can be reasonably considered independent and separate works in 193 | themselves, then this License, and its terms, do not apply to those 194 | sections when you distribute them as separate works. But when you 195 | distribute the same sections as part of a whole which is a work based 196 | on the Library, the distribution of the whole must be on the terms of 197 | this License, whose permissions for other licensees extend to the 198 | entire whole, and thus to each and every part regardless of who wrote 199 | it. 200 | 201 | Thus, it is not the intent of this section to claim rights or contest 202 | your rights to work written entirely by you; rather, the intent is to 203 | exercise the right to control the distribution of derivative or 204 | collective works based on the Library. 205 | 206 | In addition, mere aggregation of another work not based on the Library 207 | with the Library (or with a work based on the Library) on a volume of 208 | a storage or distribution medium does not bring the other work under 209 | the scope of this License. 210 | 211 | 3. You may opt to apply the terms of the ordinary GNU General Public 212 | License instead of this License to a given copy of the Library. To do 213 | this, you must alter all the notices that refer to this License, so 214 | that they refer to the ordinary GNU General Public License, version 2, 215 | instead of to this License. (If a newer version than version 2 of the 216 | ordinary GNU General Public License has appeared, then you can specify 217 | that version instead if you wish.) Do not make any other change in 218 | these notices. 
219 | 220 | Once this change is made in a given copy, it is irreversible for 221 | that copy, so the ordinary GNU General Public License applies to all 222 | subsequent copies and derivative works made from that copy. 223 | 224 | This option is useful when you wish to copy part of the code of 225 | the Library into a program that is not a library. 226 | 227 | 4. You may copy and distribute the Library (or a portion or 228 | derivative of it, under Section 2) in object code or executable form 229 | under the terms of Sections 1 and 2 above provided that you accompany 230 | it with the complete corresponding machine-readable source code, which 231 | must be distributed under the terms of Sections 1 and 2 above on a 232 | medium customarily used for software interchange. 233 | 234 | If distribution of object code is made by offering access to copy 235 | from a designated place, then offering equivalent access to copy the 236 | source code from the same place satisfies the requirement to 237 | distribute the source code, even though third parties are not 238 | compelled to copy the source along with the object code. 239 | 240 | 5. A program that contains no derivative of any portion of the 241 | Library, but is designed to work with the Library by being compiled or 242 | linked with it, is called a "work that uses the Library". Such a 243 | work, in isolation, is not a derivative work of the Library, and 244 | therefore falls outside the scope of this License. 245 | 246 | However, linking a "work that uses the Library" with the Library 247 | creates an executable that is a derivative of the Library (because it 248 | contains portions of the Library), rather than a "work that uses the 249 | library". The executable is therefore covered by this License. 250 | Section 6 states terms for distribution of such executables. 251 | 252 | When a "work that uses the Library" uses material from a header file 253 | that is part of the Library, the object code for the work may be a 254 | derivative work of the Library even though the source code is not. 255 | Whether this is true is especially significant if the work can be 256 | linked without the Library, or if the work is itself a library. The 257 | threshold for this to be true is not precisely defined by law. 258 | 259 | If such an object file uses only numerical parameters, data 260 | structure layouts and accessors, and small macros and small inline 261 | functions (ten lines or less in length), then the use of the object 262 | file is unrestricted, regardless of whether it is legally a derivative 263 | work. (Executables containing this object code plus portions of the 264 | Library will still fall under Section 6.) 265 | 266 | Otherwise, if the work is a derivative of the Library, you may 267 | distribute the object code for the work under the terms of Section 6. 268 | Any executables containing that work also fall under Section 6, 269 | whether or not they are linked directly with the Library itself. 270 | 271 | 6. As an exception to the Sections above, you may also combine or 272 | link a "work that uses the Library" with the Library to produce a 273 | work containing portions of the Library, and distribute that work 274 | under terms of your choice, provided that the terms permit 275 | modification of the work for the customer's own use and reverse 276 | engineering for debugging such modifications. 
277 | 278 | You must give prominent notice with each copy of the work that the 279 | Library is used in it and that the Library and its use are covered by 280 | this License. You must supply a copy of this License. If the work 281 | during execution displays copyright notices, you must include the 282 | copyright notice for the Library among them, as well as a reference 283 | directing the user to the copy of this License. Also, you must do one 284 | of these things: 285 | 286 | a) Accompany the work with the complete corresponding 287 | machine-readable source code for the Library including whatever 288 | changes were used in the work (which must be distributed under 289 | Sections 1 and 2 above); and, if the work is an executable linked 290 | with the Library, with the complete machine-readable "work that 291 | uses the Library", as object code and/or source code, so that the 292 | user can modify the Library and then relink to produce a modified 293 | executable containing the modified Library. (It is understood 294 | that the user who changes the contents of definitions files in the 295 | Library will not necessarily be able to recompile the application 296 | to use the modified definitions.) 297 | 298 | b) Use a suitable shared library mechanism for linking with the 299 | Library. A suitable mechanism is one that (1) uses at run time a 300 | copy of the library already present on the user's computer system, 301 | rather than copying library functions into the executable, and (2) 302 | will operate properly with a modified version of the library, if 303 | the user installs one, as long as the modified version is 304 | interface-compatible with the version that the work was made with. 305 | 306 | c) Accompany the work with a written offer, valid for at 307 | least three years, to give the same user the materials 308 | specified in Subsection 6a, above, for a charge no more 309 | than the cost of performing this distribution. 310 | 311 | d) If distribution of the work is made by offering access to copy 312 | from a designated place, offer equivalent access to copy the above 313 | specified materials from the same place. 314 | 315 | e) Verify that the user has already received a copy of these 316 | materials or that you have already sent this user a copy. 317 | 318 | For an executable, the required form of the "work that uses the 319 | Library" must include any data and utility programs needed for 320 | reproducing the executable from it. However, as a special exception, 321 | the materials to be distributed need not include anything that is 322 | normally distributed (in either source or binary form) with the major 323 | components (compiler, kernel, and so on) of the operating system on 324 | which the executable runs, unless that component itself accompanies 325 | the executable. 326 | 327 | It may happen that this requirement contradicts the license 328 | restrictions of other proprietary libraries that do not normally 329 | accompany the operating system. Such a contradiction means you cannot 330 | use both them and the Library together in an executable that you 331 | distribute. 332 | 333 | 7. 
You may place library facilities that are a work based on the 334 | Library side-by-side in a single library together with other library 335 | facilities not covered by this License, and distribute such a combined 336 | library, provided that the separate distribution of the work based on 337 | the Library and of the other library facilities is otherwise 338 | permitted, and provided that you do these two things: 339 | 340 | a) Accompany the combined library with a copy of the same work 341 | based on the Library, uncombined with any other library 342 | facilities. This must be distributed under the terms of the 343 | Sections above. 344 | 345 | b) Give prominent notice with the combined library of the fact 346 | that part of it is a work based on the Library, and explaining 347 | where to find the accompanying uncombined form of the same work. 348 | 349 | 8. You may not copy, modify, sublicense, link with, or distribute 350 | the Library except as expressly provided under this License. Any 351 | attempt otherwise to copy, modify, sublicense, link with, or 352 | distribute the Library is void, and will automatically terminate your 353 | rights under this License. However, parties who have received copies, 354 | or rights, from you under this License will not have their licenses 355 | terminated so long as such parties remain in full compliance. 356 | 357 | 9. You are not required to accept this License, since you have not 358 | signed it. However, nothing else grants you permission to modify or 359 | distribute the Library or its derivative works. These actions are 360 | prohibited by law if you do not accept this License. Therefore, by 361 | modifying or distributing the Library (or any work based on the 362 | Library), you indicate your acceptance of this License to do so, and 363 | all its terms and conditions for copying, distributing or modifying 364 | the Library or works based on it. 365 | 366 | 10. Each time you redistribute the Library (or any work based on the 367 | Library), the recipient automatically receives a license from the 368 | original licensor to copy, distribute, link with or modify the Library 369 | subject to these terms and conditions. You may not impose any further 370 | restrictions on the recipients' exercise of the rights granted herein. 371 | You are not responsible for enforcing compliance by third parties with 372 | this License. 373 | 374 | 11. If, as a consequence of a court judgment or allegation of patent 375 | infringement or for any other reason (not limited to patent issues), 376 | conditions are imposed on you (whether by court order, agreement or 377 | otherwise) that contradict the conditions of this License, they do not 378 | excuse you from the conditions of this License. If you cannot 379 | distribute so as to satisfy simultaneously your obligations under this 380 | License and any other pertinent obligations, then as a consequence you 381 | may not distribute the Library at all. For example, if a patent 382 | license would not permit royalty-free redistribution of the Library by 383 | all those who receive copies directly or indirectly through you, then 384 | the only way you could satisfy both it and this License would be to 385 | refrain entirely from distribution of the Library. 386 | 387 | If any portion of this section is held invalid or unenforceable under any 388 | particular circumstance, the balance of the section is intended to apply, 389 | and the section as a whole is intended to apply in other circumstances. 
390 | 391 | It is not the purpose of this section to induce you to infringe any 392 | patents or other property right claims or to contest validity of any 393 | such claims; this section has the sole purpose of protecting the 394 | integrity of the free software distribution system which is 395 | implemented by public license practices. Many people have made 396 | generous contributions to the wide range of software distributed 397 | through that system in reliance on consistent application of that 398 | system; it is up to the author/donor to decide if he or she is willing 399 | to distribute software through any other system and a licensee cannot 400 | impose that choice. 401 | 402 | This section is intended to make thoroughly clear what is believed to 403 | be a consequence of the rest of this License. 404 | 405 | 12. If the distribution and/or use of the Library is restricted in 406 | certain countries either by patents or by copyrighted interfaces, the 407 | original copyright holder who places the Library under this License may add 408 | an explicit geographical distribution limitation excluding those countries, 409 | so that distribution is permitted only in or among countries not thus 410 | excluded. In such case, this License incorporates the limitation as if 411 | written in the body of this License. 412 | 413 | 13. The Free Software Foundation may publish revised and/or new 414 | versions of the Lesser General Public License from time to time. 415 | Such new versions will be similar in spirit to the present version, 416 | but may differ in detail to address new problems or concerns. 417 | 418 | Each version is given a distinguishing version number. If the Library 419 | specifies a version number of this License which applies to it and 420 | "any later version", you have the option of following the terms and 421 | conditions either of that version or of any later version published by 422 | the Free Software Foundation. If the Library does not specify a 423 | license version number, you may choose any version ever published by 424 | the Free Software Foundation. 425 | 426 | 14. If you wish to incorporate parts of the Library into other free 427 | programs whose distribution conditions are incompatible with these, 428 | write to the author to ask for permission. For software which is 429 | copyrighted by the Free Software Foundation, write to the Free 430 | Software Foundation; we sometimes make exceptions for this. Our 431 | decision will be guided by the two goals of preserving the free status 432 | of all derivatives of our free software and of promoting the sharing 433 | and reuse of software generally. 434 | 435 | NO WARRANTY 436 | 437 | 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO 438 | WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 439 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR 440 | OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY 441 | KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE 442 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 443 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE 444 | LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME 445 | THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 446 | 447 | 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
448 | WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
449 | AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
450 | FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
451 | CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
452 | LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
453 | RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
454 | FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
455 | SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
456 | DAMAGES.
457 |
458 | END OF TERMS AND CONDITIONS
459 |
460 | How to Apply These Terms to Your New Libraries
461 |
462 | If you develop a new library, and you want it to be of the greatest
463 | possible use to the public, we recommend making it free software that
464 | everyone can redistribute and change. You can do so by permitting
465 | redistribution under these terms (or, alternatively, under the terms of the
466 | ordinary General Public License).
467 |
468 | To apply these terms, attach the following notices to the library. It is
469 | safest to attach them to the start of each source file to most effectively
470 | convey the exclusion of warranty; and each file should have at least the
471 | "copyright" line and a pointer to where the full notice is found.
472 |
473 | {description}
474 | Copyright (C) {year} {fullname}
475 |
476 | This library is free software; you can redistribute it and/or
477 | modify it under the terms of the GNU Lesser General Public
478 | License as published by the Free Software Foundation; either
479 | version 2.1 of the License, or (at your option) any later version.
480 |
481 | This library is distributed in the hope that it will be useful,
482 | but WITHOUT ANY WARRANTY; without even the implied warranty of
483 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
484 | Lesser General Public License for more details.
485 |
486 | You should have received a copy of the GNU Lesser General Public
487 | License along with this library; if not, write to the Free Software
488 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
489 | USA
490 |
491 | Also add information on how to contact you by electronic and paper mail.
492 |
493 | You should also get your employer (if you work as a programmer) or your
494 | school, if any, to sign a "copyright disclaimer" for the library, if
495 | necessary.  Here is a sample; alter the names:
496 |
497 | Yoyodyne, Inc., hereby disclaims all copyright interest in the
498 | library `Frob' (a library for tweaking knobs) written by James Random
499 | Hacker.
500 |
501 | {signature of Ty Coon}, 1 April 1990
502 | Ty Coon, President of Vice
503 |
504 | That's all there is to it!
505 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include lopocs *
2 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Light Opensource |logo| PointCloud Server
2 | #########################################
3 |
4 | |unix_build| |license|
5 |
6 |
7 | LOPoCS is a point cloud server written in Python. It serves point clouds
8 | stored in a PostgreSQL database with the ``pgpointcloud``
9 | extension.
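Once a dataset has been loaded (see the Usage section below), any HTTP client
can query the server. As a minimal sketch, with a hypothetical resource named
``public.patchs.points`` (resources follow the ``schema.table.column`` form):

.. code-block:: bash

    # assumes "lopocs serve" is running on the default port 5000
    $ curl http://localhost:5000/greyhound/public.patchs.points/info
    $ curl http://localhost:5000/3dtiles/public.patchs.points/info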
10 |
11 |
12 | .. |logo| image:: docs/lopocs.png
13 |
14 | The current version of LOPoCS can stream point clouds from PostgreSQL to the following viewers:
15 |
16 | * `Cesium <https://cesiumjs.org>`_ thanks to the `3DTiles <https://github.com/AnalyticalGraphicsInc/3d-tiles>`_ format
17 | * `Potree viewer <http://potree.org>`_: viewer with LAZ compressed data.
18 |
19 | Note that LOPoCS is currently the only **3DTiles** server able to stream data from
20 | `pgpointcloud <https://github.com/pgpointcloud/pointcloud>`_. This
21 | is possible thanks to the python module
22 | `py3dtiles <https://github.com/Oslandia/py3dtiles>`_.
23 |
24 | Development is ongoing to improve state-of-the-art algorithms and
25 | performance.
26 |
27 | `Video `_
28 |
29 | `Online demonstration `_
30 |
31 | Example using 3DTiles/Cesium with data from `GrandLyon <https://data.grandlyon.com>`_
32 |
33 | .. image:: docs/grandlyon.png
34 |
35 | .. contents::
36 |
37 | .. section-numbering::
38 |
39 |
40 | Main features
41 | =============
42 |
43 | * Command line tool to load data into PostgreSQL
44 | * Swagger API
45 | * Stream patches stored in PostgreSQL
46 | * Greyhound protocol support
47 | * 3DTiles standard support (partial)
48 | * Ready-to-use examples with Potree and Cesium
49 |
50 | Installation
51 | ============
52 |
53 | Dependencies
54 | ------------
55 |
56 | - python >= 3.4
57 | - gdal development headers (libgdal-dev)
58 | - pip (python3-pip)
59 | - virtualenv (python3-virtualenv)
60 | - `pgpointcloud <https://github.com/pgpointcloud/pointcloud>`_ with lazperf enabled
61 | - `Morton Postgres extension <https://github.com/Oslandia/pgmorton>`_
62 | - `PDAL <https://pdal.io>`_ (used by the LOPoCS loader)
63 |
64 | .. note:: The LOPoCS loader uses PDAL's "mortonorder" filter in "reverse" mode. The "reverse" mode was introduced in PDAL 1.7.1, so make sure you use that version or higher.
65 |
66 | From sources
67 | ------------
68 |
69 | .. code-block:: bash
70 |
71 |     $ git clone https://github.com/Oslandia/lopocs
72 |     $ cd lopocs
73 |     $ virtualenv -p /usr/bin/python3 venv
74 |     $ source venv/bin/activate
75 |     (venv)$ pip install 'numpy==1.14.3'
76 |     (venv)$ pip install -e .
77 |
78 | Configuration
79 | =============
80 |
81 | You will find an example configuration file for lopocs in ``conf/lopocs.sample.yml``.
82 |
83 | Copy it to ``conf/lopocs.yml`` and fill in your values; lopocs will load this
84 | file if it exists.
85 | Alternatively, set the ``LOPOCS_SETTINGS`` environment variable to point to your configuration file.
86 |
87 |
88 | Usage
89 | =====
90 |
91 | Prepare database
92 | ----------------
93 |
94 | .. code-block:: bash
95 |
96 |     $ createdb lopocs
97 |     $ psql -d lopocs -c 'create extension postgis'
98 |     $ psql -d lopocs -c 'create extension pointcloud'
99 |     $ psql -d lopocs -c 'create extension pointcloud_postgis'
100 |     $ psql -d lopocs -c 'create extension morton'
101 |
102 | Lopocs CLI
103 | ----------
104 |
105 | You can invoke lopocs in your virtualenv to show help and list the available subcommands:
106 |
107 | .. code-block:: bash
108 |
109 |     $ cd lopocs
110 |     $ source venv/bin/activate
111 |     (venv)$ lopocs
112 |
113 | Check installation
114 | ------------------
115 |
116 | .. code-block:: bash
117 |
118 |     (venv)$ lopocs check
119 |     Pdal ... 1.4.0
120 |     Pdal plugin pgpointcloud ... ok
121 |     PostgreSQL ... 9.6.3
122 |     PostGIS extension ... 2.3.1
123 |     PgPointcloud extension ... 1.1.0
124 |     PgPointcloud-PostGIS extension ... 1.0
125 |
126 |
127 | Demo data
128 | ---------
129 |
130 | .. code-block:: bash
131 |
132 |     (venv)$ mkdir demos
133 |     (venv)$ lopocs demo --work-dir demos/ --sample airport --cesium
134 |     (venv)$ lopocs serve
135 |
136 | Copy/paste the link at the end of the log into your browser and you will see this:
137 |
138 | .. image:: docs/airport.png
139 |
140 | Swagger API
141 | -----------
142 |
143 | Each viewer has its own expectations and communication protocol, so the API is built to meet these specific needs.
144 |
145 | Currently, two formats are supported:
146 |
147 | - 3DTiles
148 | - Greyhound format (LAZ data with a footer indicating the number of points)
149 |
150 | LOPoCS can stream data to two viewers:
151 |
152 | - Cesium with the 3DTiles format
153 | - Potree viewer with the Greyhound format
154 |
155 | LOPoCS provides its RESTful API through a Swagger UI, by default on
156 | ``http://localhost:5000``.
157 |
158 | .. image:: docs/api.png
159 |
160 | Run tests
161 | =========
162 |
163 | .. code-block:: bash
164 |
165 |     (venv)$ pip install .[dev]
166 |     (venv)$ py.test
167 |
168 | Licence
169 | =======
170 |
171 | LGPL version 2.1 or later: `LICENSE <LICENSE>`_.
172 |
173 | .. |unix_build| image:: https://img.shields.io/travis/Oslandia/lopocs/master.svg?style=flat-square&label=unix%20build
174 |     :target: http://travis-ci.org/Oslandia/lopocs
175 |     :alt: Build status of the master branch
176 |
177 | .. |license| image:: https://img.shields.io/badge/license-LGPL-blue.svg?style=flat-square
178 |     :target: LICENSE
179 |     :alt: Package license
180 |
--------------------------------------------------------------------------------
/conf/lopocs.sample.yml:
--------------------------------------------------------------------------------
1 | flask:
2 |     DEBUG: False
3 |     PG_HOST: localhost
4 |     PG_NAME: lopocs
5 |     PG_PORT: 5432
6 |     PG_USER: user
7 |     PG_PASSWORD: ****
8 |     CACHE_DIR: /home/user/.cache/lopocs
--------------------------------------------------------------------------------
/conf/lopocs.uwsgi.sample.yml:
--------------------------------------------------------------------------------
1 | uwsgi:
2 |     plugin: python3.4
3 |     virtualenv: /home/user/.virtualenvs/lopocs
4 |     master: true
5 |     socket: localhost:5000
6 |     module: lopocs.wsgi:app
7 |     processes: 2
8 |     enable-threads: true
9 |     protocol: http
10 |     need-app: true
11 |     lazy-apps: true
12 |     catch-exceptions: true
13 |     env: LOPOCS_SETTINGS=/home/user/lopocs/conf/lopocs.yml
--------------------------------------------------------------------------------
/docs/advanced_usage.rst:
--------------------------------------------------------------------------------
1 |
2 | ## Advanced usage
3 |
4 | If you want to manually fill the database without **lopocs_builder** (or if it
5 | doesn't work), you'll need some further explanation.
6 |
7 | ### Download a LAS file
8 |
9 | ```
10 | $ wget www.liblas.org/samples/LAS12_Sample_withRGB_Quick_Terrain_Modeler_fixed.las
11 | $ mv LAS12_Sample_withRGB_Quick_Terrain_Modeler_fixed.las airport.las
12 | ```
13 |
14 | ### Initialize the database
15 |
16 | The first step is to create the database and load the extensions:
17 |
18 | ```
19 | $ createdb pc_airport
20 | $ psql pc_airport
21 | psql (9.5.1)
22 | Type "help" for help.
23 |
24 | pc_airport=# create extension postgis;
25 | CREATE EXTENSION
26 | pc_airport=# create extension pointcloud;
27 | CREATE EXTENSION
28 | pc_airport=# create extension pointcloud_postgis;
29 | CREATE EXTENSION
30 | pc_airport=# create extension morton;
31 | CREATE EXTENSION
32 | ```
33 |
34 | ### Using PDAL to fill the database
35 |
36 | First, you have to write a PDAL pipeline suited to your file format, the
37 | spatial reference, and so on. The chipper and midoc filters, as well as the
38 | pgpointcloud writer, are mandatory.
39 |
40 | The pipeline for the *airport.las* file is named *pipe.json* and looks like:
41 |
42 | ```
43 | {
44 |     "pipeline":[
45 |         {
46 |             "type":"readers.las",
47 |             "filename":"airport.las",
48 |             "spatialreference":"EPSG:32616"
49 |         },
50 |         {
51 |             "type":"filters.chipper",
52 |             "capacity":500
53 |         },
54 |         {
55 |             "type":"filters.midoc"
56 |         },
57 |         {
58 |             "type":"writers.pgpointcloud",
59 |             "connection":"dbname=pc_airport",
60 |             "table":"patchs",
61 |             "compression":"lazperf",
62 |             "srid":"32616",
63 |             "overwrite":"false"
64 |         }
65 |     ]
66 | }
67 | ```
68 |
69 | Then you can run PDAL:
70 |
71 | ```
72 | $ pdal pipeline -i pipe.json
73 | ```
74 |
75 | ### Morton indexing
76 |
77 | Once PDAL has loaded patches of points into the database, you have to
78 | compute a Morton code for each of them:
79 |
80 | ```
81 | $ psql pc_airport
82 | psql (9.5.1)
83 | Type "help" for help.
84 |
85 | pc_airport=# ALTER TABLE patchs add column morton bigint;
86 | ALTER TABLE
87 | pc_airport=# SELECT Morton_Update('patchs', 'pa::geometry', 'morton', 64, TRUE);
88 | SELECT
89 | pc_airport=# CREATE INDEX ON patchs(morton);
90 | CREATE INDEX
91 | ```
92 |
93 | ### Configuration file for uWSGI and LOPoCS
94 |
95 | For LOPoCS running with uWSGI only (without a web server such as Nginx), the
96 | configuration file looks like:
97 |
98 | ```
99 | # uWSGI configuration: lopocs.uwsgi.yml
100 | uwsgi:
101 |     virtualenv: lopocs/venv
102 |     master: true
103 |     socket: 127.0.0.1:5000
104 |     protocol: http
105 |     module: lopocs.wsgi:app
106 |     processes: 4
107 |     enable-threads: true
108 |     lazy-apps: true
109 |     need-app: true
110 |     catch-exceptions: true
111 |     env: LOPOCS_SETTINGS=lopocs.yml
112 | ```
113 |
114 | ```
115 | # LOPoCS configuration: lopocs.yml
116 | flask:
117 |     DEBUG: True
118 |     LOG_LEVEL: debug
119 |     PG_HOST: localhost
120 |     PG_USER: USER
121 |     PG_NAME: pc_airport
122 |     PG_PORT: 5432
123 |     PG_COLUMN: pa
124 |     PG_TABLE: patchs
125 |     PG_PASSWORD:
126 |     DEPTH: 6
127 |     USE_MORTON: True
128 |     CACHE_DIR: ~/.cache/lopocs
129 |     STATS: False
130 | ```
131 |
132 | So, if you want to run LOPoCS:
133 |
134 | ```
135 | $ uwsgi -y lopocs.uwsgi.yml
136 | ```
137 |
138 |
139 | ### [For Potree] Schemas in pgpointcloud
140 |
141 | Potree expects a specific point structure from the streaming server:
142 |
143 | ```
144 | X: int32 scaled and offset
145 | Y: int32 scaled and offset
146 | Z: int32 scaled and offset
147 | Intensity: uint16
148 | Classification: uint8
149 | Red: uint16
150 | Green: uint16
151 | Blue: uint16
152 | ```
153 |
154 | The offset is the center of the bounding box of your data. Note that it should
155 | be the same box returned by the */info* response of LOPoCS. To retrieve
156 | the boundaries:
157 |
158 | ```
159 | $ pdal info --summary airport.las
160 | ...
161 | "bounds":
162 | {
163 |     "X":
164 |     {
165 |         "max": 728998.1352,
166 |         "min": 728262.8032
167 |     },
168 |     "Y":
169 |     {
170 |         "max": 4677014.685,
171 |         "min": 4676439.353
172 |     },
173 |     "Z":
174 |     {
175 |         "max": 327.0779649,
176 |         "min": 292.6479649
177 |     }
178 | }
179 | ...
180 | ```
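If you prefer to script this step, here is a minimal Python sketch (it mirrors
the computation the LOPoCS loader itself performs) that reads PDAL's summary
and prints the bounding-box centers used as offsets:

```
import json
import subprocess

# Read the same summary that "pdal info --summary airport.las" prints above.
summary = json.loads(
    subprocess.check_output(
        ['pdal', 'info', '--summary', 'airport.las']
    ).decode()
)['summary']

# The offset on each axis is the center of the bounding box.
for axis in ('X', 'Y', 'Z'):
    low = summary['bounds'][axis]['min']
    high = summary['bounds'][axis]['max']
    print('OFFSET_{} = {}'.format(axis, low + (high - low) / 2))
```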
181 |
182 | Thus:
183 |
184 | ```
185 | OFFSET_X = 728262.803 + (728998.135 - 728262.803) / 2 = 728630.469
186 | OFFSET_Y = 4676439.353 + (4677014.685 - 4676439.353) / 2 = 4676727.019
187 | OFFSET_Z = 292.6479649 + (327.0779649 - 292.6479649) / 2 = 309.8629649
188 | ```
189 |
190 | Then we have to build pointcloud schemas with these offsets and two different
191 | scales (0.1 and 0.01):
192 |
193 | ```
194 | $ cp docs/potree_schema_scale_01.sql airport_schema_scale_01.sql
195 | $ sed -i -e "s@!XOFFSET!@728630.469@g" airport_schema_scale_01.sql
196 | $ sed -i -e "s@!YOFFSET!@4676727.019@g" airport_schema_scale_01.sql
197 | $ sed -i -e "s@!ZOFFSET!@309.8629649@g" airport_schema_scale_01.sql
198 | $ sed -i -e "s@!SRID!@32616@g" airport_schema_scale_01.sql
199 | $ cp docs/potree_schema_scale_001.sql airport_schema_scale_001.sql
200 | $ sed -i -e "s@!XOFFSET!@728630.469@g" airport_schema_scale_001.sql
201 | $ sed -i -e "s@!YOFFSET!@4676727.019@g" airport_schema_scale_001.sql
202 | $ sed -i -e "s@!ZOFFSET!@309.8629649@g" airport_schema_scale_001.sql
203 | $ sed -i -e "s@!SRID!@32616@g" airport_schema_scale_001.sql
204 | ```
205 |
206 | These schemas then have to be inserted into the database:
207 |
208 | ```
209 | $ psql pc_airport -f airport_schema_scale_01.sql
210 | $ psql pc_airport -f airport_schema_scale_001.sql
211 | ```
212 |
213 | ### [For Potree] Hierarchy computation
214 |
215 | A hierarchy, described in JSON, is necessary for the Potree loader.
216 |
217 | If you want a full description of what a Greyhound hierarchy is, you can take
218 | a look [here](https://github.com/hobu/greyhound/blob/master/doc/clientDevelopment.rst);
219 | an illustrative example is shown below.
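As a rough sketch (the octants and point counts below are invented for
illustration), a Greyhound hierarchy is a tree whose keys combine n/s, e/w
and u/d to name the eight octants of a node, each node carrying its point
count `n`:

```
{
    "n": 1200,
    "nwu": { "n": 310 },
    "swd": {
        "n": 240,
        "seu": { "n": 95 }
    }
}
```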
220 |
--------------------------------------------------------------------------------
/docs/airport.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/airport.png
--------------------------------------------------------------------------------
/docs/api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/api.png
--------------------------------------------------------------------------------
/docs/api_3dtiles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/api_3dtiles.png
--------------------------------------------------------------------------------
/docs/api_greyhound.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/api_greyhound.png
--------------------------------------------------------------------------------
/docs/api_infos.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/api_infos.png
--------------------------------------------------------------------------------
/docs/grandlyon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/grandlyon.png
--------------------------------------------------------------------------------
/docs/itowns_montreal1_header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/itowns_montreal1_header.png
--------------------------------------------------------------------------------
/docs/lopocs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/lopocs.png
--------------------------------------------------------------------------------
/docs/potree_schema_scale_001.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO pointcloud_formats (pcid, srid, schema) VALUES (3, !SRID!,
2 | '<?xml version="1.0" encoding="UTF-8"?>
3 | <pc:PointCloudSchema xmlns:pc="http://pointcloud.org/schemas/PC/1.1"
4 |                      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
5 |   <pc:dimension>
6 |     <pc:position>1</pc:position>
7 |     <pc:size>4</pc:size>
8 |     <pc:description>X coordinate</pc:description>
9 |     <pc:name>X</pc:name>
10 |     <pc:interpretation>int32_t</pc:interpretation>
11 |     <pc:scale>0.01</pc:scale>
12 |     <pc:offset>!XOFFSET!</pc:offset>
13 |     <pc:active>true</pc:active>
14 |   </pc:dimension>
15 |   <pc:dimension>
16 |     <pc:position>2</pc:position>
17 |     <pc:size>4</pc:size>
18 |     <pc:description>Y coordinate</pc:description>
19 |     <pc:name>Y</pc:name>
20 |     <pc:interpretation>int32_t</pc:interpretation>
21 |     <pc:scale>0.01</pc:scale>
22 |     <pc:offset>!YOFFSET!</pc:offset>
23 |     <pc:active>true</pc:active>
24 |   </pc:dimension>
25 |   <pc:dimension>
26 |     <pc:position>3</pc:position>
27 |     <pc:size>4</pc:size>
28 |     <pc:description>Z coordinate</pc:description>
29 |     <pc:name>Z</pc:name>
30 |     <pc:interpretation>int32_t</pc:interpretation>
31 |     <pc:scale>0.01</pc:scale>
32 |     <pc:offset>!ZOFFSET!</pc:offset>
33 |     <pc:active>true</pc:active>
34 |   </pc:dimension>
35 |   <pc:dimension>
36 |     <pc:position>4</pc:position>
37 |     <pc:size>2</pc:size>
38 |     <pc:description>Representation of the pulse return magnitude</pc:description>
39 |     <pc:name>Intensity</pc:name>
40 |     <pc:interpretation>uint16_t</pc:interpretation>
41 |     <pc:active>true</pc:active>
42 |   </pc:dimension>
43 |   <pc:dimension>
44 |     <pc:position>5</pc:position>
45 |     <pc:size>1</pc:size>
46 |     <pc:description>ASPRS classification. 0 for no classification.</pc:description>
47 |     <pc:name>Classification</pc:name>
48 |     <pc:interpretation>uint8_t</pc:interpretation>
49 |     <pc:active>true</pc:active>
50 |   </pc:dimension>
51 |   <pc:dimension>
52 |     <pc:position>6</pc:position>
53 |     <pc:size>2</pc:size>
54 |     <pc:description>Red image channel value</pc:description>
55 |     <pc:name>Red</pc:name>
56 |     <pc:interpretation>uint16_t</pc:interpretation>
57 |     <pc:active>true</pc:active>
58 |   </pc:dimension>
59 |   <pc:dimension>
60 |     <pc:position>7</pc:position>
61 |     <pc:size>2</pc:size>
62 |     <pc:description>Green image channel value</pc:description>
63 |     <pc:name>Green</pc:name>
64 |     <pc:interpretation>uint16_t</pc:interpretation>
65 |     <pc:active>true</pc:active>
66 |   </pc:dimension>
67 |   <pc:dimension>
68 |     <pc:position>8</pc:position>
69 |     <pc:size>2</pc:size>
70 |     <pc:description>Blue image channel value</pc:description>
71 |     <pc:name>Blue</pc:name>
72 |     <pc:interpretation>uint16_t</pc:interpretation>
73 |     <pc:active>true</pc:active>
74 |   </pc:dimension>
75 |   <pc:metadata>
76 |     <Metadata name="compression">none</Metadata>
77 |   </pc:metadata>
78 |   <pc:orientation>point</pc:orientation>
79 | </pc:PointCloudSchema>');
--------------------------------------------------------------------------------
/docs/potree_schema_scale_01.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO pointcloud_formats (pcid, srid, schema) VALUES (2, !SRID!,
2 | '<?xml version="1.0" encoding="UTF-8"?>
3 | <pc:PointCloudSchema xmlns:pc="http://pointcloud.org/schemas/PC/1.1"
4 |                      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
5 |   <pc:dimension>
6 |     <pc:position>1</pc:position>
7 |     <pc:size>4</pc:size>
8 |     <pc:description>X coordinate</pc:description>
9 |     <pc:name>X</pc:name>
10 |     <pc:interpretation>int32_t</pc:interpretation>
11 |     <pc:scale>0.1</pc:scale>
12 |     <pc:offset>!XOFFSET!</pc:offset>
13 |     <pc:active>true</pc:active>
14 |   </pc:dimension>
15 |   <pc:dimension>
16 |     <pc:position>2</pc:position>
17 |     <pc:size>4</pc:size>
18 |     <pc:description>Y coordinate</pc:description>
19 |     <pc:name>Y</pc:name>
20 |     <pc:interpretation>int32_t</pc:interpretation>
21 |     <pc:scale>0.1</pc:scale>
22 |     <pc:offset>!YOFFSET!</pc:offset>
23 |     <pc:active>true</pc:active>
24 |   </pc:dimension>
25 |   <pc:dimension>
26 |     <pc:position>3</pc:position>
27 |     <pc:size>4</pc:size>
28 |     <pc:description>Z coordinate</pc:description>
29 |     <pc:name>Z</pc:name>
30 |     <pc:interpretation>int32_t</pc:interpretation>
31 |     <pc:scale>0.1</pc:scale>
32 |     <pc:offset>!ZOFFSET!</pc:offset>
33 |     <pc:active>true</pc:active>
34 |   </pc:dimension>
35 |   <pc:dimension>
36 |     <pc:position>4</pc:position>
37 |     <pc:size>2</pc:size>
38 |     <pc:description>Representation of the pulse return magnitude</pc:description>
39 |     <pc:name>Intensity</pc:name>
40 |     <pc:interpretation>uint16_t</pc:interpretation>
41 |     <pc:active>true</pc:active>
42 |   </pc:dimension>
43 |   <pc:dimension>
44 |     <pc:position>5</pc:position>
45 |     <pc:size>1</pc:size>
46 |     <pc:description>ASPRS classification. 0 for no classification.</pc:description>
47 |     <pc:name>Classification</pc:name>
48 |     <pc:interpretation>uint8_t</pc:interpretation>
49 |     <pc:active>true</pc:active>
50 |   </pc:dimension>
51 |   <pc:dimension>
52 |     <pc:position>6</pc:position>
53 |     <pc:size>2</pc:size>
54 |     <pc:description>Red image channel value</pc:description>
55 |     <pc:name>Red</pc:name>
56 |     <pc:interpretation>uint16_t</pc:interpretation>
57 |     <pc:active>true</pc:active>
58 |   </pc:dimension>
59 |   <pc:dimension>
60 |     <pc:position>7</pc:position>
61 |     <pc:size>2</pc:size>
62 |     <pc:description>Green image channel value</pc:description>
63 |     <pc:name>Green</pc:name>
64 |     <pc:interpretation>uint16_t</pc:interpretation>
65 |     <pc:active>true</pc:active>
66 |   </pc:dimension>
67 |   <pc:dimension>
68 |     <pc:position>8</pc:position>
69 |     <pc:size>2</pc:size>
70 |     <pc:description>Blue image channel value</pc:description>
71 |     <pc:name>Blue</pc:name>
72 |     <pc:interpretation>uint16_t</pc:interpretation>
73 |     <pc:active>true</pc:active>
74 |   </pc:dimension>
75 |   <pc:metadata>
76 |     <Metadata name="compression">none</Metadata>
77 |   </pc:metadata>
78 |   <pc:orientation>point</pc:orientation>
79 | </pc:PointCloudSchema>');
--------------------------------------------------------------------------------
/docs/stsulpice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/docs/stsulpice.png
--------------------------------------------------------------------------------
/lopocs/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import io
3 | import os
4 | import sys
5 | from pathlib import Path
6 |
7 | from flask import Flask, Blueprint
8 | from yaml import load as yload
9 | from yaml import FullLoader
10 |
11 | from lopocs.app import api
12 | from lopocs.database import Session
13 | from lopocs.stats import Stats
14 | from lopocs.conf import Config
15 |
16 | # lopocs version
17 | __version__ = '0.1.dev0'
18 |
19 |
20 | def load_yaml_config(filename):
21 |     """
22 |     Open Yaml file, load content for flask config and returns it as a python dict
23 |     """
24 |     content = io.open(filename, 'r').read()
25 |     return yload(content, Loader=FullLoader).get('flask', {})
26 |
27 |
28 | def create_app(env='Defaults'):
29 |     """
30 |     Creates application.
31 |     :returns: flask application instance
32 |     """
33 |     app = Flask(__name__)
34 |     cfgfile = os.environ.get('LOPOCS_SETTINGS')
35 |     if cfgfile:
36 |         app.config.update(load_yaml_config(cfgfile))
37 |     else:
38 |         try:
39 |             cfgfile = (Path(__file__).parent / '..' / 'conf' / 'lopocs.yml').resolve()
40 |         except FileNotFoundError:
41 |             app.logger.critical('no config file found !!')
42 |             sys.exit(1)
43 |         app.config.update(load_yaml_config(str(cfgfile)))
44 |
45 |     app.logger.debug('loading config from {}'.format(cfgfile))
46 |
47 |     # load extensions
48 |     if 'URL_PREFIX' in app.config:
49 |         blueprint = Blueprint('api', __name__, url_prefix=app.config['URL_PREFIX'])
50 |     else:
51 |         blueprint = Blueprint('api', __name__)
52 |
53 |     api.init_app(blueprint)
54 |     app.register_blueprint(blueprint)
55 |     Session.init_app(app)
56 |     Config.init(app.config)
57 |
58 |     if Config.STATS:
59 |         Stats.init()
60 |
61 |     return app
62 |
--------------------------------------------------------------------------------
/lopocs/app.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from flask_restplus import Api, Resource, reqparse
3 |
4 | from .greyhound import GreyhoundInfo, GreyhoundRead, GreyhoundHierarchy
5 | from .threedtiles import ThreeDTilesInfo, ThreeDTilesRead
6 | from .database import Session
7 |
8 | api = Api(
9 |     version='0.1',
10 |     title='LOPoCS API',
11 |     description='API for accessing LOPoCS'
12 | )
13 |
14 |
15 | # global namespace
16 | gns = api.namespace('infos', description='Information about LOPoCS')
17 |
18 |
19 | @gns.route("/global")
20 | class InfosGlobal(Resource):
21 |
22 |     def get(self):
23 |         return "Light OpenSource PointCloud Server by Oslandia"
24 |
25 |
26 | @gns.route("/contact")
27 | class InfosContact(Resource):
28 |
29 |     def get(self):
30 |         return "infos+li3ds@oslandia.com"
31 |
32 |
33 | @gns.route("/online")
34 | class InfosOnline(Resource):
35 |
36 |     def get(self):
37 |         return "Congratulations, LOPoCS is online!"
38 |
39 |
40 | @gns.route("/sources")
41 | class Sources(Resource):
42 |
43 |     def get(self):
44 |         """List available resources
45 |         """
46 |         Session.clear_catalog()
47 |         Session.fill_catalog()
48 |         resp = [
49 |             values.asjson()
50 |             for key, values in Session.catalog.items()
51 |         ]
52 |         return resp
53 |
54 |
55 | # Greyhound namespace
56 | ghd_ns = api.namespace('greyhound', description='Greyhound protocol')
57 |
58 |
59 | def validate_resource(resource):
60 |     '''Resource is a table name with schema and column name combined as
61 |     follows: schema.table.column
62 |     '''
63 |     if resource.count('.') != 2:
64 |         api.abort(404, "resource must be in the form schema.table.column")
65 |
66 |     table = resource[:resource.rfind('.')]
67 |     column = resource.split('.')[-1]
68 |     return table, column
69 |
70 |
71 | @ghd_ns.route("/<resource>/info")
72 | class Info(Resource):
73 |
74 |     def get(self, resource):
75 |         table, column = validate_resource(resource)
76 |         return GreyhoundInfo(table, column)
77 |
78 |
79 | ghd_read = reqparse.RequestParser()
80 | ghd_read.add_argument('depthBegin', type=int, required=False)
81 | ghd_read.add_argument('depthEnd', type=int, required=False)
82 | ghd_read.add_argument('depth', type=int, required=False)
83 | ghd_read.add_argument('bounds', type=str, required=False)
84 | ghd_read.add_argument('scale', type=float, required=False)
85 | ghd_read.add_argument('offset', type=str, required=False)
86 | ghd_read.add_argument('schema', type=str, required=False)
87 | ghd_read.add_argument('compress', type=bool, required=False)
88 |
89 |
90 | @ghd_ns.route("/<resource>/read")
91 | class Read(Resource):
92 |
93 |     @api.expect(ghd_read, validate=True)
94 |     def get(self, resource):
95 |         table, column = validate_resource(resource)
96 |         args = ghd_read.parse_args()
97 |         return GreyhoundRead(
98 |             table,
99 |             column,
100 |             args.get('offset'),
101 |             args.get('scale'),
102 |             args.get('bounds'),
103 |             args.get('depth'),
104 |             args.get('depthBegin'),
105 |             args.get('depthEnd'),
106 |             args.get('schema'),
107 |             args.get('compress'))
108 |
109 |
110 | ghd_hierarchy = reqparse.RequestParser()
111 | ghd_hierarchy.add_argument('depthBegin', type=int)
112 | ghd_hierarchy.add_argument('depthEnd', type=int)
113 | ghd_hierarchy.add_argument('bounds', type=str, required=True)
114 | ghd_hierarchy.add_argument('scale', type=float)
115 | ghd_hierarchy.add_argument('offset', type=str)
116 |
117 |
118 | @ghd_ns.route("/<resource>/hierarchy")
119 | class Hierarchy(Resource):
120 |
121 |     @ghd_ns.expect(ghd_hierarchy, validate=True)
122 |     def get(self, resource):
123 |         table, column = validate_resource(resource)
124 |         args = ghd_hierarchy.parse_args()
125 |         return GreyhoundHierarchy(
126 |             table, column,
127 |             args.get('bounds'),
128 |             args.get('depthBegin'), args.get('depthEnd'),
129 |             args.get('scale'), args.get('offset'))
130 |
131 |
132 | # 3Dtiles namespace
133 | threedtiles_ns = api.namespace('3dtiles', description='3DTiles format')
134 |
135 |
136 | @threedtiles_ns.route("/<resource>/info")
137 | class ThreeDTilesInfoRoute(Resource):
138 |
139 |     def get(self, resource):
140 |         table, column = validate_resource(resource)
141 |         return ThreeDTilesInfo(table, column)
142 |
143 |
144 | threedtiles_read = reqparse.RequestParser()
145 | threedtiles_read.add_argument('bounds', type=str, required=True)
146 | threedtiles_read.add_argument('lod', type=int, required=True)
147 |
148 |
149 | @threedtiles_ns.route("/<resource>/read.pnts")
150 | class ThreeDTilesReadRoute(Resource):
151 |
152 |     @threedtiles_ns.expect(threedtiles_read, validate=True)
153 |     def get(self, resource):
154 |         table, column = validate_resource(resource)
155 |         args = threedtiles_read.parse_args()
156 |         return ThreeDTilesRead(
157 |             table, column,
158 |             args.get('bounds'),
159 |             args.get('lod')
160 |         )
161 |
--------------------------------------------------------------------------------
/lopocs/cesium.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | cesium_page = """
4 | <!-- Cesium demo page template: the HTML markup (Cesium includes, styles and
5 |      the viewer bootstrap script) was stripped during extraction; only the
6 |      page title survived. -->
7 | <title>Demo: {resource}</title>
8 | """
--------------------------------------------------------------------------------
/lopocs/cli.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import io
4 | import os
5 | import re
6 | import sys
7 | import shlex
8 | import json
9 | from zipfile import ZipFile
10 | from datetime import datetime
11 | from pathlib import Path
12 | from subprocess import check_call, call, check_output, CalledProcessError, DEVNULL
13 |
14 | import click
15 | import requests
16 | from flask_cors import CORS
17 | from pyproj import Proj, transform
18 |
19 | from lopocs import __version__
20 | from lopocs import create_app, greyhound, threedtiles
21 | from lopocs.database import Session
22 | from lopocs.potreeschema import potree_schema
23 | from lopocs.potreeschema import potree_page
24 | from lopocs.cesium import cesium_page
25 | from lopocs.utils import compute_scale_for_cesium
26 |
27 |
28 | samples = {
29 |     'airport': 'http://www.liblas.org/samples/LAS12_Sample_withRGB_Quick_Terrain_Modeler_fixed.las',
30 |     'sthelens': 'http://www.liblas.org/samples/st-helens.las',
31 |     'lyon': (3946, 'http://3d.oslandia.com/lyon.laz')
32 | }
33 |
34 | PDAL_PIPELINE = """
35 | {{
36 |     "pipeline": [
37 |         {{
38 |             "type": "readers.{extension}",
39 |             "filename":"{realfilename}"
40 |         }},
41 |         {{
42 |             "type": "filters.chipper",
43 |             "capacity": "{capacity}"
44 |         }},
45 |         {reproject}
46 |         {{
47 |             "type": "filters.mortonorder",
48 |             "reverse": "true"
49 |         }},
50 |         {{
51 |             "type":"writers.pgpointcloud",
52 |             "connection":"dbname={pg_name} host={pg_host} port={pg_port} user={pg_user} password={pg_password}",
53 |             "schema": "{schema}",
54 |             "table":"{tab}",
55 |             "compression":"none",
56 |             "srid":"{srid}",
57 |             "overwrite":"true",
58 |             "column": "{column}",
59 |             "scale_x": "{scale_x}",
60 |             "scale_y": "{scale_y}",
61 |             "scale_z": "{scale_z}",
62 |             "offset_x": "{offset_x}",
63 |             "offset_y": "{offset_y}",
64 |             "offset_z": "{offset_z}"
65 |         }}
66 |     ]
67 | }}"""
68 |
69 |
70 | def fatal(message):
71 |     '''print error and exit'''
72 |     click.echo('\nFATAL: {}'.format(message), err=True)
73 |     sys.exit(1)
74 |
75 |
76 | def pending(msg, nl=False):
77 |     click.echo('[{}] {} ... '.format(
78 |         datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
79 |         msg
80 |     ), nl=nl)
81 |
82 |
83 | def green(message):
84 |     click.secho(message.replace('\n', ''), fg='green')
85 |
86 |
87 | def ok(mess=None):
88 |     if mess:
89 |         click.secho('{} : '.format(mess.replace('\n', '')), nl=False)
90 |     click.secho('ok', fg='green')
91 |
92 |
93 | def ko(mess=None):
94 |     if mess:
95 |         click.secho('{} : '.format(mess.replace('\n', '')), nl=False)
96 |     click.secho('ko', fg='red')
97 |
98 |
99 | def download(label, url, dest):
100 |     '''
101 |     download url using requests and a progressbar
102 |     '''
103 |     r = requests.get(url, stream=True)
104 |     length = int(r.headers['content-length'])
105 |
106 |     chunk_size = 512
107 |     iter_size = 0
108 |     with io.open(dest, 'wb') as fd:
109 |         with click.progressbar(length=length, label=label) as bar:
110 |             for chunk in r.iter_content(chunk_size):
111 |                 fd.write(chunk)
112 |                 iter_size += chunk_size
113 |                 bar.update(chunk_size)
114 |
115 |
116 | def print_version(ctx, param, value):
117 |     if not value or ctx.resilient_parsing:
118 |         return
119 |     click.echo('LOPoCS version {}'.format(__version__))
120 |     click.echo('')
121 |     ctx.exit()
122 |
123 |
124 | @click.group()
125 | @click.option('--version', help='show version', is_flag=True, expose_value=False, callback=print_version)
126 | def cli():
127 |     '''lopocs command line tools'''
128 |     pass
129 |
130 |
131 | @click.option('--host', help='The hostname to listen on (default is 127.0.0.1)',
132 |               default='127.0.0.1', type=str)
133 | @click.option('--port', help='The port to listen on (default is 5000)',
134 |               default=5000, type=int)
135 | @cli.command()
136 | def serve(host, port):
137 |     '''run lopocs server (development usage)'''
138 |     app = create_app()
139 |     CORS(app)
140 |     app.run(host=host, port=port)
141 |
142 |
143 | def cmd_rt(message, command):
144 |     '''wrapper around the call function
145 |     '''
146 |     click.echo('{} ... '.format(message), nl=False)
147 |     rt = call(command, shell=True)
148 |     if rt != 0:
149 |         ko()
150 |         return
151 |     ok()
152 |
153 |
154 | def cmd_output(message, command):
155 |     '''wrapper around the check_output function
156 |     '''
157 |     click.echo('{} ... '.format(message), nl=False)
158 |     try:
159 |         output = check_output(shlex.split(command)).decode()
160 |         green(output)
161 |     except Exception as exc:
162 |         ko(str(exc))
163 |
164 |
165 | def cmd_pg(message, request):
166 |     '''wrapper around a session query
167 |     '''
168 |     click.echo('{} ... '.format(message), nl=False)
'.format(message), nl=False)
169 |     try:
170 |         result = Session.query(request)
171 |         if not result:
172 |             raise Exception('Not found')
173 |         green(result[0][0])
174 |     except Exception as exc:
175 |         ko(str(exc))
176 | 
177 | 
178 | @cli.command()
179 | def check():
180 |     '''check lopocs configuration and dependencies'''
181 |     try:
182 |         app = create_app()
183 |     except Exception as exc:
184 |         fatal(str(exc))
185 | 
186 |     if not app:
187 |         fatal("it appears that you don't have any configuration file")
188 | 
189 |     # pdal
190 |     cmd_output('Pdal', 'pdal-config --version')
191 |     cmd_rt('Pdal plugin pgpointcloud', "test -e `pdal-config --plugin-dir`/libpdal_plugin_writer_pgpointcloud.so")
192 | 
193 |     # postgresql and extensions
194 |     cmd_pg('PostgreSQL', 'show server_version')
195 |     cmd_pg('PostGIS extension', "select default_version from pg_available_extensions where name = 'postgis'")
196 |     cmd_pg('PgPointcloud extension', "select default_version from pg_available_extensions where name = 'pointcloud'")
197 |     cmd_pg('PgPointcloud-PostGIS extension', "select default_version from pg_available_extensions where name = 'pointcloud_postgis'")
198 | 
199 | 
200 | @click.option('--table', required=True, help='table name used to store point clouds, assumed to be in the public schema if no prefix is provided')
201 | @click.option('--column', help="column name to store patches", default="points", type=str)
202 | @click.option('--work-dir', type=click.Path(exists=True), required=True, help="working directory where temporary files will be saved")
203 | @click.option('--server-url', type=str, help="server url for lopocs", default="http://localhost:5000")
204 | @click.option('--capacity', type=int, default=400, help="number of points in a pcpatch")
205 | @click.option('--potree', 'usewith', help="load data for use with greyhound/potree", flag_value='potree')
206 | @click.option('--cesium', 'usewith', help="load data for use with 3dtiles/cesium", default=True, flag_value='cesium')
207 | @click.option('--srid', help="set Spatial Reference Identifier (EPSG code) for the source file", default=0, type=int)
208 | @click.argument('filename', type=click.Path(exists=True))
209 | @cli.command()
210 | def load(filename, table, column, work_dir, server_url, capacity, usewith, srid):
211 |     '''load point cloud data using pdal and add metadata needed by lopocs'''
212 |     _load(filename, table, column, work_dir, server_url, capacity, usewith, srid)
213 | 
214 | 
215 | def _load(filename, table, column, work_dir, server_url, capacity, usewith, srid=0):
216 |     '''load point cloud data using pdal and add metadata needed by lopocs'''
217 |     # initialize flask application
218 |     app = create_app()
219 | 
220 |     filename = Path(filename)
221 |     work_dir = Path(work_dir)
222 |     extension = filename.suffix[1:].lower()
223 |     # laz uses las reader in PDAL
224 |     extension = extension if extension != 'laz' else 'las'
225 |     basename = filename.stem
226 |     basedir = filename.parent
227 | 
228 |     pending('Creating metadata table')
229 |     Session.create_pointcloud_lopocs_table()
230 |     ok()
231 | 
232 |     pending('Reading summary with PDAL')
233 |     json_path = os.path.join(
234 |         str(work_dir.resolve()),
235 |         '{basename}_{table}_pipeline.json'.format(**locals()))
236 | 
237 |     # table name should always be schema-prefixed
238 |     if '.' 
not in table: 239 | table = 'public.{}'.format(table) 240 | 241 | cmd = "pdal info --summary {}".format(filename) 242 | try: 243 | output = check_output(shlex.split(cmd)) 244 | except CalledProcessError as e: 245 | fatal(e) 246 | 247 | summary = json.loads(output.decode())['summary'] 248 | ok() 249 | 250 | if 'srs' not in summary and not srid: 251 | fatal('Unable to find the spatial reference system, please provide a SRID with option --srid') 252 | 253 | if not srid: 254 | # find authority code in wkt string 255 | srid = re.findall('EPSG","(\d+)"', summary['srs']['wkt'])[-1] 256 | 257 | p = Proj(init='epsg:{}'.format(srid)) 258 | 259 | if p.is_latlong(): 260 | # geographic 261 | scale_x, scale_y, scale_z = (1e-6, 1e-6, 1e-2) 262 | else: 263 | # projection or geocentric 264 | scale_x, scale_y, scale_z = (0.01, 0.01, 0.01) 265 | 266 | offset_x = summary['bounds']['X']['min'] + (summary['bounds']['X']['max'] - summary['bounds']['X']['min']) / 2 267 | offset_y = summary['bounds']['Y']['min'] + (summary['bounds']['Y']['max'] - summary['bounds']['Y']['min']) / 2 268 | offset_z = summary['bounds']['Z']['min'] + (summary['bounds']['Z']['max'] - summary['bounds']['Z']['min']) / 2 269 | 270 | reproject = "" 271 | 272 | if usewith == 'cesium': 273 | from_srid = srid 274 | # cesium only use epsg:4978, so we must reproject before loading into pg 275 | srid = 4978 276 | 277 | reproject = """ 278 | {{ 279 | "type":"filters.reprojection", 280 | "in_srs":"EPSG:{from_srid}", 281 | "out_srs":"EPSG:{srid}" 282 | }},""".format(**locals()) 283 | # transform bounds in new coordinate system 284 | pini = Proj(init='epsg:{}'.format(from_srid)) 285 | pout = Proj(init='epsg:{}'.format(srid)) 286 | # recompute offset in new space and start at 0 287 | pending('Reprojected bounds', nl=True) 288 | # xmin, ymin, zmin = transform(pini, pout, offset_x, offset_y, offset_z) 289 | xmin, ymin, zmin = transform(pini, pout, summary['bounds']['X']['min'], summary['bounds']['Y']['min'], summary['bounds']['Z']['min']) 290 | xmax, ymax, zmax = transform(pini, pout, summary['bounds']['X']['max'], summary['bounds']['Y']['max'], summary['bounds']['Z']['max']) 291 | offset_x, offset_y, offset_z = xmin, ymin, zmin 292 | click.echo('{} < x < {}'.format(xmin, xmax)) 293 | click.echo('{} < y < {}'.format(ymin, ymax)) 294 | click.echo('{} < z < {} '.format(zmin, zmax), nl=False) 295 | ok() 296 | pending('Computing best scales for cesium') 297 | # override scales for cesium if possible we try to use quantized positions 298 | scale_x = min(compute_scale_for_cesium(xmin, xmax), 1) 299 | scale_y = min(compute_scale_for_cesium(ymin, ymax), 1) 300 | scale_z = min(compute_scale_for_cesium(zmin, zmax), 1) 301 | ok('[{}, {}, {}]'.format(scale_x, scale_y, scale_z)) 302 | 303 | pg_host = app.config['PG_HOST'] 304 | pg_name = app.config['PG_NAME'] 305 | pg_port = app.config['PG_PORT'] 306 | pg_user = app.config['PG_USER'] 307 | pg_password = app.config['PG_PASSWORD'] 308 | realfilename = str(filename.resolve()) 309 | schema, tab = table.split('.') 310 | 311 | pending('Loading point clouds into database') 312 | 313 | with io.open(json_path, 'w') as json_file: 314 | json_file.write(PDAL_PIPELINE.format(**locals())) 315 | 316 | cmd = "pdal pipeline {}".format(json_path) 317 | 318 | try: 319 | check_call(shlex.split(cmd), stderr=DEVNULL, stdout=DEVNULL) 320 | except CalledProcessError as e: 321 | fatal(e) 322 | ok() 323 | 324 | pending("Creating indexes") 325 | Session.execute(""" 326 | create index on {table} using gist(pc_envelopegeometry(points)); 327 | 
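        -- the morton column filled below by Morton_Update stores a
        -- space-filling-curve key per patch; the streaming queries rely on
        -- "order by morton" to fetch patches in a spatially coherent order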
alter table {table} add column morton bigint; 328 | select Morton_Update('{table}', 'points', 'morton', 128, TRUE); 329 | create index on {table}(morton); 330 | """.format(**locals())) 331 | ok() 332 | 333 | pending("Adding metadata for lopocs") 334 | Session.update_metadata( 335 | table, column, srid, scale_x, scale_y, scale_z, 336 | offset_x, offset_y, offset_z 337 | ) 338 | lpsession = Session(table, column) 339 | ok() 340 | 341 | # retrieve boundingbox 342 | fullbbox = lpsession.boundingbox 343 | bbox = [ 344 | fullbbox['xmin'], fullbbox['ymin'], fullbbox['zmin'], 345 | fullbbox['xmax'], fullbbox['ymax'], fullbbox['zmax'] 346 | ] 347 | 348 | if usewith == 'potree': 349 | lod_min = 0 350 | lod_max = 5 351 | # add schema currently used by potree (version 1.5RC) 352 | Session.add_output_schema( 353 | table, column, 0.01, 0.01, 0.01, 354 | offset_x, offset_y, offset_z, srid, potree_schema 355 | ) 356 | cache_file = ( 357 | "{0}_{1}_{2}_{3}_{4}.hcy".format( 358 | lpsession.table, 359 | lpsession.column, 360 | lod_min, 361 | lod_max, 362 | '_'.join(str(e) for e in bbox) 363 | ) 364 | ) 365 | pending("Building greyhound hierarchy") 366 | new_hcy = greyhound.build_hierarchy_from_pg( 367 | lpsession, lod_min, lod_max, bbox 368 | ) 369 | greyhound.write_in_cache(new_hcy, cache_file) 370 | ok() 371 | create_potree_page(str(work_dir.resolve()), server_url, table, column) 372 | 373 | if usewith == 'cesium': 374 | pending("Building 3Dtiles tileset") 375 | hcy = threedtiles.build_hierarchy_from_pg( 376 | lpsession, server_url, bbox 377 | ) 378 | 379 | tileset = os.path.join(str(work_dir.resolve()), 'tileset-{}.{}.json'.format(table, column)) 380 | 381 | with io.open(tileset, 'wb') as out: 382 | out.write(hcy.encode()) 383 | ok() 384 | create_cesium_page(str(work_dir.resolve()), table, column) 385 | 386 | 387 | @click.option('--table', required=True, help='table name to store pointclouds, considered in public schema if no prefix provided') 388 | @click.option('--column', help="column name to store patches", default="points", type=str) 389 | @click.option('--work-dir', type=click.Path(exists=True), required=True, help="working directory where temporary files will be saved") 390 | @click.option('--server-url', type=str, help="server url for lopocs", default="http://localhost:5000") 391 | @cli.command() 392 | def tileset(table, column, server_url, work_dir): 393 | """ 394 | (Re)build a tileset.json for a given table 395 | """ 396 | # intialize flask application 397 | create_app() 398 | 399 | work_dir = Path(work_dir) 400 | 401 | if '.' 
not in table: 402 | table = 'public.{}'.format(table) 403 | 404 | lpsession = Session(table, column) 405 | # initialize range for level of details 406 | fullbbox = lpsession.boundingbox 407 | bbox = [ 408 | fullbbox['xmin'], fullbbox['ymin'], fullbbox['zmin'], 409 | fullbbox['xmax'], fullbbox['ymax'], fullbbox['zmax'] 410 | ] 411 | pending('Building tileset from database') 412 | hcy = threedtiles.build_hierarchy_from_pg( 413 | lpsession, server_url, bbox 414 | ) 415 | ok() 416 | tileset = os.path.join(str(work_dir.resolve()), 'tileset-{}.{}.json'.format(table, column)) 417 | pending('Writing tileset to disk') 418 | with io.open(tileset, 'wb') as out: 419 | out.write(hcy.encode()) 420 | ok() 421 | 422 | 423 | def create_potree_page(work_dir, server_url, tablename, column): 424 | '''Create an html demo page with potree viewer 425 | ''' 426 | # get potree build 427 | potree = os.path.join(work_dir, 'potree') 428 | potreezip = os.path.join(work_dir, 'potree.zip') 429 | if not os.path.exists(potree): 430 | download('Getting potree code', 'http://3d.oslandia.com/potree.zip', potreezip) 431 | # unzipping content 432 | with ZipFile(potreezip) as myzip: 433 | myzip.extractall(path=work_dir) 434 | tablewschema = tablename.split('.')[-1] 435 | sample_page = os.path.join(work_dir, 'potree-{}.html'.format(tablewschema)) 436 | abs_sample_page = str(Path(sample_page).absolute()) 437 | pending('Creating a potree demo page : file://{}'.format(abs_sample_page)) 438 | resource = '{}.{}'.format(tablename, column) 439 | server_url = server_url.replace('http://', '') 440 | with io.open(sample_page, 'wb') as html: 441 | html.write(potree_page.format(resource=resource, server_url=server_url).encode()) 442 | ok() 443 | 444 | 445 | def create_cesium_page(work_dir, tablename, column): 446 | '''Create an html demo page with cesium viewer 447 | ''' 448 | cesium = os.path.join(work_dir, 'cesium') 449 | cesiumzip = os.path.join(work_dir, 'cesium.zip') 450 | if not os.path.exists(cesium): 451 | download('Getting cesium code', 'http://3d.oslandia.com/cesium.zip', cesiumzip) 452 | # unzipping content 453 | with ZipFile(cesiumzip) as myzip: 454 | myzip.extractall(path=work_dir) 455 | tablewschema = tablename.split('.')[-1] 456 | sample_page = os.path.join(work_dir, 'cesium-{}.html'.format(tablewschema)) 457 | abs_sample_page = str(Path(sample_page).absolute()) 458 | pending('Creating a cesium demo page : file://{}'.format(abs_sample_page)) 459 | resource = '{}.{}'.format(tablename, column) 460 | with io.open(sample_page, 'wb') as html: 461 | html.write(cesium_page.format(resource=resource).encode()) 462 | ok() 463 | 464 | 465 | @cli.command() 466 | @click.option('--sample', help="sample data available", default="airport", type=click.Choice(samples.keys())) 467 | @click.option('--work-dir', type=click.Path(exists=True), required=True, help="working directory where sample files will be saved") 468 | @click.option('--server-url', type=str, help="server url for lopocs", default="http://localhost:5000") 469 | @click.option('--potree', 'usewith', help="load data for using with greyhound/potree", flag_value='potree') 470 | @click.option('--cesium', 'usewith', help="load data for using with 3dtiles/cesium ", default=True, flag_value='cesium') 471 | def demo(sample, work_dir, server_url, usewith): 472 | ''' 473 | download sample lidar data, load it into pgpointcloud 474 | ''' 475 | srid = None 476 | if isinstance(samples[sample], (list, tuple)): 477 | # srid given 478 | srid = samples[sample][0] 479 | download_link = 
samples[sample][1] 480 | else: 481 | download_link = samples[sample] 482 | filepath = Path(download_link) 483 | pending('Using sample data {}: {}'.format(sample, filepath.name)) 484 | dest = os.path.join(work_dir, filepath.name) 485 | ok() 486 | 487 | if not os.path.exists(dest): 488 | download('Downloading sample', download_link, dest) 489 | 490 | # now load data 491 | if srid: 492 | _load(dest, sample, 'points', work_dir, server_url, 400, usewith, srid=srid) 493 | else: 494 | _load(dest, sample, 'points', work_dir, server_url, 400, usewith) 495 | 496 | click.echo( 497 | 'Now you can test lopocs server by executing "lopocs serve"' 498 | .format(sample) 499 | ) 500 | -------------------------------------------------------------------------------- /lopocs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | 5 | 6 | class Config(object): 7 | 8 | DEPTH = 6 9 | CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "lopocs") 10 | ROOT_HCY = None 11 | MAX_PATCHS_PER_QUERY = None 12 | MAX_POINTS_PER_PATCH = None 13 | USE_MORTON = True 14 | DEBUG = False 15 | STATS = False 16 | STATS_SERVER_PORT = 6379 17 | 18 | CESIUM_COLOR = "colors" 19 | 20 | @classmethod 21 | def init(cls, config): 22 | 23 | if 'DEPTH' in config: 24 | cls.DEPTH = config['DEPTH'] 25 | 26 | if 'CACHE_DIR' in config: 27 | cls.CACHE_DIR = config['CACHE_DIR'] 28 | if not os.path.isdir(cls.CACHE_DIR): 29 | os.makedirs(cls.CACHE_DIR) 30 | 31 | if 'ROOT_HCY' in config: 32 | cls.ROOT_HCY = config['ROOT_HCY'] 33 | 34 | if 'MAX_POINTS_PER_PATCH' in config: 35 | cls.MAX_POINTS_PER_PATCH = config['MAX_POINTS_PER_PATCH'] 36 | 37 | if 'USE_MORTON' in config: 38 | cls.USE_MORTON = config['USE_MORTON'] 39 | 40 | if 'DEBUG' in config: 41 | cls.DEBUG = config['DEBUG'] 42 | 43 | if 'STATS' in config: 44 | cls.STATS = config['STATS'] 45 | 46 | if 'STATS_SERVER_PORT' in config: 47 | cls.STATS_SERVER_PORT = config['STATS_SERVER_PORT'] 48 | 49 | if 'CESIUM_COLOR' in config: 50 | cls.CESIUM_COLOR = config['CESIUM_COLOR'] 51 | -------------------------------------------------------------------------------- /lopocs/database.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from multiprocessing import cpu_count 3 | from collections import defaultdict 4 | from contextlib import contextmanager 5 | from packaging import version 6 | 7 | import psycopg2.extras 8 | import psycopg2.extensions 9 | from psycopg2.extras import Json 10 | from psycopg2.pool import ThreadedConnectionPool 11 | from osgeo.osr import SpatialReference 12 | 13 | from .utils import iterable2pgarray, list_from_str_box, greyhound_types 14 | from .potreeschema import create_pointcloud_schema 15 | 16 | psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) 17 | 18 | 19 | LOPOCS_TABLES_QUERY = """ 20 | create table if not exists pointcloud_lopocs ( 21 | id serial primary key 22 | , schematable varchar 23 | , "column" varchar 24 | , srid integer 25 | , max_patches_per_query integer default 4096 26 | , max_points_per_patch integer default NULL 27 | , bbox jsonb 28 | , constraint uniq_table_col UNIQUE (schematable, "column") 29 | , constraint check_schematable_exists 30 | CHECK (to_regclass(schematable) is not null) 31 | ); 32 | create table if not exists pointcloud_lopocs_outputs ( 33 | id integer references pointcloud_lopocs(id) on delete cascade 34 | , pcid integer references pointcloud_formats(pcid) on delete cascade 35 | , 
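    -- scales/offsets are the per-output quantization parameters
    -- (world coordinate = stored value * scale + offset)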
scales float[3] 36 | , offsets float[3] 37 | , point_schema jsonb 38 | , stored boolean 39 | , bbox float[6] 40 | , constraint uniqschema UNIQUE (id, pcid) 41 | ); 42 | -- trick to add a partial constraint 43 | -- only one schema is used to store patches 44 | create unique index if not exists uniqidx_pcid_stored 45 | on pointcloud_lopocs_outputs (id, pcid, stored) where (stored is true); 46 | 47 | -- used to clean pointcloud_lopocs table when referenced table no longer exists 48 | create or replace function clean_lopocs() returns void 49 | language sql as 50 | 'delete from pointcloud_lopocs where to_regclass(schematable) is null'; 51 | """ 52 | 53 | # get a list of outputs formats available 54 | LOPOCS_OUTPUTS_QUERY = """ 55 | select 56 | min(pl.schematable) 57 | , min(pl."column") 58 | , min(pl.srid) 59 | , min(pc.pcid) 60 | , array_agg(plo.pcid) 61 | , array_agg(plo.scales) 62 | , array_agg(plo.offsets) 63 | , array_agg(plo.point_schema) 64 | , array_agg(plo.bbox) 65 | , array_agg(plo.stored) 66 | , min(pl.max_patches_per_query) 67 | , min(pl.max_points_per_patch) 68 | , pl.bbox 69 | from pointcloud_lopocs pl 70 | join pointcloud_columns pc 71 | on concat(pc."schema", '.', pc."table") = pl.schematable 72 | and pc."column" = pl."column" 73 | join pointcloud_lopocs_outputs plo on plo.id = pl.id 74 | where 75 | to_regclass(schematable) is not null -- check if table still exists in pg catalog 76 | group by pl.id, pl.bbox 77 | """ 78 | 79 | 80 | class LopocsException(Exception): 81 | pass 82 | 83 | 84 | class LopocsTable(): 85 | """ 86 | Used to cache content of pointcloud_lopocs* tables and 87 | avoid roundtrips to the database. 88 | 89 | Outputs attribute looks like : 90 | outputs": [ 91 | { 92 | "offsets": [728630.47, 4676727.02, 309.86], 93 | "scales": [0.01,0.01,0.01], 94 | "stored": true, 95 | "point_schema": [ 96 | { 97 | "type": "unsigned", 98 | "name": "Intensity", 99 | "size": 2 100 | },... 101 | ], 102 | "pcid": 1, 103 | "bbox": [xmin,ymin,zmin,xmax,ymax,zmax] 104 | },... 
105 | ] 106 | """ 107 | __slots__ = ( 108 | 'table', 'column', 'srid', 'pcid', 'outputs', 109 | 'max_patches_per_query', 'max_points_per_patch', 'bbox' 110 | ) 111 | 112 | def __init__(self, table, column, srid, pcid, outputs, 113 | max_patches_per_query, max_points_per_patch, bbox): 114 | self.table = table 115 | self.column = column 116 | self.outputs = outputs 117 | self.srid = srid 118 | self.pcid = pcid 119 | self.max_patches_per_query = max_patches_per_query 120 | self.max_points_per_patch = max_points_per_patch 121 | self.bbox = bbox 122 | 123 | def filter_stored_output(self): 124 | ''' 125 | Find and return the output corresponding to the stored patches 126 | ''' 127 | return [ 128 | output 129 | for output in self.outputs 130 | if output['stored'] 131 | ][0] 132 | 133 | def asjson(self): 134 | ''' 135 | return a json representation of this object 136 | ''' 137 | return { 138 | 'table': self.table, 139 | 'column': self.column, 140 | 'outputs': self.outputs, 141 | 'srid': self.srid, 142 | 'pcid': self.pcid, 143 | 'max_patches_per_query': self.max_patches_per_query, 144 | 'max_points_per_patch': self.max_points_per_patch, 145 | 'bbox': self.bbox, 146 | } 147 | 148 | 149 | class Session(): 150 | """ 151 | Session object used as a global connection object to the db 152 | 153 | ``catalog`` contains lopocs table cache 154 | catalog = { 155 | ('public.table', 'column'): object 156 | } 157 | """ 158 | db = None 159 | catalog = defaultdict(dict) 160 | 161 | @classmethod 162 | def clear_catalog(cls): 163 | cls.catalog.clear() 164 | 165 | @classmethod 166 | def fill_catalog(cls): 167 | """ 168 | Get all output tables and fill the catalog 169 | Each output table should have : 170 | 171 | 172 | """ 173 | keys = ('pcid', 'scales', 'offsets', 'point_schema', 'bbox', 'stored') 174 | results = cls.query(LOPOCS_OUTPUTS_QUERY) 175 | for res in results: 176 | cls.catalog[(res[0], res[1])] = LopocsTable( 177 | res[0], res[1], res[2], res[3], 178 | [ 179 | dict(zip(keys, values)) 180 | for values in zip(res[4], res[5], res[6], res[7], res[8], res[9]) 181 | ], 182 | res[10], res[11], res[12] 183 | ) 184 | 185 | @classmethod 186 | def init_app(cls, app): 187 | """ 188 | Initialize db session lazily 189 | """ 190 | query_con = ("postgresql://{PG_USER}:{PG_PASSWORD}@{PG_HOST}:" 191 | "{PG_PORT}/{PG_NAME}" 192 | .format(**app.config)) 193 | cls.pool = ThreadedConnectionPool(1, cpu_count(), query_con) 194 | # keep some configuration element 195 | cls.dbname = app.config["PG_NAME"] 196 | 197 | def __init__(self, table, column): 198 | """ 199 | Initialize a session for a given couple of table and column. 
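
        A minimal usage sketch (the table and column names here are only
        illustrative):

            create_app()    # loads the configuration and fills the db pool
            session = Session('public.mytable', 'points')
            npoints = session.approx_row_count * session.patch_size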
200 | 
201 |         :param table: table name (with schema prefixed) ex: public.mytable
202 |         :param column: column name for patches
203 |         """
204 |         if (table, column) not in self.catalog:
205 |             if not self.catalog:
206 |                 # catalog empty
207 |                 self.fill_catalog()
208 |             if (table, column) not in self.catalog:
209 |                 raise LopocsException('table or column not found in database')
210 | 
211 |         self.lopocstable = self.catalog[(table, column)]
212 |         self.table = table
213 |         self.column = column
214 | 
215 |     @property
216 |     def approx_row_count(self):
217 |         schema, table = self.table.split('.')
218 |         sql = """
219 |             SELECT
220 |                 reltuples ::BIGINT AS approximate_row_count
221 |             FROM pg_class
222 |             JOIN pg_catalog.pg_namespace n
223 |             ON n.oid = pg_class.relnamespace
224 |             WHERE relname = '{}' and nspname = '{}'
225 |         """.format(table, schema)
226 |         return self.query(sql)[0][0]
227 | 
228 |     @property
229 |     def patch_size(self):
230 |         sql = (
231 |             "select pc_summary({})::json->'npts' as npts from {} limit 1"
232 |             .format(self.column, self.table)
233 |         )
234 |         return self.query(sql)[0][0]
235 | 
236 |     @property
237 |     def numpoints(self):
238 |         sql = """
239 |             select sum(pc_numpoints({}))
240 |             from {}
241 |         """.format(self.column, self.table)
242 |         return self.query(sql)[0][0]
243 | 
244 |     @property
245 |     def boundingbox(self):
246 |         return self.lopocstable.bbox
247 | 
248 |     @classmethod
249 |     def compute_boundingbox(cls, table, column):
250 |         """
251 |         It's faster to use st_extent to find the x/y extent and
252 |         pc_patchmin/pc_patchmax to find the z extent than to use
253 |         pc_intersect for each dimension.
254 |         """
255 |         sql = (
256 |             "SELECT ST_Extent({}::geometry) as table_extent from {}"
257 |             .format(column, table)
258 |         )
259 |         bb = cls.query(sql)[0][0]
260 |         bb_xy = list_from_str_box(bb)
261 | 
262 |         extent = {}
263 |         extent['xmin'] = bb_xy[0]
264 |         extent['ymin'] = bb_xy[1]
265 |         extent['xmax'] = bb_xy[2]
266 |         extent['ymax'] = bb_xy[3]
267 | 
268 |         sql = """
269 |             select
270 |                 min(pc_patchmin({0}, 'z')) as zmin
271 |                 ,max(pc_patchmax({0}, 'z')) as zmax
272 |             from {1}
273 |         """.format(column, table)
274 |         bb_z = cls.query(sql)[0]
275 | 
276 |         bb = {}
277 |         bb.update(extent)
278 |         bb['zmin'] = float(bb_z[0])
279 |         bb['zmax'] = float(bb_z[1])
280 | 
281 |         return bb
282 | 
283 |     @property
284 |     def srsid(self):
285 |         return self.lopocstable.srid
286 | 
287 |     @property
288 |     def srs(self):
289 |         sr = SpatialReference()
290 |         sr.ImportFromEPSG(self.srsid)
291 |         return sr.ExportToWkt()
292 | 
293 |     @classmethod
294 |     def patch2greyhoundschema(cls, table, column):
295 |         '''Returns the json schema used by Greyhound
296 |         with dimension types adapted.
297 |         - https://github.com/hobu/greyhound/blob/master/doc/clientDevelopment.rst#schema
298 |         - https://www.pdal.io/dimensions.html
299 |         '''
300 |         dims = cls.query("""
301 |             select pc_summary({})::json->'dims' from {} limit 1
302 |         """.format(column, table))[0][0]
303 |         schema = []
304 |         for dim in dims:
305 |             schema.append({
306 |                 'size': dim['size'],
307 |                 'type': greyhound_types(dim['type']),
308 |                 'name': dim['name'],
309 |             })
310 |         return schema
311 | 
312 |     @classmethod
313 |     def create_pointcloud_lopocs_table(cls):
314 |         '''
315 |         Create the meta tables that store information used by lopocs to
316 |         stream patches in various formats.
317 | 
318 |         This function uses "packaging.version.parse" to evaluate the current
319 |         postgres version; depending on the system, it may return verbose
320 |         answers like "X.X.X (Ubuntu X.X-Xubuntu0.18.04.1)". 
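        For instance, the verbose form reduces like this (the version string
        is a made-up example):

            >>> '10.6 (Ubuntu 10.6-0ubuntu0.18.04.1)'.split()[0]
            '10.6'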
One has to split
321 |         this to keep only the simple version format.
322 |         '''
323 |         # to_regclass function changed its signature in postgresql >= 9.6
324 |         full_server_version = cls.query('show server_version')[0][0]
325 |         server_version = full_server_version.split()[0]  # Keep only "X.X.X"
326 |         if version.parse(server_version) < version.parse('9.6.0'):
327 |             cls.execute("""
328 |                 create or replace function to_regclass(text) returns regclass
329 |                 language sql as 'select to_regclass($1::cstring)'
330 |             """)
331 |         cls.execute(LOPOCS_TABLES_QUERY)
332 | 
333 |     @classmethod
334 |     def update_metadata(cls, table, column, srid, scale_x, scale_y, scale_z,
335 |                         offset_x, offset_y, offset_z):
336 |         '''
337 |         Add an entry to the lopocs metadata tables.
338 |         To be used after a fresh pc table creation.
339 |         '''
340 |         pcid = cls.query("""
341 |             select pcid from pointcloud_columns
342 |             where "schema" = %s and "table" = %s and "column" = %s
343 |             """, (table.split('.')[0], table.split('.')[1], column)
344 |         )[0][0]
345 | 
346 |         bbox = cls.compute_boundingbox(table, column)
347 |         # compute bbox with offset and scale applied
348 |         bbox_scaled = [0] * 6
349 |         bbox_scaled[0] = (bbox['xmin'] - offset_x) / scale_x
350 |         bbox_scaled[1] = (bbox['ymin'] - offset_y) / scale_y
351 |         bbox_scaled[2] = (bbox['zmin'] - offset_z) / scale_z
352 |         bbox_scaled[3] = (bbox['xmax'] - offset_x) / scale_x
353 |         bbox_scaled[4] = (bbox['ymax'] - offset_y) / scale_y
354 |         bbox_scaled[5] = (bbox['zmax'] - offset_z) / scale_z
355 | 
356 |         res = cls.query("""
357 |             delete from pointcloud_lopocs where schematable = %s and "column" = %s;
358 |             insert into pointcloud_lopocs (schematable, "column", srid, bbox)
359 |             values (%s, %s, %s, %s) returning id
360 |             """, (table, column, table, column, srid, bbox))
361 |         plid = res[0][0]
362 | 
363 |         scales = scale_x, scale_y, scale_z
364 |         offsets = offset_x, offset_y, offset_z
365 | 
366 |         json_schema = cls.patch2greyhoundschema(table, column)
367 | 
368 |         cls.execute("""
369 |             insert into pointcloud_lopocs_outputs
370 |             (id, pcid, scales, offsets, stored, bbox, point_schema)
371 |             values (%s, %s, %s, %s, True, %s, %s)
372 |             """, (
373 |             plid, pcid, iterable2pgarray(scales), iterable2pgarray(offsets),
374 |             iterable2pgarray(bbox_scaled), Json(json_schema)))
375 | 
376 |     @classmethod
377 |     def add_output_schema(cls, table, column,
378 |                           scale_x, scale_y, scale_z,
379 |                           offset_x, offset_y, offset_z, srid,
380 |                           schema, compression='none'):
381 |         """
382 |         Adds a new schema used to stream points. 
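        The bbox recorded for each output is expressed in quantized space,
        i.e. scaled = (world - offset) / scale on each axis; with
        illustrative numbers, xmin = 728630.47, offset_x = 728630.47 and
        scale_x = 0.01 quantize to 0.0.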
The new point format
383 |         will be added to the database if it doesn't exist.
384 |         """
385 |         bbox = cls.compute_boundingbox(table, column)
386 | 
387 |         # compute bbox with offset and scale applied
388 |         bbox_scaled = [0] * 6
389 |         bbox_scaled[0] = (bbox['xmin'] - offset_x) / scale_x
390 |         bbox_scaled[1] = (bbox['ymin'] - offset_y) / scale_y
391 |         bbox_scaled[2] = (bbox['zmin'] - offset_z) / scale_z
392 |         bbox_scaled[3] = (bbox['xmax'] - offset_x) / scale_x
393 |         bbox_scaled[4] = (bbox['ymax'] - offset_y) / scale_y
394 |         bbox_scaled[5] = (bbox['zmax'] - offset_z) / scale_z
395 | 
396 |         scales = scale_x, scale_y, scale_z
397 |         offsets = offset_x, offset_y, offset_z
398 | 
399 |         xmlschema = create_pointcloud_schema(schema, scales, offsets)
400 | 
401 |         # check if the schema already exists
402 |         res = Session.query(
403 |             """ select pcid from pointcloud_formats
404 |                 where srid = %s and schema = %s
405 |             """, (srid, xmlschema)
406 |         )
407 |         if not res:
408 |             # insert schema
409 |             res = cls.query(
410 |                 """ with tmp as (
411 |                         select max(pcid) + 1 as pcid
412 |                         from pointcloud_formats
413 |                     )
414 |                     insert into pointcloud_formats
415 |                     select pcid, %s, %s from tmp
416 |                     returning pcid
417 |                 """, (srid, xmlschema)
418 |             )
419 | 
420 |         pcid = res[0][0]
421 | 
422 |         # check if lopocs already contains this configuration
423 |         plid = cls.query("""
424 |             select id from pointcloud_lopocs
425 |             where schematable = %s and "column" = %s;
426 |             """, (table, column))[0][0]
427 | 
428 |         cls.execute("""
429 |             insert into pointcloud_lopocs_outputs
430 |             (id, pcid, scales, offsets, stored, bbox, point_schema)
431 |             values (%s, %s, %s, %s, False, %s, %s)
432 |             """, (
433 |             plid, pcid, iterable2pgarray(scales), iterable2pgarray(offsets),
434 |             iterable2pgarray(bbox_scaled), Json(schema)))
435 | 
436 |         return pcid, bbox_scaled
437 | 
438 |     @classmethod
439 |     @contextmanager
440 |     def _conn(cls):
441 |         conn = cls.pool.getconn()
442 |         conn.autocommit = True
443 |         yield conn
444 |         cls.pool.putconn(conn)
445 | 
446 |     @classmethod
447 |     @contextmanager
448 |     def _execute(cls, query, parameters=None):
449 |         with cls._conn() as conn:
450 |             with conn.cursor() as cursor:
451 |                 cursor.execute(query, parameters)
452 |                 yield cursor
453 | 
454 |     @classmethod
455 |     def execute(cls, query, parameters=None):
456 |         """Execute a pg statement without fetching results (use for DDL statements)
457 |         """
458 |         with cls._execute(query, parameters):
459 |             pass
460 | 
461 |     @classmethod
462 |     def query(cls, query, parameters=None):
463 |         """Perform a single query and fetch all results
464 |         """
465 |         with cls._execute(query, parameters) as cursor:
466 |             res = cursor.fetchall()
467 |             return res
468 | --------------------------------------------------------------------------------
/lopocs/greyhound.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import json
3 | import time
4 | from binascii import unhexlify
5 | from concurrent.futures import ThreadPoolExecutor
6 | 
7 | from flask import make_response
8 | 
9 | from .database import Session
10 | from .utils import (
11 |     list_from_str, read_in_cache,
12 |     write_in_cache, boundingbox_to_polygon,
13 |     patch_numpoints, hexa_signed_int32
14 | )
15 | from .conf import Config
16 | from .stats import Stats
17 | 
18 | 
19 | # https://github.com/potree/potree/blob/master/src/loader/GreyhoundLoader.js#L194
20 | LOADER_GREYHOUND_MIN_DEPTH = 8
21 | 
22 | 
23 | def GreyhoundInfo(table, column):
24 |     # invoke a new db session
25 |     session = Session(table, 
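        # note: (table, column) must already be registered in the
        # pointcloud_lopocs catalog; Session raises LopocsException otherwise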
column) 26 | 27 | box = session.lopocstable.bbox 28 | # get object representing the stored patches format 29 | stored_patches = session.lopocstable.filter_stored_output() 30 | # number of points for the first patch 31 | npoints = session.approx_row_count * session.patch_size 32 | 33 | return { 34 | "baseDepth": 0, 35 | "bounds": [box['xmin'], box['ymin'], box['zmin'], 36 | box['xmax'], box['ymax'], box['zmax']], 37 | "boundsConforming": [box['xmin'], box['ymin'], box['zmin'], 38 | box['xmax'], box['ymax'], box['zmax']], 39 | "numPoints": npoints, 40 | "schema": stored_patches['point_schema'], 41 | "srs": session.srs, 42 | "type": "octree", 43 | "scale": stored_patches['scales'], 44 | "offset": stored_patches['offsets'] 45 | } 46 | 47 | 48 | def GreyhoundRead(table, column, offset, scale, bounds, depth, 49 | depthBegin, depthEnd, schema, compress): 50 | 51 | session = Session(table, column) 52 | 53 | # we treat scales as list 54 | scales = [scale] * 3 55 | # convert string schema to a list of dict 56 | schema = json.loads(schema) 57 | pcid = None 58 | 59 | if offset is None and scale is None and bounds is None: 60 | # normalization request from potree gives no bounds, no scale and 61 | # no offset, only a schema 62 | for output in session.lopocstable.outputs: 63 | if schema == output['point_schema']: 64 | pcid = output['pcid'] 65 | if not pcid: 66 | obj = session.lopocstable.outputs[0] 67 | pcid, bbox = session.add_output_schema( 68 | session.table, session.column, 69 | obj['scales'][0], obj['scales'][1], obj['scales'][2], 70 | obj['offsets'][0], obj['offsets'][1], obj['offsets'][2], 71 | session.lopocstable.srid, schema) 72 | session.lopocstable.outputs.append(dict( 73 | scales=scales, 74 | offsets=obj['offsets'], 75 | pcid=pcid, 76 | point_schema=schema, 77 | stored=False, 78 | bbox=bbox 79 | )) 80 | 81 | else: 82 | offset = list_from_str(offset) 83 | offsets = [round(off, 2) for off in offset] 84 | # check if schema, scale and offset exists in our db 85 | requested = [scales, offsets, schema] 86 | 87 | for output in session.lopocstable.outputs: 88 | oschema = output['point_schema'] 89 | if requested == [output['scales'], output['offsets'], oschema]: 90 | pcid = output['pcid'] 91 | 92 | if not pcid: 93 | # insert new schem 94 | pcid, bbox = session.add_output_schema( 95 | session.table, session.column, 96 | scales[0], scales[1], scales[2], 97 | offsets[0], offsets[1], offsets[2], 98 | session.lopocstable.srid, schema) 99 | # update cache 100 | session.lopocstable.outputs.append(dict( 101 | scales=scales, 102 | offsets=offsets, 103 | pcid=pcid, 104 | point_schema=schema, 105 | stored=False, 106 | bbox=bbox 107 | )) 108 | 109 | # prepare parameters 110 | if not bounds and depth == 0: 111 | bbox = [ 112 | session.boundingbox['xmin'], 113 | session.boundingbox['ymin'], 114 | session.boundingbox['zmin'], 115 | session.boundingbox['xmax'], 116 | session.boundingbox['ymax'], 117 | session.boundingbox['zmax'] 118 | ] 119 | else: 120 | bbox = list_from_str(bounds) 121 | # apply scale and offset to bbox for querying database 122 | bbox[0] = bbox[0] * scales[0] + offset[0] 123 | bbox[1] = bbox[1] * scales[1] + offset[1] 124 | bbox[2] = bbox[2] * scales[2] + offset[2] 125 | bbox[3] = bbox[3] * scales[0] + offset[0] 126 | bbox[4] = bbox[4] * scales[1] + offset[1] 127 | bbox[5] = bbox[5] * scales[2] + offset[2] 128 | 129 | if depth is not None: 130 | lod = depth 131 | else: 132 | lod = depthEnd - 1 133 | if lod >= LOADER_GREYHOUND_MIN_DEPTH: 134 | lod -= LOADER_GREYHOUND_MIN_DEPTH 135 | 136 | # get 
points in database 137 | if Config.STATS: 138 | t0 = int(round(time.time() * 1000)) 139 | 140 | [read, npoints] = get_points(session, bbox, pcid, lod, compress) 141 | 142 | if Config.STATS: 143 | t1 = int(round(time.time() * 1000)) 144 | 145 | # log stats 146 | if npoints > 0 and Config.STATS: 147 | stats = Stats.get() 148 | stats_npoints = stats['npoints'] + npoints 149 | Stats.set(stats_npoints, (t1 - t0) + stats['time_msec']) 150 | stats = Stats.get() 151 | print("Points/sec: ", stats['rate_sec']) 152 | 153 | # build flask response 154 | response = make_response(read) 155 | response.headers['content-type'] = 'application/octet-stream' 156 | return response 157 | 158 | 159 | def GreyhoundHierarchy(table, column, bounds, depthBegin, depthEnd, scale, offset): 160 | 161 | session = Session(table, column) 162 | 163 | lod_min = depthBegin - LOADER_GREYHOUND_MIN_DEPTH 164 | 165 | lod_max = depthEnd - LOADER_GREYHOUND_MIN_DEPTH - 1 166 | if lod_max > (Config.DEPTH - 1): 167 | lod_max = Config.DEPTH - 1 168 | 169 | bbox = list_from_str(bounds) 170 | 171 | if offset: 172 | # apply scale and offset if needed 173 | offset = list_from_str(offset) 174 | bbox[0] = bbox[0] * scale + offset[0] 175 | bbox[1] = bbox[1] * scale + offset[1] 176 | bbox[2] = bbox[2] * scale + offset[2] 177 | bbox[3] = bbox[3] * scale + offset[0] 178 | bbox[4] = bbox[4] * scale + offset[1] 179 | bbox[5] = bbox[5] * scale + offset[2] 180 | 181 | if lod_min == 0 and Config.ROOT_HCY: 182 | filename = Config.ROOT_HCY 183 | else: 184 | filename = ("{0}_{1}_{2}_{3}_{4}.hcy" 185 | .format(session.table, session.column, lod_min, lod_max, 186 | '_'.join(str(e) for e in bbox))) 187 | cached_hcy = read_in_cache(filename) 188 | 189 | if Config.DEBUG: 190 | print("hierarchy file: {0}".format(filename)) 191 | 192 | if cached_hcy: 193 | resp = cached_hcy 194 | else: 195 | 196 | new_hcy = build_hierarchy_from_pg(session, lod_min, lod_max, bbox) 197 | write_in_cache(new_hcy, filename) 198 | resp = new_hcy 199 | 200 | return resp 201 | 202 | 203 | def sql_hierarchy(session, box, lod): 204 | poly = boundingbox_to_polygon(box) 205 | 206 | maxpp_patch = session.lopocstable.max_points_per_patch 207 | maxpp_query = session.lopocstable.max_patches_per_query 208 | 209 | # retrieve the number of points to select in a pcpatch 210 | range_min = 1 211 | range_max = 1 212 | 213 | if maxpp_patch: 214 | range_min = 1 215 | range_max = maxpp_patch 216 | else: 217 | beg = 0 218 | for i in range(0, lod): 219 | beg = beg + pow(4, i) 220 | 221 | end = 0 222 | for i in range(0, lod + 1): 223 | end = end + pow(4, i) 224 | 225 | range_min = beg + 1 226 | range_max = end - beg 227 | 228 | # build the sql query 229 | sql_limit = "" 230 | if maxpp_query: 231 | sql_limit = " limit {0} ".format(maxpp_query) 232 | 233 | if Config.USE_MORTON: 234 | sql = """ 235 | select 236 | pc_union(pc_filterbetween(pc_range({0}, {4}, {5}), 'Z', {6}, {7} )) 237 | from 238 | ( 239 | select {0} from {1} 240 | where pc_intersects( 241 | {0}, 242 | st_geomfromtext('polygon (({2}))',{3}) 243 | ) order by morton {8} 244 | )_ 245 | """.format(session.column, session.table, poly, session.srsid, 246 | range_min, range_max, box[2], box[5], sql_limit) 247 | else: 248 | sql = """ 249 | select 250 | pc_union(pc_filterbetween(pc_range({0}, {4}, {5}), 'Z', {6}, {7} )) 251 | from 252 | ( 253 | select {0} from {1} 254 | where pc_intersects( 255 | {0}, 256 | st_geomfromtext('polygon (({2}))',{3}) 257 | ) {8} 258 | )_ 259 | """.format(session.column, session.table, poly, session.srsid, 260 | range_min, 
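                   # pc_range({0}, {4}, {5}) keeps only the per-patch slice of
                   # points assigned to this LOD; the reverse Morton ("midoc")
                   # ordering applied at load time spreads that slice over the
                   # patch instead of taking a contiguous chunk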
range_max, box[2], box[5], sql_limit) 261 | return sql 262 | 263 | 264 | def get_points_query(session, box, schema_pcid, lod, compress): 265 | poly = boundingbox_to_polygon(box) 266 | 267 | # retrieve the number of points to select in a pcpatch 268 | range_min = 1 269 | range_max = 1 270 | 271 | maxppp = session.lopocstable.max_points_per_patch 272 | 273 | if maxppp: 274 | range_min = 1 275 | range_max = maxppp 276 | else: 277 | # adapted to midoc filter 278 | beg = 0 279 | for i in range(0, lod): 280 | beg = beg + pow(4, i) 281 | 282 | end = 0 283 | for i in range(0, lod + 1): 284 | end = end + pow(4, i) 285 | 286 | range_min = beg + 1 287 | range_max = end - beg 288 | 289 | # build the sql query 290 | sql_limit = "" 291 | maxppq = session.lopocstable.max_patches_per_query 292 | if maxppq: 293 | sql_limit = " limit {0} ".format(maxppq) 294 | 295 | if Config.USE_MORTON: 296 | sql = "select " 297 | if compress: 298 | sql += "pc_compress(" 299 | sql += """ 300 | pc_transform( 301 | pc_union( 302 | pc_filterbetween( 303 | pc_range({0}, {4}, {5}), 304 | 'Z', {6}, {7} 305 | ) 306 | ), {9} 307 | ) 308 | """ 309 | if compress: 310 | sql += ", 'laz')" 311 | sql += """ 312 | from 313 | ( 314 | select {0} from {1} 315 | where pc_intersects( 316 | {0}, 317 | st_geomfromtext('polygon (({2}))',{3})) 318 | order by morton {8} 319 | )_ 320 | """ 321 | else: 322 | sql = "select " 323 | if compress: 324 | sql += "pc_compress(" 325 | sql += """ 326 | pc_transform( 327 | pc_union( 328 | pc_filterbetween( 329 | pc_range({0}, {4}, {5}), 330 | 'Z', {6}, {7} 331 | ) 332 | ), {9} 333 | ) 334 | """ 335 | if compress: 336 | sql += ", 'laz')" 337 | sql += """ 338 | from 339 | ( 340 | select {0} from {1} 341 | where pc_intersects( 342 | {0}, 343 | st_geomfromtext('polygon (({2}))',{3})) 344 | {8} 345 | )_ 346 | """ 347 | 348 | sql = sql.format(session.column, session.table, poly, session.srsid, 349 | range_min, range_max, box[2], box[5], sql_limit, 350 | schema_pcid) 351 | return sql 352 | 353 | 354 | def get_points(session, box, schema_pcid, lod, compress): 355 | 356 | npoints = 0 357 | hexbuffer = bytearray() 358 | sql = get_points_query(session, box, schema_pcid, lod, compress) 359 | 360 | if Config.DEBUG: 361 | print(sql) 362 | 363 | try: 364 | pcpatch_wkb = session.query(sql)[0][0] 365 | # to test output from pgpointcloud : 366 | 367 | # get json schema representation 368 | # schema = session.lopocstable.point_schema 369 | # if compress: 370 | # decompress(pcpatch_wkb, schema) 371 | 372 | # retrieve number of points in wkb pgpointcloud patch 373 | npoints = patch_numpoints(pcpatch_wkb) 374 | 375 | # extract data 376 | offset = 34 if compress else 30 377 | hexbuffer = unhexlify(pcpatch_wkb[offset:]) 378 | 379 | # add number of points 380 | hexbuffer += hexa_signed_int32(npoints) 381 | except: 382 | hexbuffer.extend(hexa_signed_int32(0)) 383 | 384 | if Config.DEBUG: 385 | print("LOD: ", lod) 386 | print("DEPTH: ", Config.DEPTH) 387 | print("NUM POINTS RETURNED: ", npoints) 388 | 389 | return [hexbuffer, npoints] 390 | 391 | 392 | def fake_hierarchy(begin, end, npatchs): 393 | p = {} 394 | begin = begin + 1 395 | 396 | if begin != end: 397 | p['n'] = npatchs 398 | 399 | if begin != (end - 1): 400 | p['nwu'] = fake_hierarchy(begin, end, npatchs) 401 | p['nwd'] = fake_hierarchy(begin, end, npatchs) 402 | p['neu'] = fake_hierarchy(begin, end, npatchs) 403 | p['ned'] = fake_hierarchy(begin, end, npatchs) 404 | p['swu'] = fake_hierarchy(begin, end, npatchs) 405 | p['swd'] = fake_hierarchy(begin, end, npatchs) 406 | 
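        # child keys follow greyhound's octant naming:
        # n/s = north/south, w/e = west/east, u/d = up/down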
p['seu'] = fake_hierarchy(begin, end, npatchs) 407 | p['sed'] = fake_hierarchy(begin, end, npatchs) 408 | 409 | return p 410 | 411 | 412 | def build_hierarchy_from_pg(session, lod, lod_max, bbox): 413 | 414 | # pcid is needed to get max attributes 415 | sql = sql_hierarchy(session, bbox, lod) 416 | pcpatch_wkb = session.query(sql)[0][0] 417 | 418 | hierarchy = {} 419 | if lod <= lod_max and pcpatch_wkb: 420 | npoints = patch_numpoints(pcpatch_wkb) 421 | hierarchy['n'] = npoints 422 | 423 | lod += 1 424 | 425 | # run leaf in threads 426 | if lod <= lod_max: 427 | # width / length / height 428 | width = bbox[3] - bbox[0] 429 | length = bbox[4] - bbox[1] 430 | height = bbox[5] - bbox[2] 431 | 432 | up = bbox[5] 433 | middle = up - height / 2 434 | down = bbox[2] 435 | 436 | x = bbox[0] 437 | y = bbox[1] 438 | 439 | # build bboxes for leaf 440 | bbox_nwd = [x, y + length / 2, down, x + width / 2, y + length, middle] 441 | bbox_nwu = [x, y + length / 2, middle, x + width / 2, y + length, up] 442 | bbox_ned = [x + width / 2, y + length / 2, down, x + width, y + length, middle] 443 | bbox_neu = [x + width / 2, y + length / 2, middle, x + width, y + length, up] 444 | bbox_swd = [x, y, down, x + width / 2, y + length / 2, middle] 445 | bbox_swu = [x, y, middle, x + width / 2, y + length / 2, up] 446 | bbox_sed = [x + width / 2, y, down, x + width, y + length / 2, middle] 447 | bbox_seu = [x + width / 2, y, middle, x + width, y + length / 2, up] 448 | 449 | # run leaf in threads 450 | futures = {} 451 | with ThreadPoolExecutor(max_workers=Session.pool.maxconn) as e: 452 | futures["nwd"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_nwd) 453 | futures["nwu"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_nwu) 454 | futures["ned"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_ned) 455 | futures["neu"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_neu) 456 | futures["swd"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_swd) 457 | futures["swu"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_swu) 458 | futures["sed"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_sed) 459 | futures["seu"] = e.submit(build_hierarchy_from_pg_single, session, lod, lod_max, bbox_seu) 460 | 461 | for code, hier in futures.items(): 462 | hierarchy[code] = hier.result() 463 | 464 | return hierarchy 465 | 466 | 467 | def build_hierarchy_from_pg_single(session, lod, lod_max, bbox): 468 | # run sql 469 | sql = sql_hierarchy(session, bbox, lod) 470 | pcpatch_wkb = session.query(sql)[0][0] 471 | hierarchy = {} 472 | if lod <= lod_max and pcpatch_wkb: 473 | npoints = patch_numpoints(pcpatch_wkb) 474 | hierarchy['n'] = npoints 475 | 476 | lod += 1 477 | 478 | if lod <= lod_max: 479 | # width / length / height 480 | width = bbox[3] - bbox[0] 481 | length = bbox[4] - bbox[1] 482 | height = bbox[5] - bbox[2] 483 | 484 | up = bbox[5] 485 | middle = up - height / 2 486 | down = bbox[2] 487 | 488 | x = bbox[0] 489 | y = bbox[1] 490 | 491 | # nwd 492 | bbox_nwd = [x, y + length / 2, down, x + width / 2, y + length, middle] 493 | h_nwd = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_nwd) 494 | if h_nwd: 495 | hierarchy['nwd'] = h_nwd 496 | 497 | # nwu 498 | bbox_nwu = [x, y + length / 2, middle, x + width / 2, y + length, up] 499 | h_nwu = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_nwu) 500 | if h_nwu: 501 | hierarchy['nwu'] = h_nwu 502 | 503 | 
# ned 504 | bbox_ned = [x + width / 2, y + length / 2, down, x + width, y + length, middle] 505 | h_ned = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_ned) 506 | if h_ned: 507 | hierarchy['ned'] = h_ned 508 | 509 | # neu 510 | bbox_neu = [x + width / 2, y + length / 2, middle, x + width, y + length, up] 511 | h_neu = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_neu) 512 | if h_neu: 513 | hierarchy['neu'] = h_neu 514 | 515 | # swd 516 | bbox_swd = [x, y, down, x + width / 2, y + length / 2, middle] 517 | h_swd = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_swd) 518 | if h_swd: 519 | hierarchy['swd'] = h_swd 520 | 521 | # swu 522 | bbox_swu = [x, y, middle, x + width / 2, y + length / 2, up] 523 | h_swu = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_swu) 524 | if h_swu: 525 | hierarchy['swu'] = h_swu 526 | 527 | # sed 528 | bbox_sed = [x + width / 2, y, down, x + width, y + length / 2, middle] 529 | h_sed = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_sed) 530 | if h_sed: 531 | hierarchy['sed'] = h_sed 532 | 533 | # seu 534 | bbox_seu = [x + width / 2, y, middle, x + width, y + length / 2, up] 535 | h_seu = build_hierarchy_from_pg_single(session, lod, lod_max, bbox_seu) 536 | if h_seu: 537 | hierarchy['seu'] = h_seu 538 | 539 | return hierarchy 540 | -------------------------------------------------------------------------------- /lopocs/potreeschema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | potree_schema = [ 4 | { 5 | "name": "X", 6 | "size": 4, 7 | "type": "signed" 8 | }, 9 | { 10 | "name": "Y", 11 | "size": 4, 12 | "type": "signed" 13 | }, 14 | { 15 | "name": "Z", 16 | "size": 4, 17 | "type": "signed" 18 | }, 19 | { 20 | "name": "Intensity", 21 | "size": 2, 22 | "type": "unsigned" 23 | }, 24 | { 25 | "name": "Classification", 26 | "size": 1, 27 | "type": "unsigned" 28 | }, 29 | { 30 | "name": "Red", 31 | "size": 2, 32 | "type": "unsigned" 33 | }, 34 | { 35 | "name": "Green", 36 | "size": 2, 37 | "type": "unsigned" 38 | }, 39 | { 40 | "name": "Blue", 41 | "size": 2, 42 | "type": "unsigned" 43 | } 44 | ] 45 | 46 | ctypes_map = { 47 | ('unsigned', 1): 'uint8_t', 48 | ('unsigned', 2): 'uint16_t', 49 | ('unsigned', 4): 'uint32_t', 50 | ('signed', 2): 'int16_t', 51 | ('signed', 4): 'int32_t', 52 | ('floating', 4): 'float', 53 | } 54 | 55 | dim_skeleton_xyz = """ 56 | {pos} 57 | {size} 58 | {name} 59 | {name} 60 | {ctype} 61 | {scale} 62 | {offset} 63 | true 64 | 65 | """ 66 | 67 | dim_skeleton = """ 68 | {pos} 69 | {size} 70 | {name} 71 | {name} 72 | {ctype} 73 | true 74 | 75 | """ 76 | 77 | 78 | schema_skeleton = """ 79 | 80 | {dims} 81 | 82 | {compression} 83 | point 84 | """ 85 | 86 | 87 | def dim_mapper(dimension, scales, offsets, pos): 88 | '''redirect to correct xml description depending 89 | of the dimension type 90 | ''' 91 | if dimension['name'].lower() in ('x', 'y', 'z'): 92 | return dim_skeleton_xyz.format( 93 | **dict(dimension, 94 | ctype=ctypes_map[(dimension['type'], dimension['size'])], 95 | scale=scales[dim_arr_index(dimension)], 96 | offset=offsets[dim_arr_index(dimension)], 97 | pos=pos) 98 | ) 99 | 100 | return dim_skeleton.format( 101 | **dict(dimension, 102 | ctype=ctypes_map[(dimension['type'], dimension['size'])], 103 | pos=pos)) 104 | 105 | 106 | def dim_arr_index(dim): 107 | index = {'x': 0, 'y': 1, 'z': 2} 108 | return index[dim['name'].lower()] 109 | 110 | 111 | def create_pointcloud_schema(dimensions, scales, offsets, 
compression='none'): 112 | ''' 113 | Create a pointcloud schema corresponding with given parameters 114 | Dimensions looks like : 115 | [ 116 | { 117 | "name": "X", 118 | "size": 4, 119 | "type": "signed" 120 | },... 121 | ] 122 | :param scales: array of 3 scales for x, y, z 123 | :param ofsets: array of 3 offset 124 | ''' 125 | pcschema = schema_skeleton.format( 126 | compression=compression, 127 | dims=''.join( 128 | dim_mapper(d, scales, offsets, pos) 129 | for pos, d in enumerate(dimensions, start=1) 130 | ), 131 | ) 132 | return pcschema 133 | 134 | 135 | potree_page = """ 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | Potree Viewer 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 |
173 | 174 |
175 | 180 | 181 |
182 |
183 | 184 |
185 |
186 | 187 | 238 | 239 | 240 | 241 | """ 242 | -------------------------------------------------------------------------------- /lopocs/stats.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import redis 4 | from .conf import Config 5 | 6 | 7 | class Stats(): 8 | 9 | r = None 10 | 11 | @classmethod 12 | def set(cls, n, t): 13 | cls.r.set('npoints', str(n).encode('utf-8')) 14 | cls.r.set('time_msec', str(t).encode('utf-8')) 15 | 16 | @classmethod 17 | def get(cls): 18 | stats = {} 19 | 20 | t = int(cls.r.get('time_msec').decode('utf-8')) 21 | stats['time_msec'] = t 22 | 23 | n = int(cls.r.get('npoints').decode('utf-8')) 24 | stats['npoints'] = n 25 | 26 | if t > 0: 27 | stats['rate_msec'] = n/t 28 | stats['rate_sec'] = (n/t)*1000 29 | else: 30 | stats['rate_msec'] = 0.0 31 | stats['rate_sec'] = 0.0 32 | 33 | return stats 34 | 35 | @classmethod 36 | def init(cls): 37 | cls.r = redis.StrictRedis(host='127.0.0.1', 38 | port=Config.STATS_SERVER_PORT, db=0) 39 | cls.r.set('rate', str(0.0).encode('utf-8')) 40 | cls.r.set('npoints', str(0).encode('utf-8')) 41 | cls.r.set('time_msec', str(0).encode('utf-8')) 42 | -------------------------------------------------------------------------------- /lopocs/threedtiles.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import json 3 | import math 4 | 5 | import numpy as np 6 | from flask import make_response 7 | 8 | from py3dtiles.feature_table import ( 9 | FeatureTableHeader, FeatureTableBody, FeatureTable 10 | ) 11 | from py3dtiles.pnts import PntsBody, PntsHeader, Pnts 12 | 13 | from .utils import ( 14 | read_uncompressed_patch, boundingbox_to_polygon, list_from_str, patch_numpoints 15 | ) 16 | from .conf import Config 17 | from .database import Session 18 | 19 | LOD_MIN = 0 20 | LOD_MAX = 5 21 | LOD_LEN = LOD_MAX + 1 - LOD_MIN 22 | 23 | 24 | def ThreeDTilesInfo(table, column): 25 | 26 | session = Session(table, column) 27 | # bounding box 28 | box = session.boundingbox 29 | 30 | # number of points for the first patch 31 | npoints = session.approx_row_count * session.patch_size 32 | 33 | # srs 34 | srs = session.srs 35 | 36 | # build json 37 | return { 38 | "bounds": [box['xmin'], box['ymin'], box['zmin'], 39 | box['xmax'], box['ymax'], box['zmax']], 40 | "numPoints": npoints, 41 | "srs": srs 42 | } 43 | 44 | 45 | def ThreeDTilesRead(table, column, bounds, lod): 46 | 47 | session = Session(table, column) 48 | # offsets = [round(off, 2) for off in list_from_str(offsets)] 49 | box = list_from_str(bounds) 50 | # requested = [scales, offsets] 51 | stored_patches = session.lopocstable.filter_stored_output() 52 | schema = stored_patches['point_schema'] 53 | pcid = stored_patches['pcid'] 54 | # scales = [scale] * 3 55 | scales = stored_patches['scales'] 56 | offsets = stored_patches['offsets'] 57 | 58 | [tile, npoints] = get_points(session, box, lod, offsets, pcid, scales, schema) 59 | 60 | if Config.DEBUG: 61 | tile.sync() 62 | print("NPOINTS: ", npoints) 63 | 64 | # build flask response 65 | response = make_response(tile.to_array().tostring()) 66 | response.headers['content-type'] = 'application/octet-stream' 67 | return response 68 | 69 | 70 | def classification_to_rgb(points): 71 | """ 72 | map LAS Classification to RGB colors. 
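    The palette below is this module's own choice: e.g. class 6 (building)
    maps to a brownish (186, 79, 63), and classes without a mask here come
    out black (0, 0, 0).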
73 | See LAS spec for codes : 74 | http://www.asprs.org/wp-content/uploads/2010/12/asprs_las_format_v11.pdf 75 | 76 | :param points: points as a structured numpy array 77 | :returns: numpy.record with dtype [('Red', 'u1'), ('Green', 'u1'), ('Blue', 'u1')]) 78 | """ 79 | # building (brown) 80 | building_mask = (points['Classification'] == 6).astype(np.int) 81 | red = building_mask * 186 82 | green = building_mask * 79 83 | blue = building_mask * 63 84 | # high vegetation (green) 85 | veget_mask = (points['Classification'] == 5).astype(np.int) 86 | red += veget_mask * 140 87 | green += veget_mask * 156 88 | blue += veget_mask * 8 89 | # medium vegetation 90 | veget_mask = (points['Classification'] == 4).astype(np.int) 91 | red += veget_mask * 171 92 | green += veget_mask * 200 93 | blue += veget_mask * 116 94 | # low vegetation 95 | veget_mask = (points['Classification'] == 3).astype(np.int) 96 | red += veget_mask * 192 97 | green += veget_mask * 213 98 | blue += veget_mask * 160 99 | # water (blue) 100 | water_mask = (points['Classification'] == 9).astype(np.int) 101 | red += water_mask * 141 102 | green += water_mask * 179 103 | blue += water_mask * 198 104 | # ground (light brown) 105 | grd_mask = (points['Classification'] == 2).astype(np.int) 106 | red += grd_mask * 226 107 | green += grd_mask * 230 108 | blue += grd_mask * 229 109 | # Unclassified (grey) 110 | grd_mask = (points['Classification'] == 1).astype(np.int) 111 | red += grd_mask * 176 112 | green += grd_mask * 185 113 | blue += grd_mask * 182 114 | 115 | rgb_reduced = np.c_[red, green, blue] 116 | rgb = np.array(np.core.records.fromarrays(rgb_reduced.T, dtype=cdt)) 117 | return rgb 118 | 119 | 120 | cdt = np.dtype([('Red', np.uint8), ('Green', np.uint8), ('Blue', np.uint8)]) 121 | pdt = np.dtype([('X', np.float32), ('Y', np.float32), ('Z', np.float32)]) 122 | 123 | 124 | def get_points(session, box, lod, offsets, pcid, scales, schema): 125 | sql = sql_query(session, box, pcid, lod) 126 | if Config.DEBUG: 127 | print(sql) 128 | 129 | pcpatch_wkb = session.query(sql)[0][0] 130 | points, npoints = read_uncompressed_patch(pcpatch_wkb, schema) 131 | fields = points.dtype.fields.keys() 132 | 133 | if 'Red' in fields: 134 | if max(points['Red']) > 255: 135 | # normalize 136 | rgb_reduced = np.c_[points['Red'] % 255, points['Green'] % 255, points['Blue'] % 255] 137 | rgb = np.array(np.core.records.fromarrays(rgb_reduced.T, dtype=cdt)) 138 | else: 139 | rgb = points[['Red', 'Green', 'Blue']].astype(cdt) 140 | elif 'Classification' in fields: 141 | rgb = classification_to_rgb(points) 142 | else: 143 | # No colors 144 | # FIXME: compute color gradient based on elevation 145 | rgb_reduced = np.zeros((npoints, 3), dtype=int) 146 | rgb = np.array(np.core.records.fromarrays(rgb_reduced, dtype=cdt)) 147 | 148 | quantized_points_r = np.c_[ 149 | points['X'] * scales[0], 150 | points['Y'] * scales[1], 151 | points['Z'] * scales[2] 152 | ] 153 | 154 | quantized_points = np.array(np.core.records.fromarrays(quantized_points_r.T, dtype=pdt)) 155 | 156 | fth = FeatureTableHeader.from_dtype( 157 | quantized_points.dtype, rgb.dtype, npoints 158 | ) 159 | ftb = FeatureTableBody() 160 | ftb.positions_itemsize = fth.positions_dtype.itemsize 161 | ftb.colors_itemsize = fth.colors_dtype.itemsize 162 | ftb.positions_arr = quantized_points.view(np.uint8) 163 | ftb.colors_arr = rgb.view(np.uint8) 164 | 165 | ft = FeatureTable() 166 | ft.header = fth 167 | ft.body = ftb 168 | 169 | # tile 170 | tb = PntsBody() 171 | tb.feature_table = ft 172 | th = 
PntsHeader() 173 | tile = Pnts() 174 | tile.body = tb 175 | tile.header = th 176 | tile.body.feature_table.header.rtc = offsets 177 | 178 | return [tile, npoints] 179 | 180 | 181 | def sql_query(session, box, pcid, lod): 182 | poly = boundingbox_to_polygon(box) 183 | 184 | maxppp = session.lopocstable.max_points_per_patch 185 | # FIXME: need to be cached 186 | patch_size = session.patch_size 187 | 188 | if maxppp: 189 | range_min = 1 190 | range_max = maxppp 191 | else: 192 | # FIXME: may skip some points if patch_size/lod_len is decimal 193 | # we need to fix either here or at loading with the patch_size and lod bounds 194 | range_min = lod * int(patch_size / LOD_LEN) + 1 195 | range_max = (lod + 1) * int(patch_size / LOD_LEN) 196 | 197 | # build the sql query 198 | sql_limit = "" 199 | maxppq = session.lopocstable.max_patches_per_query 200 | if maxppq: 201 | sql_limit = " limit {0} ".format(maxppq) 202 | 203 | if Config.USE_MORTON: 204 | sql = ("select pc_union(" 205 | "pc_filterbetween( " 206 | "pc_range({0}, {4}, {5}), 'Z', {6}, {7} )) from " 207 | "(select {0} from {1} " 208 | "where pc_intersects({0}, st_geomfromtext('polygon ((" 209 | "{2}))',{3})) order by morton {8})_;" 210 | .format(session.column, session.table, 211 | poly, session.srsid, range_min, range_max, 212 | box[2] - 0.1, box[5] + 0.1, sql_limit, 213 | pcid)) 214 | else: 215 | sql = ("select pc_compress(pc_transform(pc_union(" 216 | "pc_filterbetween( " 217 | "pc_range({0}, {4}, {5}), 'Z', {6}, {7} )), {9}), 'laz') from " 218 | "(select {0} from {1} where pc_intersects({0}, " 219 | "st_geomfromtext('polygon (({2}))',{3})) {8})_;" 220 | .format(session.column, session.table, 221 | poly, session.srsid, range_min, range_max, 222 | box[2], box[5], sql_limit, 223 | pcid)) 224 | 225 | return sql 226 | 227 | 228 | def buildbox(bbox): 229 | width = bbox[3] - bbox[0] 230 | depth = bbox[4] - bbox[1] 231 | height = bbox[5] - bbox[2] 232 | midx = bbox[0] + width / 2 233 | midy = bbox[1] + depth / 2 234 | midz = bbox[2] + height / 2 235 | 236 | box = [midx, midy, midz] 237 | box.append(width / 2.0) 238 | box.append(0.0) 239 | box.append(0.0) 240 | box.append(0.0) 241 | box.append(depth / 2.0) 242 | box.append(0.0) 243 | box.append(0.0) 244 | box.append(0.0) 245 | box.append(height / 2.0) 246 | return box 247 | 248 | 249 | def build_hierarchy_from_pg(session, baseurl, bbox): 250 | 251 | stored_patches = session.lopocstable.filter_stored_output() 252 | pcid = stored_patches['pcid'] 253 | offsets = stored_patches['offsets'] 254 | tileset = {} 255 | tileset["asset"] = {"version": "0.0"} 256 | tileset["geometricError"] = math.sqrt( 257 | (bbox[3] - bbox[0]) ** 2 + (bbox[4] - bbox[1]) ** 2 + (bbox[5] - bbox[2]) ** 2 258 | ) 259 | if Config.DEBUG: 260 | print('tileset geometricErroc', tileset["geometricError"]) 261 | 262 | bvol = {} 263 | bvol["box"] = buildbox(bbox) 264 | 265 | lod_str = "lod={0}".format(LOD_MIN) 266 | bounds = ("bounds=[{0},{1},{2},{3},{4},{5}]" 267 | .format(bbox[0], bbox[1], bbox[2], bbox[3], bbox[4], bbox[5])) 268 | resource = "{}.{}".format(session.table, session.column) 269 | 270 | base_url = "{0}/3dtiles/{1}/read.pnts".format(baseurl, resource) 271 | url = ( 272 | "{0}?{1}&{2}" 273 | .format(base_url, lod_str, bounds) 274 | ) 275 | 276 | GEOMETRIC_ERROR = tileset["geometricError"] 277 | 278 | root = {} 279 | root["refine"] = "add" 280 | root["boundingVolume"] = bvol 281 | root["geometricError"] = GEOMETRIC_ERROR / 20 282 | root["content"] = {"url": url} 283 | 284 | lod = 1 285 | children_list = [] 286 | for bb in 
228 | def buildbox(bbox): 229 | width = bbox[3] - bbox[0] 230 | depth = bbox[4] - bbox[1] 231 | height = bbox[5] - bbox[2] 232 | midx = bbox[0] + width / 2 233 | midy = bbox[1] + depth / 2 234 | midz = bbox[2] + height / 2 235 | 236 | box = [midx, midy, midz] 237 | box.append(width / 2.0) 238 | box.append(0.0) 239 | box.append(0.0) 240 | box.append(0.0) 241 | box.append(depth / 2.0) 242 | box.append(0.0) 243 | box.append(0.0) 244 | box.append(0.0) 245 | box.append(height / 2.0) 246 | return box 247 | 248 | 249 | def build_hierarchy_from_pg(session, baseurl, bbox): 250 | 251 | stored_patches = session.lopocstable.filter_stored_output() 252 | pcid = stored_patches['pcid'] 253 | offsets = stored_patches['offsets'] 254 | tileset = {} 255 | tileset["asset"] = {"version": "0.0"} 256 | tileset["geometricError"] = math.sqrt( 257 | (bbox[3] - bbox[0]) ** 2 + (bbox[4] - bbox[1]) ** 2 + (bbox[5] - bbox[2]) ** 2 258 | ) 259 | if Config.DEBUG: 260 | print('tileset geometricError', tileset["geometricError"]) 261 | 262 | bvol = {} 263 | bvol["box"] = buildbox(bbox) 264 | 265 | lod_str = "lod={0}".format(LOD_MIN) 266 | bounds = ("bounds=[{0},{1},{2},{3},{4},{5}]" 267 | .format(bbox[0], bbox[1], bbox[2], bbox[3], bbox[4], bbox[5])) 268 | resource = "{}.{}".format(session.table, session.column) 269 | 270 | base_url = "{0}/3dtiles/{1}/read.pnts".format(baseurl, resource) 271 | url = ( 272 | "{0}?{1}&{2}" 273 | .format(base_url, lod_str, bounds) 274 | ) 275 | 276 | GEOMETRIC_ERROR = tileset["geometricError"] 277 | 278 | root = {} 279 | root["refine"] = "add" 280 | root["boundingVolume"] = bvol 281 | root["geometricError"] = GEOMETRIC_ERROR / 20 282 | root["content"] = {"url": url} 283 | 284 | lod = 1 285 | children_list = [] 286 | for bb in split_bbox(bbox): 287 | json_children = children( 288 | session, baseurl, offsets, bb, lod, pcid, GEOMETRIC_ERROR / 40 289 | ) 290 | if len(json_children): 291 | children_list.append(json_children) 292 | 293 | if len(children_list): 294 | root["children"] = children_list 295 | 296 | tileset["root"] = root 297 | 298 | return json.dumps(tileset, indent=2, separators=(',', ': ')) 299 | 300 | 301 | def build_children_section(session, baseurl, offsets, bbox, err, lod): 302 | 303 | cjson = {} 304 | 305 | lod = "lod={0}".format(lod) 306 | bounds = ("bounds=[{0},{1},{2},{3},{4},{5}]" 307 | .format(bbox[0], bbox[1], bbox[2], bbox[3], bbox[4], bbox[5])) 308 | 309 | resource = "{}.{}".format(session.table, session.column) 310 | baseurl = "{0}/3dtiles/{1}/read.pnts".format(baseurl, resource) 311 | url = "{0}?{1}&{2}".format(baseurl, lod, bounds) 312 | 313 | bvol = {} 314 | bvol["box"] = buildbox(bbox) 315 | 316 | cjson["boundingVolume"] = bvol 317 | cjson["geometricError"] = err 318 | cjson["content"] = {"url": url} 319 | 320 | return cjson 321 | 322 | 323 | def split_bbox(bbox): 324 | width = bbox[3] - bbox[0] 325 | length = bbox[4] - bbox[1] 326 | height = bbox[5] - bbox[2] 327 | 328 | up = bbox[5] 329 | middle = up - height / 2 330 | down = bbox[2] 331 | 332 | x = bbox[0] 333 | y = bbox[1] 334 | 335 | bbox_nwd = [x, y + length / 2, down, x + width / 2, y + length, middle] 336 | bbox_nwu = [x, y + length / 2, middle, x + width / 2, y + length, up] 337 | bbox_ned = [x + width / 2, y + length / 2, down, x + width, y + length, middle] 338 | bbox_neu = [x + width / 2, y + length / 2, middle, x + width, y + length, up] 339 | bbox_swd = [x, y, down, x + width / 2, y + length / 2, middle] 340 | bbox_swu = [x, y, middle, x + width / 2, y + length / 2, up] 341 | bbox_sed = [x + width / 2, y, down, x + width, y + length / 2, middle] 342 | bbox_seu = [x + width / 2, y, middle, x + width, y + length / 2, up] 343 | 344 | return [bbox_nwd, bbox_nwu, bbox_ned, bbox_neu, bbox_swd, bbox_swu, 345 | bbox_sed, bbox_seu] 346 | 347 | 348 | def children(session, baseurl, offsets, bbox, lod, pcid, err): 349 | 350 | # run sql 351 | sql = sql_query(session, bbox, pcid, lod) 352 | pcpatch_wkb = session.query(sql)[0][0] 353 | 354 | json_me = {} 355 | if lod <= LOD_MAX and pcpatch_wkb: 356 | npoints = patch_numpoints(pcpatch_wkb) 357 | if npoints > 0: 358 | json_me = build_children_section(session, baseurl, offsets, bbox, err, lod) 359 | 360 | lod += 1 361 | 362 | children_list = [] 363 | if lod <= LOD_MAX: 364 | for bb in split_bbox(bbox): 365 | json_children = children( 366 | session, baseurl, offsets, bb, lod, pcid, err / 2 367 | ) 368 | 369 | if len(json_children): 370 | children_list.append(json_children) 371 | 372 | if len(children_list): 373 | json_me["children"] = children_list 374 | 375 | return json_me 376 | 
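# Editor's note (illustrative, not part of the original module): split_bbox
# performs a regular octree split, so each recursion level halves the extent
# along every axis. A quick commented-out sanity check:
#
#     parent = [0, 0, 0, 2, 2, 2]   # [xmin, ymin, zmin, xmax, ymax, zmax]
#     octants = split_bbox(parent)
#     assert len(octants) == 8
#     assert all(o[3] - o[0] == 1.0 for o in octants)
#
# This halving is also why children() passes err / 2 down the recursion: the
# 3D Tiles geometricError shrinks with the tile size.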
-------------------------------------------------------------------------------- /lopocs/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import json 3 | import math 4 | from struct import pack, unpack 5 | from binascii import unhexlify 6 | import os 7 | import decimal 8 | 9 | import numpy as np 10 | from lazperf import buildNumpyDescription, Decompressor 11 | 12 | from .conf import Config 13 | 14 | 15 | numpy_types_map = { 16 | ('unsigned', 1): np.uint8, 17 | ('unsigned', 2): np.uint16, 18 | ('unsigned', 4): np.uint32, 19 | ('signed', 2): np.int16, 20 | ('signed', 4): np.int32, 21 | ('floating', 4): np.float32, 22 | ('floating', 8): np.float64, 23 | } 24 | 25 | 26 | def schema_dtype(schema): 27 | '''Given a patch schema (Greyhound-like schema), 28 | convert it into a numpy dtype description: 29 | http://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html 30 | ''' 31 | formats = [ 32 | numpy_types_map[(dim['type'], dim['size'])] 33 | for dim in schema 34 | ] 35 | 36 | return np.dtype( 37 | {'names': [dim['name'] for dim in schema], 'formats': formats}) 38 | 39 | 40 | def read_uncompressed_patch(pcpatch_wkb, schema): 41 | ''' 42 | Patch binary structure uncompressed: 43 | byte: endianness (1 = NDR, 0 = XDR) 44 | uint32: pcid (key to POINTCLOUD_SCHEMAS) 45 | uint32: 0 = no compression 46 | uint32: npoints 47 | pointdata[]: interpret relative to pcid 48 | ''' 49 | patchbin = unhexlify(pcpatch_wkb) 50 | npoints = unpack("I", patchbin[9:13])[0] 51 | dt = schema_dtype(schema) 52 | patch = np.frombuffer(patchbin[13:], dtype=dt) 53 | # debug 54 | # print(patch[:10]) 55 | return patch, npoints 56 | 57 | 
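# Editor's note (illustrative, not part of the original module): the offsets
# above follow from the header layout in the docstring: 1 byte endianness
# + 4 bytes pcid + 4 bytes compression = 9 bytes, so npoints occupies bytes
# 9..13 and point data starts at byte 13. A commented-out check with made-up
# header values, on a little-endian machine (NDR, the same assumption the
# unpack("I", ...) call already makes):
#
#     from binascii import hexlify
#     header = pack('B', 1) + pack('I', 1) + pack('I', 0) + pack('I', 42)
#     wkb = hexlify(header)   # hex form, as returned by pgpointcloud
#     assert unpack('I', unhexlify(wkb)[9:13])[0] == 42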
148 | """ 149 | boxstr = ( 150 | "{0} {1}, {2} {3}, {4} {5}, {6} {7}, {0} {1}" 151 | .format(box[0], box[1], box[3], box[1], box[3], box[4], box[0], box[4]) 152 | ) 153 | return boxstr 154 | 155 | 156 | def list_from_str_box(box_str): 157 | """ 158 | Transform a string 'BOX(xmin, ymin, xmax, ymax)' to 159 | a list [xmin, ymin, xmin, xmax] 160 | """ 161 | box_str = box_str.replace('BOX', '') 162 | box_str = box_str.replace('(', '') 163 | box_str = box_str.replace(')', '') 164 | box_str = box_str.replace(' ', ',') 165 | 166 | l = [float(x) for x in box_str.split(',')] 167 | return l 168 | 169 | 170 | def hexa_signed_int32(val): 171 | return pack('i', val) 172 | 173 | 174 | def hexa_signed_uint16(val): 175 | return pack('H', val) 176 | 177 | 178 | def hexa_signed_uint8(val): 179 | return pack('B', val) 180 | 181 | 182 | def patch_numpoints(pcpatch_wkb): 183 | '''get number of points in a patch 184 | ''' 185 | npoints_hexa = pcpatch_wkb[18:26] 186 | return unpack("I", unhexlify(npoints_hexa))[0] 187 | -------------------------------------------------------------------------------- /lopocs/wsgi.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from lopocs import create_app 3 | 4 | app = create_app() 5 | 6 | if __name__ == '__main__': 7 | app.run() 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import re 4 | from setuptools import setup, find_packages 5 | import subprocess 6 | 7 | GDAL_VERSION = subprocess.check_output(['gdal-config', '--version']).strip().decode() 8 | GDAL_VERSION, GDAL_REVISION_MAJOR, GDAL_REVISION_MINOR = GDAL_VERSION.split('.') 9 | GDAL_MIN = '{0}.{1}.{2}'.format(GDAL_VERSION, GDAL_REVISION_MAJOR, GDAL_REVISION_MINOR) 10 | GDAL_MAX = '{0}.{1}.{2}'.format(GDAL_VERSION, GDAL_REVISION_MAJOR, int(GDAL_REVISION_MINOR) + 1) 11 | 12 | here = os.path.abspath(os.path.dirname(__file__)) 13 | 14 | requirements = ( 15 | 'flask>=0.12', 16 | 'flask-restplus==0.10.1', 17 | 'flask-cors==3.0.2', 18 | 'psycopg2-binary>=2.6.2', 19 | 'pyyaml==5.2', 20 | 'pygdal >= {0}, <{1}'.format(GDAL_MIN, GDAL_MAX), 21 | 'redis==2.10.5', 22 | 'py3dtiles==1.0.2', 23 | 'click==6.7', 24 | 'requests==2.20.0', 25 | 'lazperf==1.2.1', 26 | 'numpy==1.14.3', 27 | 'pyproj==1.9.5.1', 28 | 'packaging==19.2' 29 | ) 30 | 31 | dev_requirements = ( 32 | 'pytest', 33 | 'flake8', 34 | 'invoke', 35 | 'pytest-cov', 36 | ) 37 | 38 | doc_requirements = ( 39 | 'sphinx', 40 | 'sphinx_rtd_theme', 41 | ) 42 | 43 | prod_requirements = ( 44 | 'uwsgi' 45 | ) 46 | 47 | 48 | def find_version(*file_paths): 49 | """ 50 | see https://github.com/pypa/sampleproject/blob/master/setup.py 51 | """ 52 | 53 | with open(os.path.join(here, *file_paths), 'r') as f: 54 | version_file = f.read() 55 | 56 | # The version line must have the form 57 | # __version__ = 'ver' 58 | version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", 59 | version_file, re.M) 60 | if version_match: 61 | return version_match.group(1) 62 | raise RuntimeError("Unable to find version string. 
" 63 | "Should be at the first line of __init__.py.") 64 | 65 | 66 | setup( 67 | name='lopocs', 68 | version=find_version('lopocs', '__init__.py'), 69 | description="Light OpenSource PointCloud Server", 70 | url='https://github.com/LI3DS/lopocs', 71 | author='dev', 72 | author_email='contact@oslandia.com', 73 | license='LGPL2 or later', 74 | classifiers=[ 75 | 'Development Status :: 3 - Alpha', 76 | 'Intended Audience :: Developers', 77 | 'Programming Language :: Python :: 3.4', 78 | 'Programming Language :: Python :: 3.5', 79 | ], 80 | packages=find_packages(), 81 | include_package_data=True, 82 | test_suite='tests', 83 | install_requires=requirements, 84 | extras_require={ 85 | 'dev': dev_requirements, 86 | 'prod': prod_requirements, 87 | 'doc': doc_requirements 88 | }, 89 | entry_points={ 90 | 'console_scripts': ['lopocs = lopocs.cli:cli'], 91 | } 92 | ) 93 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oslandia/lopocs/4179cb829c395b9a1c47717a723a261e20e20279/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from lopocs import utils 2 | 3 | 4 | def test_list_from_str(): 5 | str_list = "[1, 5, 2, 3]" 6 | l = utils.list_from_str(str_list) 7 | assert l == [1, 5, 2, 3] 8 | 9 | 10 | def test_boundingbox_to_polygon(): 11 | bbox = [1, 2, 3, 4, 5, 6] 12 | poly = utils.boundingbox_to_polygon(bbox) 13 | poly_expected = '1 2, 4 2, 4 5, 1 5, 1 2' 14 | assert poly == poly_expected 15 | 16 | 17 | def test_list_from_str_box(): 18 | str_box = 'BOX(1 2 3 4)' 19 | l_box = utils.list_from_str_box(str_box) 20 | assert l_box == [1, 2, 3, 4] 21 | 22 | 23 | def test_compute_scales_cesium(): 24 | scale = utils.compute_scale_for_cesium(1.56, 1.80) 25 | assert scale == 1e-5 26 | scale = utils.compute_scale_for_cesium(4.5556e6, 4.5557e6) 27 | assert scale == 0.01 28 | scale = utils.compute_scale_for_cesium(4e5, 5e5) 29 | assert scale == 1 30 | scale = utils.compute_scale_for_cesium(100, 300000) 31 | assert scale == 1 32 | --------------------------------------------------------------------------------