├── .gitignore ├── LICENSE ├── README.md ├── docs ├── img │ ├── Figure3.jpg │ ├── Figure4.jpg │ ├── Figure5.jpg │ └── SF2.jpg ├── install_instructions.md ├── sample_camera_models │ ├── L1A_sample_camera_model.tsai │ └── L1B_sample_camera_model.tsai └── video_workflow.md ├── environment.yml ├── news.md ├── notebooks ├── bundle_adjustment_chamoli-dense.ipynb ├── evaluate_various_ba_params.ipynb ├── gm_triplet_eval.ipynb ├── lidar_processing │ ├── first_return_filter.json │ └── laz2dsm.sh ├── manuscript_figures_master-revision.ipynb ├── manuscript_figures_master.ipynb ├── reference_dems.ipynb ├── skysat_l1a_bundle_adjustment.ipynb ├── snow-on.ipynb ├── triplet_accuracy_assesment_plot.ipynb └── video_snow_depth.ipynb ├── scripts ├── ba_skysat.py ├── compare_dems.py ├── legacy │ ├── skysat_orthorectify.py │ ├── skysat_overlap.py │ ├── skysat_preprocess.py │ └── skysat_stereo_cli.py ├── optimise_raw_camera.py ├── plot_disparity.py ├── prep_dense_ba_run.py ├── reformat_frameindex.py ├── skysat_ctrack.pbs ├── skysat_dem_mos.py ├── skysat_orthorectify.py ├── skysat_overlap.py ├── skysat_pc_cam.py ├── skysat_preprocess.py ├── skysat_stereo_cli.py ├── skysat_triplet_pipeline.py └── skysat_video_pipeline.py ├── setup.py └── skysat_stereo ├── __init__.py ├── asp_utils.py ├── bundle_adjustment_lib.py ├── misc_geospatial.py ├── skysat.py └── skysat_stereo_workflow.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | #src folder for pip installs 132 | ./src/ 133 | 134 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Shashank Bhushan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4422248.svg)](https://doi.org/10.5281/zenodo.4422248) 2 | 3 | # skysat_stereo 4 | Tools and libraries for processing Planet SkySat imagery, including camera model refinement, stereo reconstruction, and orthomosaic production 5 | 6 | ## Introduction 7 | Planet operates a constellation of 19 SkySat-C SmallSats, which can acquire very-high-resolution (0.7 m to 0.9 m) triplet stereo and continuous video imagery with short revisit times. This provides an excellent opportunity to derive on-demand, high-resolution Digital Elevation Models (DEMs) for any point on the Earth's surface, with broad applications for Earth science research. However, the quality of these DEMs is currently limited by the geolocation accuracy of the default SkySat camera models, and few existing photogrammetry tools can process the SkySat images. 8 | 9 | ## Purpose 10 | We developed automated workflows to refine the SkySat camera models and produce accurate DEMs and orthomosaics. For additional details on the workflow and evaluation of output products, please see the corresponding [open-access publication](https://doi.org/10.1016/j.isprsjprs.2020.12.012) in the ISPRS Journal of Photogrammetry and Remote Sensing. This repository contains all tools and libraries as a supplement to the published manuscript. 
11 | 12 | This project is under active development and we welcome contributions (information for contributors forthcoming) and preliminary feedback from early visitors (you) :) 13 | 14 | ## Contents 15 | #### [skysat_stereo](/skysat_stereo) - libraries used throughout the processing workflow 16 | - `asp_utils.py` - library of functions involving components of the NASA Ames Stereo Pipeline 17 | - `skysat.py` - library of functions specific for SkySat processing 18 | - `misc_geospatial.py` - miscellaneous functions for geospatial analysis and image processing 19 | 20 | #### [scripts](/scripts/) - command line utilities for the SkySat processing workflow. 21 | 1. [`skysat_overlap.py`](/scripts/skysat_overlap.py) - identifies overlapping scenes 22 | 2. [`skysat_preprocess.py`](/scripts/skysat_preprocess.py) - prepares subset of video scenes, generates [frame camera models](/docs/sample_camera_models/) 23 | 3. [`ba_skysat.py`](/scripts/ba_skysat.py) - bundle adjustment and camera refinement 24 | 4. [`skysat_stereo_cli.py`](/scripts/skysat_stereo_cli.py) - stereo reconstruction 25 | 5. [`skysat_dem_mos.py`](/scripts/skysat_dem_mos.py) - generates DEM composites with relative accuracy and count metrics 26 | 6. [`skysat_pc_cam.py`](/scripts/skysat_pc_cam.py) - point cloud gridding, DEM co-registration, export updated frame and RPC camera models 27 | 7. [`skysat_orthorectify.py`](/scripts/skysat_orthorectify.py) - orthorectify individual scenes and produce orthoimage mosaics 28 | 8. [`plot_disparity.py`](/scripts/plot_disparity.py) - visualize DEM, disparity map, stereo triangulation intersection error map 29 | 9. [`skysat_triplet_pipeline.py`](/scripts/skysat_triplet_pipeline.py) - wrapper script for end to end triplet stereo processing workflow 30 | 10.
[`skysat_video_pipeline.py`](/scripts/skysat_video_pipeline.py) - wrapper script for end to end video stereo processing workflow 31 | #### [notebooks](/notebooks/) - notebooks used during analysis and figure preparation 32 | 33 | ## Input products supported 34 | 1. Triplet stereo or bi-stereo collections at **L1B level**. These images should be non-georeferenced and should have an accompanying RPC metadata with them. To confirm that you have the RPC information, run gdalinfo on one of the images, and you should see RPC information printed out. 35 | 2. Video collections at **L1A level**. These images should be non-georeferenced, and should have a frame_index.csv file accompanying them, which contains the ground footprint and satellite attitude/ephemeris data. 36 | 37 | While ordering data from Planet, please make sure the correct data level is specified :) 38 | 39 | At this stage, *we do not support the L1A full frame triplet stereo products*, but we have developed an internal workflow logic, and are in the process of writing actual code for it. Thanks for your patience and interest :) 40 | 41 | ## Sample output products 42 | ### SkySat Triplet Stereo 43 | ![triplet_product](/docs/img/Figure3.jpg) 44 | Figure 1: Orthoimage mosaic and DEM composite generated from a SkySat triplet stereo collection over Mt. Rainier, WA, USA. These final products were derived from L1B imagery that is © Planet, 2019 (Planet Team, 2017). 45 | 46 | ![triplet_accuracy](/docs/img/Figure4.jpg) 47 | Figure 2: Relative and absolute accuracy before (using Planet RPCs) and after the `skysat_stereo` correction workflow. 48 | 49 | ### SkySat Video 50 | ![video_samples](/docs/img/Figure5.jpg) 51 | Figure 3: Sample products from SkySat video collection over Mt. St. Helen's crater (after `skysat_stereo` correction workflow). These final products were derived from L1A imagery that is © Planet, 2019 (Planet Team, 2017).
52 | 53 | ## Dependencies 54 | - See [environment.yml file](/environment.yml) for complete list of Python packages with pinned version numbers. 55 | - [NASA Ames Stereo Pipeline v 3.0.1 alpha (April 22 2022)](https://stereopipeline.readthedocs.io/en/latest/) 56 | 57 | ## Installation 58 | Please see the [install instructions](/docs/install_instructions.md). 59 | 60 | Notes: 61 | * These tools were developed and tested on a dedicated [Broadwell node](https://www.nas.nasa.gov/hecc/resources/pleiades.html) on the NASA Pleiades supercomputer, running SUSE Linux Enterprise Server. 62 | * Many tools use parallel threads and/or processes, and the hardcoded number of threads and processes were tuned based on the available resources (28 CPUs, 128 GB RAM). Some utilities should autoscale based on available resources, but others may require modifications for optimization on other systems. 63 | * The code should work for \*nix platforms. We have not tested on Windows. 64 | 65 | ## License 66 | This project is licensed under the terms of the [MIT License](./LICENSE). 67 | 68 | ## Citation 69 | If you use this code and/or derived products in a scientific publication, please cite: 70 | * Bhushan, S., Shean, D., Alexandrov, O., & Henderson, S. (2021). Automated digital elevation model (DEM) generation from very-high-resolution Planet SkySat triplet stereo and video imagery. *ISPRS Journal of Photogrammetry and Remote Sensing*, 173, 151–165. https://doi.org/10.1016/j.isprsjprs.2020.12.012. 71 | * Shashank Bhushan, David Shean, Oleg Alexandrov, & Scott Henderson. (2021, January 7). uw-cryo/skysat_stereo: Zenodo doi revision updates (Version 0.2). Zenodo. http://doi.org/10.5281/zenodo.4422248 72 | 73 | ## Funding and Acknowledgments 74 | * This research was supported by the NASA Terrestrial Hydrology Program (THP) and the NASA Cryosphere Program. Shashank Bhushan was supported by a NASA FINESST award (80NSSC19K1338) and the NASA HiMAT project (NNX16AQ88G). 
David Shean, Oleg Alexandrov and Scott Henderson were supported by NASA THP award 80NSSC18K1405. SkySat tasking, data access, and supplemental support was provided under the [NASA Commercial Smallsat Data Acquisition Program 2018 Pilot Study](https://sit.earthdata.nasa.gov/about/small-satellite-commercial-data-buy-program) 75 | * We acknowledge Compton J. Tucker and others at NASA Goddard Space Flight Center and NASA Headquarters for coordinating the Commercial Satellite Data Access Program Pilot and assisting with preliminary SkySat tasking campaigns. Paris Good at Planet provided invaluable assistance with data acquisition and facilitated discussions with Planet engineering teams. Thanks are also due to Ross Losher, Antonio Martos, Kelsey Jordahl and others at Planet for initial guidance on SkySat-C sensor specifications and camera models. Resources supporting this work were provided by the NASA High-End Computing (HEC) Program through the NASA Advanced Supercomputing (NAS) Division at Ames Research Center. Friedrich Knuth and Michelle Hu provided feedback on initial manuscript outline, code development and documentation. We also acknowledge input from the larger ASP community during photogrammetry discussions. 76 | 77 | ## References 78 | * Beyer, Ross A., Oleg Alexandrov, and Scott McMichael. 2018. The Ames Stereo Pipeline: NASA's open source software for deriving and processing terrain data, *Earth and Space Science*, 5. https://doi.org/10.1029/2018EA000409. 79 | * Shean, D. E., O. Alexandrov, Z. Moratto, B. E. Smith, I. R. Joughin, C. C. Porter, Morin, P. J. 2016. An automated, open-source pipeline for mass production of digital elevation models (DEMs) from very high-resolution commercial stereo satellite imagery. *ISPRS Journal of Photogrammetry and Remote Sensing*, 116. https://doi.org/10.1016/j.isprsjprs.2016.03.012. 80 | * Ross Beyer, Oleg Alexandrov, Scott McMichael, Michael Broxton, Mike Lundy, Kyle Husmann, … jlaura. (2020, July 28).
NeoGeographyToolkit/StereoPipeline 2.7.0 (Version 2.7.0). Zenodo. http://doi.org/10.5281/zenodo.3963341 81 | * Planet application program interface: In space for life on earth. San Francisco, CA. https://api.planet.com. 82 | -------------------------------------------------------------------------------- /docs/img/Figure3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uw-cryo/skysat_stereo/f29737e731de70ecda30c095fd13e872d8954c91/docs/img/Figure3.jpg -------------------------------------------------------------------------------- /docs/img/Figure4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uw-cryo/skysat_stereo/f29737e731de70ecda30c095fd13e872d8954c91/docs/img/Figure4.jpg -------------------------------------------------------------------------------- /docs/img/Figure5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uw-cryo/skysat_stereo/f29737e731de70ecda30c095fd13e872d8954c91/docs/img/Figure5.jpg -------------------------------------------------------------------------------- /docs/img/SF2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uw-cryo/skysat_stereo/f29737e731de70ecda30c095fd13e872d8954c91/docs/img/SF2.jpg -------------------------------------------------------------------------------- /docs/install_instructions.md: -------------------------------------------------------------------------------- 1 | # Installation for skysat_stereo 2 | 3 | - We use srtm4 package, which for now needs developer version of libtiff 4 | - So first of all, install libtif on your machine using: `apt-get install libtiff-dev` for Ubuntu or `brew install libtiff` for (macOS) 5 | - Download Ames Stereo Pipeline (v 3.2.0) executables from 
[here](https://github.com/NeoGeographyToolkit/StereoPipeline/releases/download/3.1.0/StereoPipeline-3.1.0-2022-05-18-x86_64-Linux.tar.bz2). Untar the downloaded file, and add the `bin` folder inside the untarred folder to your `PATH` in your `.bashrc` profile. 6 | - Clone the `skysat_stereo` repo to the location of your choice using `https://github.com/uw-cryo/skysat_stereo.git` 7 | - We recommend using conda for managing packages. Powerusers can have a look at the `environment.yml` file in the github repository to make an environment using that. 8 | - Otherwise, one can simply initiate a conda environment to avoid conflicts using the environment.yml file. 9 | - Run the command `conda env create -f skysat_stereo/environment.yml`. This will create a new environment `skysat_stereo` containing all dependencies. 10 | - Each time a new terminal is opened, activate the environment using `conda activate skysat_stereo`. 11 | - Activate the skysat_stereo environment, and install the repository in editable mode: `pip install -e skysat_stereo/` 12 | - This will install all library files (the fancy APIs); to run the command line calls, the scripts need to be added to your PATH. 13 | - To use the command line executables located in the `scripts` directory of `skysat_stereo` directory, add the `skysat_stereo/scripts/` path to your `.bashrc` as well. A guide on how to add paths to your .bashrc can be found [here](https://gist.github.com/nex3/c395b2f8fd4b02068be37c961301caa7). 14 | - If any of this sounds confusing, please refer to this [guide](https://github.com/dshean/demcoreg/blob/master/docs/beginners_doc.md) which has tricks for installing packages/environment using conda for new users.
15 | -------------------------------------------------------------------------------- /docs/sample_camera_models/L1A_sample_camera_model.tsai: -------------------------------------------------------------------------------- 1 | VERSION_4 2 | PINHOLE 3 | fu = 553846.15384599997 4 | fv = 553846.15384599997 5 | cu = 1280 6 | cv = 540 7 | u_direction = 1 0 0 8 | v_direction = 0 1 0 9 | w_direction = 0 0 1 10 | C = -2317379.6147307232 -3871635.8870806298 5188634.3755841274 11 | R = 0.89049592909915543 -0.45430359544735871 0.025004867954744082 -0.44251908425470804 -0.8519976095289028 0.27978015195401096 -0.10580104124348777 -0.26020821762703317 -0.95973841391868642 12 | pitch = 1 13 | NULL 14 | -------------------------------------------------------------------------------- /docs/sample_camera_models/L1B_sample_camera_model.tsai: -------------------------------------------------------------------------------- 1 | VERSION_4 2 | PINHOLE 3 | fu = 553846.15384599997 4 | fv = 553846.15384599997 5 | cu = 1280 6 | cv = 540 7 | u_direction = 1 0 0 8 | v_direction = 0 1 0 9 | w_direction = 0 0 1 10 | C = -2317379.6147307232 -3871635.8870806298 5188634.3755841274 11 | R = 0.89049592909915543 -0.45430359544735871 0.025004867954744082 -0.44251908425470804 -0.8519976095289028 0.27978015195401096 -0.10580104124348777 -0.26020821762703317 -0.95973841391868642 12 | pitch = 0.80000000000000004 13 | NULL 14 | -------------------------------------------------------------------------------- /docs/video_workflow.md: -------------------------------------------------------------------------------- 1 | # workflow for generating DEMs from SkySat-C L1A videos 2 | 3 | ## Preprocessing: 4 | 5 | In the preprocessing step, images are sampled (saved as truncated frame_index csv file) at a given interval and frame camera models, gcp files are written 6 | - Run `skysat_preprocess.py` as: 7 | 8 | ```skysat_preprocess.py -mode video -t pinhole -img video_path/frames/ -video_sampling_mode: num_images 
-sampler 60 -outdir subsampled_frames_dir -frame_index video_path/frame_index.csv -dem path_to_reference_DEM -product l1a``` 9 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: skysat_stereo 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | # core data science 6 | - python 7 | - scipy 8 | - numpy 9 | - pandas 10 | - matplotlib 11 | # geospatial (raster+vector) 12 | - gdal 13 | - rasterio 14 | - geopandas 15 | - pygeos 16 | - shapely 17 | - contextily 18 | - descartes 19 | # misc 20 | - pip 21 | - psutil 22 | - pip: 23 | # rotation_matrices 24 | - pyquaternion 25 | # misc 26 | - p_tqdm 27 | # geospatial+plotting 28 | - "--editable=git+https://github.com/dshean/pygeotools.git#egg=pygeotools" 29 | - "--editable=git+https://github.com/dshean/demcoreg.git#egg=demcoreg" 30 | - "--editable=git+https://github.com/ShashankBice/imview.git@isprs_version#egg=imview" 31 | - "--editable=git+https://github.com/cmla/rpcm.git#egg=rpcm" 32 | # review and add bare 33 | -------------------------------------------------------------------------------- /news.md: -------------------------------------------------------------------------------- 1 | Update Notes for upcoming releases 2 | ### 0.3 3 | * Option to run from a particular stage in stereo (preprocessing, correlation, refinement, filter and triangulation) (added for triplet currently) 4 | * Allow for controlling camera weight by the user during bundle_adjust/ adjust weight based on initial reprojection error (in progress) 5 | * Extend logic of reading UTM zones from even unprojected Point Clouds (added) 6 | * Virtual GCP support in bundle adjustment (in progress) 7 | * Cross-track (multi-view triplet,mono support) 8 | * Bundle adjustment (in progress) 9 | * Stereo processing (done) 10 | * DEM mosaicking (done) 11 | * orthorectification (done) 12 | 13 | 14 | * Ability to run processing for 
scenes within a bounding box 15 | * Support added in skysat_overlap.py to write out overlapping pairs which are only intersect a bounding box (done) 16 | * This overlap list can be ingested by subsequent updated orthorectification and stereo processing and DEM processing programs (done) 17 | * Bundle adjustment (in progress) 18 | * Update to newer versions of GDAL (>3.0 vs the previous release 2.4) and Proj (previous release is 4) 19 | * Update of wrapper scripts to implement the above changes (in progress) 20 | 21 | * Utility functions 22 | * NED Euler angles (yaw-pitch-roll) from ECEF rotation matrix (supplied by ASP) 23 | * Update newer frame_index.csv provided by Planet for videos and L1A triplet all frames to something which ASP understands 24 | * Find and match ip across image pairs without camera models info 25 | 26 | -------------------------------------------------------------------------------- /notebooks/lidar_processing/first_return_filter.json: -------------------------------------------------------------------------------- 1 | {"pipeline": [{"type": "readers.las","default_srs": "EPSG:26910"}, {"type": "filters.returns", "groups": "first,only"}, {"type": "writers.las", "compression": "true", "forward": "all"}]} 2 | -------------------------------------------------------------------------------- /notebooks/lidar_processing/laz2dsm.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | # laz files downloaded from USGS ftp server: ftp://rockyftp.cr.usgs.gov/vdelivery/Datasets/Staged/Elevation/LPC/Projects/WA_MountRanier_2007-2008/ 3 | #Retrieve first returns or only returns from LIDAR laz files 4 | # the pdal settings are available in the json file 5 | # Init projection is EPSG:26910, adjusted to NAVD88 Datum 6 | # projection info was gleaned by the metadata.xml obtained by downloading any 1 las.zip from the las/ folder 7 | #cmd 8 | parallel --progress "pdal pipeline first_return_filter.json --readers.las.filename={} --writers.las.filename={.}_first_return.laz" ::: *.laz 9 | # Use ASP point2dem to grid the point cloud tiles 10 | # DEM posted at 1 m resolution, with 95 percentile stats filter for aggregrating points within the grid 11 | #cmd 12 | parallel --progress "point2dem --t_srs EPSG:32610 --tr 1 --filter 95-pct {}" ::: *first_return.laz 13 | # compose a VRT file from all the tiled DEMs 14 | #cmd 15 | gdalbuildvrt rainier_lidar_dsm.vrt *DEM.tif 16 | # convert to cloud optimised geotiff 17 | #cmd 18 | gdal_translate -co TILED=YES -co COMPRESS=LZW -co BIGTIFF=IF_SAFER -co COPY_SRC_OVERVIEWS=YES -co COMPRESS_OVERVIEW=YES -co NUM_THREADS=ALL_CPUS -co PREDICTOR=3 rainier_lidar_dsm.vrt rainier_lidar_dsm.tif 19 | # adjust for geoid 20 | #cmd 21 | dem_geoid --reverse-adjustment rainier_lidar_dsm.tif 22 | -------------------------------------------------------------------------------- /scripts/ba_skysat.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | import os,sys,glob,shutil 3 | import subprocess 4 | import argparse 5 | from distutils.spawn import find_executable 6 | from pygeotools.lib import iolib,malib 7 | import geopandas as gpd 8 | import numpy as np 9 | from datetime import datetime 10 | import pandas as pd 11 | from multiprocessing import cpu_count 12 | 13 | # Usage: ba_skysat.py -mode full_video,full_triplet,quick_transform_pc_align,general_ba -t pinhole,rpc -img image_folder -cam optional (rpc might not require it) -ba_prefix out_ba -overlap-list -init_transform -gcp gcp_folder or file 14 | # TODO: 15 | # Keep other passed arguments flexible for extending as general purpose, like gcp_list. Others which go into ba_opt can be checked with None construct when variables are initailized in main command 16 | # maybe put all arguments and check if os.path.abspath can be done during runtime from the get_ba_opts function 17 | 18 | 19 | def run_cmd(bin, args, **kw): 20 | # Note, need to add full executable 21 | # from dshean/vmap.py 22 | #binpath = os.path.join('/home/sbhushan/src/StereoPipeline/bin',bin) 23 | binpath = find_executable(bin) 24 | if binpath is None: 25 | msg = ("Unable to find executable %s\n" 26 | "Install ASP and ensure it is in your PATH env variable\n" 27 | "https://ti.arc.nasa.gov/tech/asr/intelligent-robotics/ngt/stereo/") 28 | sys.exit(msg) 29 | # binpath = os.path.join('/opt/StereoPipeline/bin/',bin) 30 | call = [binpath, ] 31 | print(call) 32 | call.extend(args) 33 | print(call) 34 | # print(type(call)) 35 | # print(' '.join(call)) 36 | try: 37 | code = subprocess.call(call, shell=False) 38 | except OSError as e: 39 | raise Exception('%s: %s' % (binpath, e)) 40 | if code != 0: 41 | raise Exception('ASP step ' + kw['msg'] + ' failed') 42 | 43 | 44 | def get_ba_opts(ba_prefix, ip_per_tile=4000,camera_weight=None,translation_weight=0.4,rotation_weight=0,fixed_cam_idx=None,overlap_list=None, robust_threshold=None, overlap_limit=None, initial_transform=None, 
input_adjustments=None, flavor='general_ba', session='nadirpinhole', gcp_transform=False,num_iterations=2000,num_pass=2,lon_lat_limit=None,elevation_limit=None): 45 | ba_opt = [] 46 | # allow CERES to use multi-threads 47 | ba_opt.extend(['--threads', str(cpu_count())]) 48 | #ba_opt.extend(['--threads', '1']) 49 | ba_opt.extend(['-o', ba_prefix]) 50 | 51 | # keypoint-finding args 52 | # relax triangulation error based filters to account for initial camera errors 53 | ba_opt.extend(['--min-matches', '4']) 54 | ba_opt.extend(['--disable-tri-ip-filter']) 55 | ba_opt.extend(['--force-reuse-match-files']) 56 | ba_opt.extend(['--ip-per-tile', str(ip_per_tile)]) 57 | ba_opt.extend(['--ip-inlier-factor', '0.2']) 58 | ba_opt.extend(['--ip-num-ransac-iterations', '1000']) 59 | ba_opt.extend(['--skip-rough-homography']) 60 | ba_opt.extend(['--min-triangulation-angle', '0.0001']) 61 | 62 | # Save control network created from match points 63 | ba_opt.extend(['--save-cnet-as-csv']) 64 | 65 | # Individually normalize images to properly stretch constrant 66 | # Helpful in keypoint detection 67 | ba_opt.extend(['--individually-normalize']) 68 | 69 | if robust_threshold is not None: 70 | # make the solver focus more on mininizing very high reporjection errors 71 | ba_opt.extend(['--robust-threshold', str(robust_threshold)]) 72 | 73 | if camera_weight is not None: 74 | # this generally assigns weight to penalise movement of camera parameters (Default:0) 75 | ba_opt.extend(['--camera-weight', str(camera_weight)]) 76 | else: 77 | # this is more fine grained, will pinalize translation but allow rotation parameters update 78 | ba_opt.extend(['--translation-weight',str(translation_weight)]) 79 | ba_opt.extend(['--rotation-weight',str(rotation_weight)]) 80 | 81 | if fixed_cam_idx is not None: 82 | # parameters for cameras at the specified indices will not be floated during optimisation 83 | ba_opt.extend(['--fixed-camera-indices',' '.join(fixed_cam_idx.astype(str))]) 84 | 
ba_opt.extend(['-t', session]) 85 | 86 | # filter points based on reprojection errors before running a new pass 87 | ba_opt.extend(['--remove-outliers-params', '75 3 5 6']) 88 | 89 | # How about adding num random passes here ? Think about it, it might help if we are getting stuck in local minima :) 90 | if session == 'nadirpinhole': 91 | ba_opt.extend(['--inline-adjustments']) 92 | # write out a new camera model file with updated parameters 93 | 94 | # specify number of passes and maximum iterations per pass 95 | ba_opt.extend(['--num-iterations', str(num_iterations)]) 96 | ba_opt.extend(['--num-passes', str(num_pass)]) 97 | #ba_opt.extend(['--parameter-tolerance','1e-14']) 98 | 99 | if gcp_transform: 100 | ba_opt.extend(['--transform-cameras-using-gcp']) 101 | 102 | if initial_transform: 103 | ba_opt.extend(['--initial-transform', initial_transform]) 104 | if input_adjustments: 105 | ba_opt.extend(['--input-adjustments', input_adjustments]) 106 | 107 | # these 2 parameters determine which image pairs to use for feature matching 108 | # only the selected pairs are used in formation of the bundle adjustment control network 109 | # video is a sequence of overlapping scenes, so we use an overlap limit 110 | # triplet stereo uses list of overlapping pairs 111 | if overlap_limit: 112 | ba_opt.extend(['--overlap-limit',str(overlap_limit)]) 113 | if overlap_list: 114 | ba_opt.extend(['--overlap-list', overlap_list]) 115 | 116 | # these two params are not used generally. 
117 | if lon_lat_limit: 118 | ba_opt.extend(['--lon-lat-limit',str(lon_lat_limit[0]),str(lon_lat_limit[1]),str(lon_lat_limit[2]),str(lon_lat_limit[3])]) 119 | if elevation_limit: 120 | ba_opt.extend(['--elevation-limit',str(elevation_limit[0]),str(elevation_limit[1])]) 121 | 122 | return ba_opt 123 | 124 | 125 | def getparser(): 126 | parser = argparse.ArgumentParser( 127 | description='Script for performing bundle adjustment, with several custom flavors built-in based on recent use-cases') 128 | ba_choices = ['full_video', 'full_triplet', 129 | 'transform_pc_align', 'general_ba'] 130 | parser.add_argument('-mode', default='full_video', choices=ba_choices, 131 | help='bundle adjust workflow to implement (default: %(default)s)') 132 | session_choices = ['nadirpinhole', 'rpc'] 133 | parser.add_argument('-t', default='nadirpinhole', choices=session_choices, 134 | help='choose between pinhole and rpc mode (default: %(default)s)') 135 | parser.add_argument('-ba_prefix', default=None, 136 | help='output prefix for ba output', required=True) 137 | parser.add_argument('-img', default=None, 138 | help='directory containing images', required=True) 139 | parser.add_argument( 140 | '-cam', default=None, help='directory containing cameras, if using pinhole. 
RPC model expects information in GDAL header') 141 | # parser.add_argument('-gcp',default=None,help='list of gcps',nargs='+',required=False) 142 | parser.add_argument('-gcp', default=None, 143 | help='folder containing list of gcps', required=False) 144 | parser.add_argument('-initial_transform', default=None, 145 | help='.txt file produced by pc_align, which can be used to translate cameras to that position') 146 | parser.add_argument('-input_adjustments', default=None, 147 | help='ba_prefix from previous ba_run if using RPC or not using inline adjustments with pinhole') 148 | parser.add_argument('-overlap_list', default=None, 149 | help='list containing pairs for which feature matching will be restricted to') 150 | parser.add_argument('-overlap_limit', default=20, 151 | help='default overlap limit for video sequence over which feature would be matched (default: %(default)s)') 152 | parser.add_argument('-frame_index',default=None,help='subsampled frame_index.csv produced by preprocessing script (default: %(default)s)') 153 | parser.add_argument('-num_iter',default=2000,help='defualt number of iterations (default: %(default)s)') 154 | parser.add_argument('-num_pass',default=2,help='defualt number of solver passes, eliminating points with high reprojection error at each pass (default: %(default)s)') 155 | camera_param_float_ch = ['trans+rot','rot_only'] 156 | parser.add_argument('-camera_param2float',type=str,default='trans+rot',choices=camera_param_float_ch,help='either float translation and rotation parameters freely, or enforce a higher tranlsation weight and allow free float of rotation parameters, incase the satellite positions are known accurately.') 157 | parser.add_argument('-dem',default=None,help='DEM to filter match points after optimization') 158 | parser.add_argument('-bound',default=None,help='Bound shapefile to limit extent of match points after optimization') 159 | return parser 160 | 161 | 162 | def main(): 163 | parser = getparser() 164 | args = 
parser.parse_args() 165 | img = args.img 166 | # populate image list 167 | img_list = sorted(glob.glob(os.path.join(img, '*.tif'))) 168 | if len(img_list) < 2: 169 | img_list = sorted(glob.glob(os.path.join(img, '*.tiff'))) 170 | #img_list = [os.path.basename(x) for x in img_list] 171 | if os.path.islink(img_list[0]): 172 | img_list = [os.readlink(x) for x in img_list] 173 | 174 | # populate camera model list 175 | if args.cam: 176 | cam = os.path.abspath(args.cam) 177 | if 'run' in os.path.basename(cam): 178 | cam_list = sorted(glob.glob(cam+'-*.tsai')) 179 | else: 180 | cam_list = sorted(glob.glob(os.path.join(cam, '*.tsai'))) 181 | cam_list = cam_list[:len(img_list)] 182 | 183 | session = args.t 184 | 185 | # output ba_prefix 186 | if args.ba_prefix: 187 | ba_prefix = os.path.abspath(args.ba_prefix) 188 | 189 | if args.initial_transform: 190 | initial_transform = os.path.abspath(initial_transform) 191 | if args.input_adjustments: 192 | input_adjustments = os.path.abspath(input_adjustments) 193 | 194 | # triplet stereo overlap list 195 | if args.overlap_list: 196 | overlap_list = os.path.abspath(args.overlap_list) 197 | 198 | # Populate GCP list 199 | if args.gcp: 200 | gcp_list = sorted(glob.glob(os.path.join(args.gcp, '*.gcp'))) 201 | 202 | mode = args.mode 203 | if args.bound: 204 | bound = gpd.read_file(args.bound) 205 | geo_crs = {'init':'epsg:4326'} 206 | if bound.crs is not geo_crs: 207 | bound = bound.to_crs(geo_crs) 208 | lon_min,lat_min,lon_max,lat_max = bound.total_bounds 209 | 210 | # Select whether to float both translation/rotation, or only rotation 211 | if args.camera_param2float == 'trans+rot': 212 | cam_wt = 0 213 | else: 214 | # this will invoke adjustment with rotation weight of 0 and translation weight of 0.4 215 | cam_wt = None 216 | print(f"Camera weight is {cam_wt}") 217 | 218 | # not commonly used 219 | if args.dem: 220 | dem = iolib.fn_getma(args.dem) 221 | dem_stats = malib.get_stats_dict(dem) 222 | min_elev,max_elev = 
[dem_stats['min']-500,dem_stats['max']+500] 223 | 224 | if mode == 'full_video': 225 | # read subsampled frame index, populate gcp, image and camera models appropriately 226 | frame_index = args.frame_index 227 | df = pd.read_csv(frame_index) 228 | gcp = os.path.abspath(args.gcp) 229 | 230 | # block to determine automatically overlap limit of 40 seconds for computing match points 231 | df['dt'] = [datetime.strptime(date.split('+00:00')[0],'%Y-%m-%dT%H:%M:%S.%f') for date in df.datetime.values] 232 | delta = (df.dt.values[1]-df.dt.values[0])/np.timedelta64(1, 's') 233 | # i hardocde overlap limit to have 40 seconds coverage 234 | overlap_limit = np.int(np.ceil(40/delta)) 235 | print("Calculated overlap limit as {}".format(overlap_limit)) 236 | 237 | img_list = [glob.glob(os.path.join(img,'*{}*.tiff'.format(x)))[0] for x in df.name.values] 238 | cam_list = [glob.glob(os.path.join(cam,'*{}*.tsai'.format(x)))[0] for x in df.name.values] 239 | gcp_list = [glob.glob(os.path.join(gcp,'*{}*.gcp'.format(x)))[0] for x in df.name.values] 240 | #also append the clean gcp here 241 | print(os.path.join(gcp,'*clean*_gcp.gcp')) 242 | gcp_list.append(glob.glob(os.path.join(gcp,'*clean*_gcp.gcp'))[0]) 243 | 244 | # this attempt did not work here 245 | # but given videos small footprint, the median (scale)+trans+rotation is good enough for all terrain 246 | # so reverting back to them 247 | #stereo_baseline = 10 248 | #fix_cam_idx = np.array([0]+[0+stereo_baseline]) 249 | #ip_per_tile is switched to default, as die to high scene to scene overlap and limited perspective difference, this produces abundant matches 250 | 251 | round1_opts = get_ba_opts( 252 | ba_prefix, overlap_limit=overlap_limit, flavor='2round_gcp_1', session=session,ip_per_tile=4000, 253 | num_iterations=args.num_iter,num_pass=args.num_pass,camera_weight=cam_wt,fixed_cam_idx=None,robust_threshold=None) 254 | print("Running round 1 bundle adjustment for input video sequence") 255 | if session == 'nadirpinhole': 256 | 
ba_args = img_list+cam_list 257 | else: 258 | ba_args = img_list 259 | # Check if this command executed till last 260 | print('Running bundle adjustment round1') 261 | run_cmd('bundle_adjust', round1_opts+ba_args) 262 | 263 | # Make files used to evaluate solution quality 264 | init_residual_fn_def = sorted(glob.glob(ba_prefix+'*initial*no_loss_*pointmap*.csv'))[0] 265 | init_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*initial_residuals_no_loss_function_raw_pixels.txt'))[0] 266 | init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0]+'_initial_per_cam_reproj_error.txt' 267 | init_residual_fn = os.path.splitext(init_residual_fn_def)[0]+'_initial_reproj_error.csv' 268 | shutil.copy2(init_residual_fn_def,init_residual_fn) 269 | shutil.copy2(init_per_cam_reproj_err,init_per_cam_reproj_err_disk) 270 | # Copy final reprojection error files before transforming cameras 271 | final_residual_fn_def = sorted(glob.glob(ba_prefix+'*final*no_loss_*pointmap*.csv'))[0] 272 | final_residual_fn = os.path.splitext(final_residual_fn_def)[0]+'_final_reproj_error.csv' 273 | final_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*final_residuals_no_loss_function_raw_pixels.txt'))[0] 274 | final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0]+'_final_per_cam_reproj_error.txt' 275 | shutil.copy2(final_residual_fn_def,final_residual_fn) 276 | shutil.copy2(final_per_cam_reproj_err,final_per_cam_reproj_err_disk) 277 | 278 | if session == 'nadirpinhole': 279 | # prepare for second run to apply a constant transform to the self-consistent models using initial ground footprints 280 | identifier = os.path.basename(cam_list[0]).split(df.name.values[0])[0] 281 | print(ba_prefix+identifier+'-{}*.tsai'.format(df.name.values[0])) 282 | cam_list = [glob.glob(ba_prefix+identifier+'-{}*.tsai'.format(img))[0] for img in df.name.values] 283 | print(len(cam_list)) 284 | ba_args = img_list+cam_list+gcp_list 285 | 286 | #fixed_cam_idx2 = 
np.delete(np.arange(len(img_list),dtype=int),fix_cam_idx) 287 | round2_opts = get_ba_opts( 288 | ba_prefix, overlap_limit = overlap_limit, flavor='2round_gcp_2', session=session, gcp_transform=True,camera_weight=0, 289 | num_iterations=0,num_pass=1) 290 | else: 291 | # round 1 is adjust file 292 | input_adjustments = ba_prefix 293 | round2_opts = get_ba_opts( 294 | ba_prefix, overlap_limit = overlap_limit, input_adjustments=ba_prefix, flavor='2round_gcp_2', session=session) 295 | ba_args = img_list+gcp_list 296 | print("running round 2 bundle adjustment for input video sequence") 297 | run_cmd('bundle_adjust', round2_opts+ba_args) 298 | 299 | elif mode == 'full_triplet': 300 | if args.overlap_list is None: 301 | print( 302 | "Attempted bundle adjust will be expensive, will try to find matches in each and every pair") 303 | # the concept is simple 304 | #first 3 cameras, and then corresponding first three cameras from next collection are fixed in the first go 305 | # these serve as a kind of #GCP, preventing a large drift in the triangulated points/camera extrinsics during optimization 306 | img_time_identifier_list = np.array([os.path.basename(img).split('_')[1] for img in img_list]) 307 | img_time_unique_list = np.unique(img_time_identifier_list) 308 | second_collection_list = np.where(img_time_identifier_list == img_time_unique_list[1])[0][[0,1,2]] 309 | fix_cam_idx = np.array([0,1,2]+list(second_collection_list)) 310 | print(type(fix_cam_idx)) 311 | 312 | round1_opts = get_ba_opts( 313 | ba_prefix, session=session,num_iterations=args.num_iter,num_pass=args.num_pass,fixed_cam_idx=fix_cam_idx,overlap_list=args.overlap_list,camera_weight=cam_wt) 314 | # enter round2_opts here only ? 
315 | if session == 'nadirpinhole': 316 | ba_args = img_list+ cam_list 317 | else: 318 | ba_args = img_list 319 | print("Running round 1 bundle adjustment for given triplet stereo combination") 320 | run_cmd('bundle_adjust', round1_opts+ba_args) 321 | 322 | # Save the first and foremost bundle adjustment reprojection error file 323 | init_residual_fn_def = sorted(glob.glob(ba_prefix+'*initial*no_loss_*pointmap*.csv'))[0] 324 | init_residual_fn = os.path.splitext(init_residual_fn_def)[0]+'_initial_reproj_error.csv' 325 | init_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*initial_residuals_no_loss_function_raw_pixels.txt'))[0] 326 | init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0]+'_initial_per_cam_reproj_error.txt' 327 | shutil.copy2(init_residual_fn_def,init_residual_fn) 328 | shutil.copy2(init_per_cam_reproj_err,init_per_cam_reproj_err_disk) 329 | 330 | if session == 'nadirpinhole': 331 | identifier = os.path.basename(cam_list[0]).split('_',14)[0][:2] 332 | print(ba_prefix+'-{}*.tsai'.format(identifier)) 333 | cam_list = sorted(glob.glob(os.path.join(ba_prefix+ '-{}*.tsai'.format(identifier)))) 334 | ba_args = img_list+cam_list 335 | fixed_cam_idx2 = np.delete(np.arange(len(img_list),dtype=int),fix_cam_idx) 336 | round2_opts = get_ba_opts(ba_prefix, overlap_list=overlap_list,session=session, fixed_cam_idx=fixed_cam_idx2,camera_weight=cam_wt) 337 | else: 338 | # round 1 is adjust file 339 | # Only camera model parameters for the first three stereo pairs float in this round 340 | input_adjustments = ba_prefix 341 | round2_opts = get_ba_opts( 342 | ba_prefix, overlap_limit, input_adjustments=ba_prefix, flavor='2round_gcp_2', session=session, 343 | elevation_limit=[min_elev,max_elev],lon_lat_limit=[lon_min,lat_min,lon_max,lat_max]) 344 | ba_args = img_list+gcp_list 345 | 346 | print("running round 2 bundle adjustment for given triplet stereo combination") 347 | run_cmd('bundle_adjust', round2_opts+ba_args) 348 | 349 | # Save state for 
final condition reprojection errors for the sparse triangulated points 350 | final_residual_fn_def = sorted(glob.glob(ba_prefix+'*final*no_loss_*pointmap*.csv'))[0] 351 | final_residual_fn = os.path.splitext(final_residual_fn_def)[0]+'_final_reproj_error.csv' 352 | shutil.copy2(final_residual_fn_def,final_residual_fn) 353 | final_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*final_residuals_no_loss_function_raw_pixels.txt'))[0] 354 | final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0]+'_final_per_cam_reproj_error.txt' 355 | shutil.copy2(final_per_cam_reproj_err,final_per_cam_reproj_err_disk) 356 | 357 | 358 | # input is just a transform from pc_align or something similar with no optimization 359 | if mode == 'transform_pc_align': 360 | if session == 'nadirpinhole': 361 | if args.gcp: 362 | ba_args = img_list+cam_list+gcp_list 363 | ba_opt = get_ba_opts(ba_prefix,overlap_list,flavor='2round_gcp_2',session=session,gcp_transform=True) 364 | else: 365 | ba_args = img_list+cam_list+gcp_list 366 | ba_opt = get_ba_opts(ba_prefix,overlap_list,flavor='2round_gcp_2',session=session,gcp_transform=True) 367 | else: 368 | if args.gcp: 369 | ba_args = img_list+gcp_list 370 | ba_opt = get_ba_opts(ba_prefix,overlap_list,initial_transform=initial_transform,flavor='2round_gcp_2',session=session,gcp_transform=True) 371 | else: 372 | ba_args = img_list+gcp_list 373 | ba_opt = get_ba_opts(ba_prefix,overlap_list,initial_transform=initial_transform,flavor='2round_gcp_2',session=session,gcp_transform=True) 374 | print("Simply transforming the cameras without optimization") 375 | run_cmd('bundle_adjust',ba_opt+ba_args,'Running bundle adjust') 376 | 377 | # general usecase bundle adjust 378 | if mode == 'general_ba': 379 | round1_opts = get_ba_opts(ba_prefix,overlap_limit=args.overlap_limit,flavor='2round_gcp_1',session=session) 380 | print ("Running general purpose bundle adjustment") 381 | if session == 'nadirpinhole': 382 | ba_args = img_list+cam_list 383 | 
else: 384 | ba_args = img_list 385 | # Check if this command executed till last 386 | run_cmd('bundle_adjust',round1_opts+ba_args,'Running bundle adjust') 387 | print("Script is complete !") 388 | if __name__=="__main__": 389 | main() 390 | -------------------------------------------------------------------------------- /scripts/compare_dems.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | import os,sys,glob 3 | import pandas as pd 4 | import numpy as np 5 | from pygeotools.lib import iolib,geolib,malib,warplib 6 | import gdal 7 | import matplotlib.pyplot as plt 8 | import argparse 9 | import geopandas as gpd 10 | from shapely.geometry import Polygon 11 | from mpl_toolkits.axes_grid1 import make_axes_locatable 12 | import logging 13 | import json 14 | import subprocess 15 | 16 | def gdaldem(ds,producttype='slope',returnma=True): 17 | """ 18 | perform gdaldem operations such as slope, hillshade etc via the python api 19 | Parameters 20 | ----------- 21 | ds: gdal dataset 22 | DEM dataset for which derived products are to be computed 23 | producttype: str 24 | operation to perform (e.g., slope, aspect, hillshade) 25 | returnma: bool 26 | return the product as masked array if true, or a dataset if false 27 | Returns 28 | ----------- 29 | out: masked array or dataset 30 | output product in form of masked array or dataset (see params) 31 | """ 32 | dem_p_ds = gdal.DEMProcessing('',ds,producttype,format='MEM') 33 | ma = iolib.ds_getma(dem_p_ds) 34 | if returnma: 35 | out = ma 36 | else: 37 | out = dem_p_ds 38 | return out 39 | 40 | def cummulative_profile(xma,yma,xbin_width,limit_x_perc = (1,99)): 41 | """ 42 | compute binned statistics for independent variable with respect to dependendent variable 43 | Parameters 44 | ----------- 45 | xma: masked array 46 | independent variable (like slope) 47 | yma: masked array 48 | dependent variable (like elevation difference) 49 | xbin_width: int 50 | bin_width for 
independent variable 51 | limit_x_perc: tuple 52 | limit binning of independent variable to the given percentile (default: 1 to 99 %) 53 | Returns 54 | ----------- 55 | x_bins: np.array 56 | bin locations 57 | y_mean: np.array 58 | binned mean value for dependent variable 59 | y_meadian: np.array 60 | binned median value for dependent variable 61 | y_std: np.array 62 | binned standard deviation value for dependent varuiable 63 | y_perc: np.array 64 | binned percentage of variables within the bin 65 | """ 66 | # xclim get rids of outliers in the independent variable 67 | # we only look at the 1 to 99 percentile values by default 68 | xclim = malib.calcperc(xma,limit_x_perc) 69 | # this step computes common mask where pixels of both x and y variables are valid 70 | xma_lim = np.ma.masked_outside(xma,xclim[0],xclim[1]) 71 | cmask = malib.common_mask([xma_lim,yma]) 72 | # the common mask is used to flatten the required points in a 1-D array 73 | xma_c = np.ma.compressed(np.ma.array(xma_lim,mask=cmask)) 74 | yma_c = np.ma.compressed(np.ma.array(yma,mask=cmask)) 75 | # we then use pandas groupby to quickly compute binned statistics 76 | df = pd.DataFrame({'x':xma_c,'y':yma_c}) 77 | df['x_rounded']=(df['x']+(xbin_width-1))//(xbin_width)*xbin_width 78 | grouped=df.groupby('x_rounded') 79 | df2=grouped['y'].agg([np.mean,np.count_nonzero,np.median,np.std]) 80 | df2.reset_index(inplace=True) 81 | # variables are returned as numpy array 82 | x_bins = df2['x_rounded'].values 83 | y_mean = df2['mean'].values 84 | y_median = df2['median'].values 85 | y_std = df2['std'].values 86 | y_perc = (df2['count_nonzero'].values/np.sum( 87 | df2['count_nonzero'].values))*100 88 | return x_bins,y_mean,y_median,y_std,y_perc 89 | 90 | def getparser(): 91 | parser = argparse.ArgumentParser(description='Script for comparing 2 DEMs') 92 | parser.add_argument('-refdem',default=None,type=str, 93 | help='path to refernece DEM file') 94 | parser.add_argument('-srcdem',default=None,type=str, 95 | 
help='path to source DEM file') 96 | binary_choices = [1,0] 97 | parser.add_argument('-local_ortho',default=1,type=int, 98 | choices=binary_choices, 99 | help='perform comparison on local ortho grid if 1, else native grid if 0 (default: %(default)s)') 100 | res_choices = ['mean','min','max'] 101 | parser.add_argument('-comparison_res',default='min',choices=res_choices, 102 | help='common resolution at which to perform comparison, (default: %(default)s)') 103 | parser.add_argument('-elev_bin_width',default=10,type=int, 104 | help='elevation bin width for computing binned elevation difference statistics (default: %(default)s m)') 105 | parser.add_argument('-slope_bin_width',default=2,type=int, 106 | help='slope bin width for computing binned elevation difference statistics (default: %(default)s degrees)') 107 | parser.add_argument('-coreg',default=1,type=int,choices=binary_choices, 108 | help='Attempt co-registeration and redo stats calculation (default:%(default)s)') 109 | parser.add_argument('-outfol',type=str,required=False, 110 | help='path to outfolder to store results') 111 | return parser 112 | 113 | def main(): 114 | parser = getparser() 115 | args = parser.parse_args() 116 | refdem = args.refdem 117 | srcdem = args.srcdem 118 | outfolder = '{}__{}_comparison_stats'.format(os.path.splitext( 119 | os.path.basename(refdem))[0],os.path.splitext(os.path.basename( 120 | srcdem))[0]) 121 | header_str = '{}__{}'.format(os.path.splitext(os.path.basename(refdem))[0], 122 | os.path.splitext(os.path.basename(srcdem))[0]) 123 | if not os.path.exists(outfolder): 124 | os.makedirs(outfolder) 125 | if args.local_ortho == 1: 126 | temp_ds = warplib.memwarp_multi_fn([refdem,srcdem])[0] 127 | bbox = geolib.ds_extent(temp_ds) 128 | geo_crs = temp_ds.GetProjection() 129 | print ('Bounding box lon_lat is{}'.format(bbox)) 130 | bound_poly = Polygon([[bbox[0],bbox[3]],[bbox[2],bbox[3]],[bbox[2],bbox[1]],[bbox[0],bbox[1]]]) 131 | bound_shp = 
gpd.GeoDataFrame(index=[0],geometry=[bound_poly],crs=geo_crs) 132 | bound_centroid = bound_shp.centroid 133 | cx = bound_centroid.x.values[0] 134 | cy = bound_centroid.y.values[0] 135 | pad = np.ptp([bbox[3],bbox[1]])/6.0 136 | lat_1 = bbox[1]+pad 137 | lat_2 = bbox[3]-pad 138 | local_ortho = "+proj=ortho +lat_1={} +lat_2={} +lat_0={} +lon_0={} +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs".format(lat_1,lat_2,cy,cx) 139 | logging.info('Local Ortho projection is {}'.format(local_ortho)) 140 | t_srs = local_ortho 141 | else: 142 | t_srs = 'first' 143 | # this step performs the desired warping operation 144 | ds_list = warplib.memwarp_multi_fn([refdem,srcdem],res=args.comparison_res, 145 | t_srs=t_srs) 146 | refma = iolib.ds_getma(ds_list[0]) 147 | srcma = iolib.ds_getma(ds_list[1]) 148 | init_diff = refma - srcma 149 | init_stats = malib.get_stats_dict(init_diff) 150 | print("Original descriptive statistics {}".format(init_stats)) 151 | init_diff_json_fn = os.path.join(outfolder, 152 | '{}_precoreg_descriptive_stats.json'.format(header_str)) 153 | init_diff_json = json.dumps(init_stats) 154 | 155 | with open(init_diff_json_fn,'w') as f: 156 | f.write(init_diff_json) 157 | logging.info("Saved initial stats at {}".format(init_diff_json)) 158 | refslope = gdaldem(ds_list[0]) 159 | # stats for elevation difference vs reference DEM elevation 160 | elev_bin,diff_mean,diff_median,diff_std,diff_perc = cummulative_profile( 161 | refma,init_diff,args.elev_bin_width) 162 | # stats for elevation difference vs reference DEM slope 163 | slope_bin,diff_mean_s,diff_median_s,diff_std_s,diff_perc_s = cummulative_profile( 164 | refslope,init_diff,args.slope_bin_width) 165 | f,ax = plt.subplots(1,2,figsize=(10,4)) 166 | im = ax[0].scatter(elev_bin,diff_mean,c=diff_perc,cmap='inferno') 167 | ax[0].set_xlabel('Elevation (m)') 168 | divider = make_axes_locatable(ax[0]) 169 | cax = divider.append_axes('right', size='2.5%', pad=0.05) 170 | plt.colorbar(im, cax=cax, 
orientation='vertical',label='pixel count percentage') 171 | im2 = ax[1].scatter(slope_bin,diff_mean_s,c=diff_perc_s,cmap='inferno') 172 | ax[1].set_xlabel('Slope (degrees)') 173 | divider = make_axes_locatable(ax[1]) 174 | cax = divider.append_axes('right', size='2.5%', pad=0.05) 175 | plt.colorbar(im2, cax=cax, orientation='vertical',label='pixel count percentage') 176 | 177 | for axa in ax.ravel(): 178 | axa.axhline(y=0,c='k') 179 | axa.set_ylabel('Elevation Difference (m)') 180 | plt.tight_layout() 181 | precoreg_plot = os.path.join(outfolder,header_str+'_precoreg_binned_plot.png') 182 | f.savefig(precoreg_plot,dpi=300,bbox_inches='tight', pad_inches=0.1) 183 | logging.info("Saved binned plot at {}".format(precoreg_plot)) 184 | if args.coreg == 1: 185 | logging.info("will attempt coregisteration") 186 | if args.local_ortho == 1: 187 | ref_local_ortho = os.path.splitext(refdem)[0]+'_local_ortho.tif' 188 | src_local_ortho = os.path.splitext(srcdem)[0]+'_local_ortho.tif' 189 | # coregisteration works best at mean resolution 190 | # we will rewarp if the initial args.res was not mean 191 | if args.comparison_res != 'mean': 192 | ds_list = warplib.memwarp_multi_fn([refdem,srcdem],res='mean', 193 | t_srs = t_srs) 194 | refma = iolib.ds_getma(ds_list[0]) 195 | srcma = iolib.ds_getma(ds_list[1]) 196 | iolib.writeGTiff(refma,ref_local_ortho,ds_list[0]) 197 | iolib.writeGTiff(srcma,src_local_ortho,ds_list[1]) 198 | coreg_ref = ref_local_ortho 199 | src_ref = src_local_ortho 200 | else: 201 | coreg_ref = refdem 202 | src_ref = srcdem 203 | demcoreg_dir = os.path.join(outfolder,'coreg_results') 204 | align_opts = ['-mode', 'nuth','-max_iter','12','-max_offset','400', 205 | '-outdir',demcoreg_dir] 206 | align_args = [coreg_ref,src_ref] 207 | align_cmd = ['dem_align.py']+align_opts+align_args 208 | subprocess.call(align_cmd) 209 | #ah final round of warping and stats calculation 210 | try: 211 | srcdem_align = glob.glob(os.path.join(demcoreg_dir,'*align.tif'))[0] 212 | 
logging.info("Attempting stats calculation for aligned DEM {}".format( 213 | srcdem_align)) 214 | ds_list = warplib.memwarp_multi_fn([args.refdem,srcdem_align], 215 | res=args.comparison_res,t_srs = t_srs) 216 | refma = iolib.ds_getma(ds_list[0]) 217 | srcma = iolib.ds_getma(ds_list[1]) 218 | # this is creepy, but I am recycling variable names to save on memory 219 | init_diff = refma - srcma 220 | init_stats = malib.get_stats_dict(init_diff) 221 | print("Final descriptive statistics {}".format(init_stats)) 222 | init_diff_json_fn = os.path.join(outfolder, 223 | '{}_postcoreg_descriptive_stats.json'.format(header_str)) 224 | init_diff_json = json.dumps(init_stats) 225 | 226 | with open(init_diff_json_fn,'w') as f: 227 | f.write(init_diff_json) 228 | logging.info("Saved final stats at {}".format(init_diff_json)) 229 | refslope = gdaldem(ds_list[0]) 230 | # stats for elevation difference vs reference DEM elevation 231 | elev_bin,diff_mean,diff_median,diff_std,diff_perc = cummulative_profile( 232 | refma,init_diff,args.elev_bin_width) 233 | # stats for elevation difference vs reference DEM slope 234 | slope_bin,diff_mean_s,diff_median_s,diff_std_s,diff_perc_s = cummulative_profile( 235 | refslope,init_diff,args.slope_bin_width) 236 | f,ax = plt.subplots(1,2,figsize=(10,4)) 237 | im = ax[0].scatter(elev_bin,diff_mean,c=diff_perc,cmap='inferno') 238 | ax[0].set_xlabel('Elevation (m)') 239 | divider = make_axes_locatable(ax[0]) 240 | cax = divider.append_axes('right', size='2.5%', pad=0.05) 241 | plt.colorbar(im, cax=cax, orientation='vertical',label='pixel count percentage') 242 | im2 = ax[1].scatter(slope_bin,diff_mean_s,c=diff_perc_s,cmap='inferno') 243 | ax[1].set_xlabel('Slope (degrees)') 244 | divider = make_axes_locatable(ax[1]) 245 | cax = divider.append_axes('right', size='2.5%', pad=0.05) 246 | plt.colorbar(im2, cax=cax, orientation='vertical',label='pixel count percentage') 247 | 248 | for axa in ax.ravel(): 249 | axa.axhline(y=0,c='k') 250 | 
axa.set_ylabel('Elevation Difference (m)') 251 | plt.tight_layout() 252 | precoreg_plot = os.path.join(outfolder,header_str+'_postcoreg_binned_plot.png') 253 | f.savefig(precoreg_plot,dpi=300,bbox_inches='tight', pad_inches=0.1) 254 | except: 255 | logging.info("Failed to compute post coreg stats, see corresponding job log") 256 | logging.info("Script is complete !") 257 | 258 | if __name__=="__main__": 259 | main() 260 | 261 | 262 | -------------------------------------------------------------------------------- /scripts/legacy/skysat_orthorectify.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import numpy as np 4 | import os,sys,glob,shutil 5 | import argparse 6 | from skysat_stereo import asp_utils as asp 7 | from skysat_stereo import skysat 8 | from p_tqdm import p_map 9 | from imview import pltlib 10 | import itertools 11 | import ast 12 | import matplotlib.pyplot as plt 13 | from multiprocessing import cpu_count 14 | 15 | def get_parser(): 16 | parser = argparse.ArgumentParser(description='create browse image from input Skysat directory') 17 | parser.add_argument('-img_folder', help='Folder containing subdirectories of imagefiles', required=True) 18 | session_choice = ['rpc','pinhole'] 19 | parser.add_argument('-session',choices = session_choice, default = 'rpc', help = 'Session for mapproject (defualt: %(default)s)') 20 | parser.add_argument('-out_folder',help='Folder where output orthoimages will be stored', required=True) 21 | parser.add_argument('-tr',help='Output image resolution',default=None) 22 | parser.add_argument('-tsrs',help='Output crs as EPSG code, example EPSG:32610') 23 | parser.add_argument('-DEM',help='Optional DEM for mapprojecting',default='WGS84') 24 | parser.add_argument('-delete_temporary_files',help='Delete temporary individual mapprojected files written to disc',default=True) 25 | map_choices = ['science','browse'] 26 | 
# --- get_parser (continued): remaining CLI options for the legacy orthorectify script ---
    parser.add_argument('-mode',choices=map_choices,default='browse',help='select mode for mapprojection default: %(default)s')
    parser.add_argument('-ba_prefix',default=None,help='bundle adjust prefix for rpc, or joiner for bundle adjusted pinhole cameras',required=False)
    parser.add_argument('-cam',default=None,help='camera folder containing list of tsai files for pinhole files',required=False)
    parser.add_argument('-frame_index',default=None,help="frame index to read frame's actual Ground sampling distance",required=False)
    orthomosaic_choice = [1,0]
    parser.add_argument('-orthomosaic',default=0,type=int,choices=orthomosaic_choice, help="if mode is science, enabling this (1) will also produce a final orthomosaic (default: %(default)s)")
    parser.add_argument('-copy_rpc',default=0,type=int,choices=orthomosaic_choice,help='if mode is science, enabling this (1) will copy rpc metadata in the orthoimage (default: %(default)s)')
    data_choices = ['video','triplet']
    parser.add_argument('-data',default='triplet',choices=data_choices,help="select if mosaicing video or triplet product in science mode (default: %(default)s)")
    parser.add_argument('-overlap_list', default=None,
        help='list containing pairs for which feature matching was restricted due during cross track bundle adjustment (not required during basic triplet processing)')
    return parser

def main():
    # Entry point: orthorectify SkySat imagery in 'browse' (quick-look) or
    # 'science' (full-resolution, adjusted-camera) mode.
    parser = get_parser()
    args = parser.parse_args()
    tr = str(args.tr)
    tsrs = args.tsrs
    # NOTE(review): 'dir' shadows the builtin; kept as-is in this legacy script
    dir = os.path.abspath(args.img_folder)
    outdir = os.path.abspath(args.out_folder)
    images = sorted(glob.glob(os.path.join(dir,'*.tif*')))
    # resolve symlinked inputs to their real paths
    if os.path.islink(images[0]):
        images = [os.readlink(x) for x in images]
    del_opt = args.delete_temporary_files
    dem=args.DEM
    cam_folder = args.cam
    ba_prefix = args.ba_prefix
    mode = args.mode
    if mode == 'browse':
        """
        this block creates low-res orthomosaics from RPC info for browsing purpose only
        """
        # split the collection into forward/nadir/aft views with their timestamps
        for_img_list,nadir_img_list,aft_img_list,for_time,nadir_time,aft_time = skysat.sort_img_list(images)
        for_out_dir = os.path.join(outdir,'for_map_browse')
        nadir_out_dir = os.path.join(outdir,'nadir_map_browse')
        aft_out_dir = os.path.join(outdir,'aft_map_browse')
        for_out_list = [os.path.join(for_out_dir,os.path.splitext(os.path.basename(img))[0]+'_browse_map.tif') for img in for_img_list]
        nadir_out_list = [os.path.join(nadir_out_dir,os.path.splitext(os.path.basename(img))[0]+'_browse_map.tif') for img in nadir_img_list]
        aft_out_list = [os.path.join(aft_out_dir,os.path.splitext(os.path.basename(img))[0]+'_browse_map.tif') for img in aft_img_list]
        for_count,nadir_count,aft_count = [len(for_img_list), len(nadir_img_list), len(aft_img_list)]
        print("Performing orthorectification for forward images {}".format(for_time))
        # mapproject each view in parallel onto the WGS84 ellipsoid in EPSG:4326
        for_map_log = p_map(asp.mapproject,for_img_list,for_out_list,[args.session]*for_count,['WGS84']*for_count,[None]*for_count,
            ['EPSG:4326']*for_count,[None]*for_count,[None]*for_count,[None]*for_count)
        print("Performing orthorectification for nadir images {}".format(nadir_time))
        nadir_map_log = p_map(asp.mapproject,nadir_img_list,nadir_out_list,[args.session]*nadir_count,['WGS84']*nadir_count,[None]*nadir_count,
            ['EPSG:4326']*nadir_count,[None]*nadir_count,[None]*nadir_count,[None]*nadir_count)
        print("Performing orthorectification for aft images {}".format(aft_time))
        aft_map_log = p_map(asp.mapproject,aft_img_list,aft_out_list,[args.session]*aft_count,['WGS84']*aft_count,[None]*aft_count,
            ['EPSG:4326']*aft_count,[None]*aft_count,[None]*aft_count,[None]*aft_count)
        ortho_log = os.path.join(outdir,'low_res_ortho.log')
        print("Orthorectification log saved at {}".format(ortho_log))
        with open(ortho_log,'w') as f:
            total_ortho_log = for_map_log+nadir_map_log+aft_map_log
            for log in itertools.chain.from_iterable(total_ortho_log):
                f.write(log)

        # after orthorectification, now do mosaic
        for_out_mos = os.path.join(outdir,'for_map_mos_{}m.tif'.format(tr))
        for_map_list = sorted(glob.glob(os.path.join(for_out_dir,'*.tif')))
        nadir_out_mos = os.path.join(outdir,'nadir_map_mos_{}m.tif'.format(tr))
        nadir_map_list = sorted(glob.glob(os.path.join(nadir_out_dir,'*.tif')))
        aft_out_mos = os.path.join(outdir,'aft_map_mos_{}m.tif'.format(tr))
        aft_map_list = sorted(glob.glob(os.path.join(aft_out_dir,'*.tif')))
        print("Preparing forward browse orthomosaic")
        for_mos_log = asp.dem_mosaic(for_map_list,for_out_mos,tr,tsrs,stats='first',tile_size=None)
        print("Preparing nadir browse orthomosaic")
        nadir_mos_log = asp.dem_mosaic(nadir_map_list, nadir_out_mos, tr, tsrs,stats='first',tile_size=None)
        print("Preparing aft browse orthomosaic")
        aft_mos_log = asp.dem_mosaic(aft_map_list, aft_out_mos, tr, tsrs,stats='first',tile_size=None)
        ## delete temporary files
        if del_opt:
            [shutil.rmtree(x) for x in [for_out_dir,nadir_out_dir,aft_out_dir]]
        #Save figure to jpeg ?
        fig_title = os.path.basename(images[0]).split('_',15)[0]+'_'+for_time+'_'+nadir_time+'_'+aft_time
        fig,ax = plt.subplots(1,3,figsize=(10,10))
        pltlib.iv_fn(for_out_mos,full=True,ax=ax[0],cmap='gray',scalebar=True,title='Forward')
        pltlib.iv_fn(nadir_out_mos,full=True,ax=ax[1],cmap='gray',scalebar=True,title='NADIR')
        pltlib.iv_fn(aft_out_mos,full=True,ax=ax[2],cmap='gray',scalebar=True,title='Aft')
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])
        fig.suptitle(fig_title)
        browse_img_fn = os.path.join(outdir,'browse_img_{}_{}m.jpg'.format(fig_title,tr))
        fig.savefig(browse_img_fn,dpi=300,bbox_inches='tight',pad_inches=0.1)
        print("Browse figure saved at {}".format(browse_img_fn))

    if mode == 'science':
        img_list = images
        if args.overlap_list is not None:
            # need to remove images and cameras which are not optimised during bundle adjustment
            # read pairs from input overlap list
            initial_count = len(img_list)
            with open(args.overlap_list) as f:
                content = f.readlines()
            content = [x.strip() for x in content]
            l_img = [x.split(' ')[0] for x in content]
            r_img = [x.split(' ')[1] for x in content]
            total_img = l_img + r_img
            # keep unique image names in first-seen order
            uniq_idx = np.unique(total_img, return_index=True)[1]
            img_list = [total_img[idx] for idx in sorted(uniq_idx)]
            print(f"Out of the initial {initial_count} images, {len(img_list)} will be orthorectified using adjusted cameras")
        if args.frame_index is not None:
            frame_index = skysat.parse_frame_index(args.frame_index)
            img_list = [glob.glob(os.path.join(dir,'{}*.tiff'.format(frame)))[0] for frame in frame_index.name.values]
            print("no of images is {}".format(len(img_list)))
        img_prefix = [os.path.splitext(os.path.basename(img))[0] for img in img_list]
        out_list = [os.path.join(outdir,img+'_map.tif') for img in img_prefix]
        session_list = [args.session]*len(img_list)
        dem_list = [dem]*len(img_list)
        tr_list = [args.tr]*len(img_list)
        if args.frame_index is not None:
            # this hack is for video: per-frame GSD from the frame index
            df = skysat.parse_frame_index(args.frame_index)
            trunc_df = df[df['name'].isin(img_prefix)]
            tr_list = [str(gsd) for gsd in trunc_df.gsd.values]
        srs_list = [tsrs]*len(img_list)
        if args.session == 'pinhole':
            if ba_prefix:
                # bundle-adjusted tsai cameras named <ba_prefix>-<image>*.tsai
                cam_list = [glob.glob(os.path.abspath(ba_prefix)+'-'+os.path.splitext(os.path.basename(x))[0]+'*.tsai')[0] for x in img_list]
                print("No of cameras is {}".format(len(cam_list)))
            else:
                print(os.path.join(os.path.abspath(args.cam),os.path.splitext(os.path.basename(img_list[0]))[0]+'*.tsai'))
                cam_list = [glob.glob(os.path.join(os.path.abspath(args.cam),os.path.splitext(os.path.basename(x))[0]+'*.tsai'))[0] for x in img_list]
        else:
            # rpc session: camera info lives in the image headers
            cam_list = [None]*len(img_list)
            if ba_prefix:
                # not yet implemented
                ba_prefix_list = [ba_prefix]*len(img_list)

        print("Mapping given images")
        # quarter of the CPUs: mapproject itself is internally multithreaded
        ortho_logs = p_map(asp.mapproject,img_list,out_list,session_list,dem_list,tr_list,srs_list,cam_list,
            [None]*len(img_list),[None]*len(img_list),num_cpus=int(cpu_count()/4))
        ortho_log = os.path.join(outdir,'ortho.log')
        print("Saving Orthorectification log at {}".format(ortho_log))
        with open(ortho_log,'w') as f:
            for log in ortho_logs:
                f.write(log)
        if args.copy_rpc == 1:
            print("Copying RPC from native image to orthoimage in parallel")
            copy_rpc_out = p_map(skysat.copy_rpc,img_list,out_list,num_cpus=cpu_count())
        if args.orthomosaic == 1:
            print("Will also produce median, weighted average and highest resolution orthomosaic")
            if args.data == 'triplet':
                # sort images based on timestamps and resolutions
                img_list, time_list = skysat.sort_img_list(out_list)
                res_sorted_list = skysat.res_sort(out_list)

                # define mosaic prefix containing timestamps of inputs
                mos_prefix = '_'.join(np.unique([t.split('_')[0] for t in time_list]))+'__'+'_'.join(np.unique([t.split('_')[1] for t in time_list]))

                # define output filenames
                res_sorted_mosaic = os.path.join(outdir,'{}_finest_orthomosaic.tif'.format(mos_prefix))
                median_mosaic = os.path.join(outdir,'{}_median_orthomosaic.tif'.format(mos_prefix))
                wt_avg_mosaic = os.path.join(outdir,'{}_wt_avg_orthomosaic.tif'.format(mos_prefix))
                indi_mos_list = [os.path.join(outdir,f'{time}_first_orthomosaic.tif') for time in time_list]

                print("producing finest resolution on top mosaic, per-pixel median and wt_avg mosaic")
                all_3_view_mos_logs = p_map(asp.dem_mosaic, [res_sorted_list]*3, [res_sorted_mosaic,median_mosaic,wt_avg_mosaic],
                    ['None']*3, [None]*3, ['first','median',None],[None]*3,num_cpus=4)

                print("producing idependent mosaic for different views in parallel")
                indi_mos_count = len(time_list)
                # tile the mosaic when many views are involved to bound memory
                if indi_mos_count>3:
                    tile_size = 400
                else:
                    tile_size = None

                indi_mos_log = p_map(asp.dem_mosaic,img_list, indi_mos_list, ['None']*indi_mos_count, [None]*indi_mos_count,
                    ['first']*indi_mos_count,[tile_size]*indi_mos_count)

                # write out log files
                out_log = os.path.join(outdir,'science_mode_ortho_mos.log')
                total_mos_log = all_3_view_mos_logs+indi_mos_log
                print("Saving orthomosaic log at {}".format(out_log))
                with open(out_log,'w') as f:
                    for log in itertools.chain.from_iterable(total_mos_log):
                        f.write(log)

            if args.data == 'video':
                res_sorted_list = skysat.res_sort(out_list)
                print("producing orthomasaic with finest on top")
                res_sorted_mosaic = os.path.join(outdir,'video_finest_orthomosaic.tif')
                print("producing orthomasaic with per-pixel median stats")
                median_mosaic = os.path.join(outdir,'video_median_orthomosaic.tif')
                print("producing orthomosaic with weighted average statistics")
wt_avg_mosaic = os.path.join(outdir,'video_wt_avg_orthomosaic.tif') 213 | print("Mosaicing will be done in parallel") 214 | all_3_view_mos_logs = p_map(asp.dem_mosaic, [res_sorted_list]*3, [res_sorted_mosaic,median_mosaic,wt_avg_mosaic], ['None']*3, [None]*3, ['first','median',None],[None]*3) 215 | out_log = os.path.join(outdir,'science_mode_ortho_mos.log') 216 | print("Saving orthomosaic log at {}".format(out_log)) 217 | with open(out_log,'w') as f: 218 | for log in all_3_view_mos_logs: 219 | f.write(log) 220 | print("Script is complete!") 221 | 222 | if __name__=='__main__': 223 | main() 224 | 225 | -------------------------------------------------------------------------------- /scripts/legacy/skysat_overlap.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import argparse 4 | import glob 5 | from skysat_stereo import asp_utils as asp 6 | from skysat_stereo import skysat 7 | from skysat_stereo import misc_geospatial as geo 8 | import time 9 | import os,sys,glob 10 | from p_tqdm import p_umap, p_map 11 | from multiprocessing import cpu_count 12 | from shapely.geometry import Polygon 13 | from itertools import combinations,compress 14 | import numpy as np 15 | import geopandas as gpd 16 | import pandas as pd 17 | def getparser(): 18 | parser = argparse.ArgumentParser(description='Script to make overlapping pairs based on user defined minimum overlap percentage') 19 | parser.add_argument('-img_folder', help='Folder containing images with RPC information', required=True) 20 | parser.add_argument('-percentage', '--percentage', help='percentage_overlap between 0 to 1', required=True) 21 | parser.add_argument('-outfn','--out_fn',help='Text file containing the overlapping pairs') 22 | parser.add_argument('-cross_track',action='store_true',help='Also make cross-track pairs') 23 | parser.add_argument('-aoi_bbox',help='Return interesecting footprint within this aoi only',default=None) 24 | return parser 
25 | 26 | # Global var 27 | geo_crs = 'EPSG:4326' 28 | 29 | def main(): 30 | #The following block of code is useful for getting a shapefile encompassing the entire subset (Use for clipping DEMs etc) 31 | #Also, I define the local ortho coordinates using the center of the big bounding box 32 | init_time = time.time() 33 | parser = getparser() 34 | args = parser.parse_args() 35 | img_folder = args.img_folder 36 | try: 37 | img_list = sorted(glob.glob(os.path.join(img_folder,'*.tif'))) 38 | print("Number of images {}".format(len(img_list))) 39 | except: 40 | print ("No images found in the directory. Make sure they end with a .tif extension") 41 | sys.exit() 42 | out_fn = args.out_fn 43 | perc_overlap = np.float(args.percentage) 44 | out_shp = os.path.splitext(out_fn)[0]+'_bound.gpkg' 45 | n_proc = cpu_count() 46 | shp_list = p_umap(skysat.skysat_footprint,img_list,num_cpus=2*n_proc) 47 | merged_shape = geo.shp_merger(shp_list) 48 | bbox = merged_shape.total_bounds 49 | print (f'Bounding box lon_lat is:{bbox}') 50 | bound_poly = Polygon([[bbox[0],bbox[3]],[bbox[2],bbox[3]],[bbox[2],bbox[1]],[bbox[0],bbox[1]]]) 51 | bound_shp = gpd.GeoDataFrame(index=[0],geometry=[bound_poly],crs=geo_crs) 52 | bound_centroid = bound_shp.centroid 53 | cx = bound_centroid.x.values[0] 54 | cy = bound_centroid.y.values[0] 55 | pad = np.ptp([bbox[3],bbox[1]])/6.0 56 | lat_1 = bbox[1]+pad 57 | lat_2 = bbox[3]-pad 58 | #local_ortho = '+proj=ortho +lat_0={} +lon_0={}'.format(cy,cx) 59 | local_aea = "+proj=aea +lat_1={} +lat_2={} +lat_0={} +lon_0={} +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs".format(lat_1,lat_2,cy,cx) 60 | print ('Local Equal Area coordinate system is : {} \n'.format(local_aea)) 61 | print('Saving bound shapefile at {} \n'.format(out_shp)) 62 | bound_shp.to_file(out_shp,driver='GPKG') 63 | 64 | 65 | # condition to check bbox aoi 66 | if args.aoi_bbox: 67 | bbox = gpd.read_file(args.aoi_bbox) 68 | mask = merged_shape.to_crs(bbox.crs).intersects(bbox) 69 | img_list 
= merged_shape[mask].img.values 70 | 71 | img_combinations = list(combinations(img_list,2)) 72 | n_comb = len(img_combinations) 73 | perc_overlap = np.ones(n_comb,dtype=float)*perc_overlap 74 | proj = local_aea 75 | tv = p_map(skysat.frame_intsec, img_combinations, [proj]*n_comb, perc_overlap,num_cpus=4*n_proc) 76 | # result to this contains truth value (0 or 1, overlap percentage) 77 | truth_value = [tvs[0] for tvs in tv] 78 | overlap = [tvs[1] for tvs in tv] 79 | valid_list = list(compress(img_combinations,truth_value)) 80 | overlap_perc_list = list(compress(overlap,truth_value)) 81 | print('Number of valid combinations are {}, out of total {} input images making total combinations {}\n'.format(len(valid_list),len(img_list),n_comb)) 82 | with open(out_fn, 'w') as f: 83 | img1_list = [x[0] for x in valid_list] 84 | img2_list = [x[1] for x in valid_list] 85 | for idx,i in enumerate(valid_list): 86 | #f.write("%s %s\n" % i) 87 | f.write(f"{os.path.abspath(img1_list[idx])} {os.path.abspath(img2_list[idx])}\n") 88 | out_fn_overlap = os.path.splitext(out_fn)[0]+'_with_overlap_perc.pkl' 89 | img1_list = [x[0] for x in valid_list] 90 | img2_list = [x[1] for x in valid_list] 91 | out_df = pd.DataFrame({'img1':img1_list,'img2':img2_list,'overlap_perc':overlap_perc_list}) 92 | out_df.to_pickle(out_fn_overlap) 93 | if args.cross_track: 94 | cross_track = True 95 | else: 96 | cross_track = False 97 | out_fn_stereo = os.path.splitext(out_fn_overlap)[0]+'_stereo_only.pkl' 98 | stereo_only_df = skysat.prep_trip_df(out_fn_overlap,cross_track=cross_track) 99 | stereo_only_df.to_pickle(out_fn_stereo) 100 | out_fn_stereo_ba = os.path.splitext(out_fn_overlap)[0]+'_stereo_only.txt' 101 | stereo_only_df[['img1','img2']].to_csv(out_fn_stereo_ba,sep=' ',header=False,index=False) 102 | print('Script completed in time {} s!'.format(time.time()-init_time)) 103 | 104 | if __name__=="__main__": 105 | main() 106 | -------------------------------------------------------------------------------- 
/scripts/legacy/skysat_preprocess.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import os,sys,glob,re 4 | import argparse 5 | from pygeotools.lib import iolib,malib 6 | from skysat_stereo import asp_utils as asp 7 | from skysat_stereo import skysat 8 | from p_tqdm import p_map 9 | import numpy as np 10 | from multiprocessing import cpu_count 11 | import pandas as pd 12 | 13 | def getparser(): 14 | parser = argparse.ArgumentParser(description = 'Script for initialing frame cameras for Skysat triplet stereo and video, performing user defined video subsampling') 15 | modes = ['video','triplet'] 16 | parser.add_argument('-mode',default='video',choices=modes, help='choose Skysat product to work with') 17 | session_choices = ['rpc','pinhole'] 18 | parser.add_argument('-t',default='pinhole',choices=session_choices,help='choose between pinhole and rpc mode (default: %(default)s)') 19 | parser.add_argument('-img',default=None,help='folder containing images',required=True) 20 | sampling_mode_choices = ['sampling_interval', 'num_images'] 21 | parser.add_argument('-video_sampling_mode', default = 'num_images', choices = sampling_mode_choices, required = False, help = 'Chose desired sampling procedure, either fixed sampling interval or by equally distributed user defined number of samples (default: %(default)s)') 22 | parser.add_argument('-sampler',default = 5 ,type = int, help = 'if video_sampling_mode: sampling_interval, this is the sampling interval, else this is the number of samples to be selected (default: %(default)s)') 23 | parser.add_argument('-outdir', default = None, required = True, help = 'Output folder to save cameras and GCPs') 24 | parser.add_argument('-frame_index',default=None,help='Frame index csv file provided with L1A video products, will be used for determining stereo combinations') 25 | parser.add_argument('-overlap_pkl',default=None,help='pkl dataframe containing entries of 
overlapping pairs for triplet run, obtained from skysat_overlap_parallel.py') 26 | parser.add_argument('-dem',default=None,help='Reference DEM to be used for frame camera initialisation') 27 | product_levels = ['l1a','l1b'] 28 | parser.add_argument('-product_level', choices = product_levels,default='l1b',required = False, help = 'Product level being processed, (default: %(default)s)') 29 | return parser 30 | 31 | def main(): 32 | parser = getparser() 33 | args = parser.parse_args() 34 | mode = args.mode 35 | session = args.t 36 | img_folder = os.path.abspath(args.img) 37 | outdir = os.path.abspath(args.outdir) 38 | if not os.path.exists(outdir): 39 | try: 40 | os.makedir(outdir) 41 | except: 42 | os.makedirs(outdir) 43 | if mode == 'video': 44 | sampling = args.video_sampling_mode 45 | frame_index = skysat.parse_frame_index(args.frame_index,True) 46 | product_level = 'l1a' 47 | num_samples = len(frame_index) 48 | frames = frame_index.name.values 49 | sampler = args.sampler 50 | outdf = os.path.join(outdir,os.path.basename(args.frame_index)) 51 | if sampling == 'sampling_interval': 52 | print("Hardcoded sampling interval results in frame exclusion at the end of the video sequence based on step size, better to chose the num_images mode and the program will equally distribute accordingly") 53 | idx = np.arange(0,num_samples,sampler) 54 | outdf = '{}_sampling_inteval_{}.csv'.format(os.path.splitext(outdf)[0],sampler) 55 | else: 56 | print("Sampling {} from {} of the input video sequence".format(sampler,num_samples)) 57 | idx = np.linspace(0,num_samples-1,sampler,dtype=int) 58 | outdf = '{}_sampling_inteval_aprox{}.csv'.format(os.path.splitext(outdf)[0],idx[1]-idx[0]) 59 | sub_sampled_frames = frames[idx] 60 | sub_df = frame_index[frame_index['name'].isin(list(sub_sampled_frames))] 61 | sub_df.to_csv(outdf,sep=',',index=False) 62 | #this is camera/gcp initialisation 63 | n = len(sub_sampled_frames) 64 | img_list = 
[glob.glob(os.path.join(img_folder,'{}*.tiff'.format(frame)))[0] for frame in sub_sampled_frames] 65 | pitch = [1]*n 66 | out_fn = [os.path.join(outdir,'{}_frame_idx.tsai'.format(frame)) for frame in sub_sampled_frames] 67 | out_gcp = [os.path.join(outdir,'{}_frame_idx.gcp'.format(frame)) for frame in sub_sampled_frames] 68 | frame_index = [args.frame_index]*n 69 | camera = [None]*n 70 | gcp_factor = 4 71 | 72 | elif mode == 'triplet': 73 | df = pd.read_pickle(args.overlap_pkl) 74 | img_list = list(np.unique(np.array(list(df.img1.values)+list(df.img2.values)))) 75 | img_list = [os.path.splitext(os.path.basename(img))[0] for img in img_list] 76 | cam_list = [glob.glob(os.path.join(img_folder,'{}*.tif'.format(img)))[0] for img in img_list] 77 | n = len(img_list) 78 | if args.product_level == 'l1b': 79 | pitch = [0.8]*n 80 | else: 81 | pitch = [1.0]*n 82 | out_fn = [os.path.join(outdir,'{}_rpc.tsai'.format(frame)) for frame in img_list] 83 | out_gcp = [os.path.join(outdir,'{}_rpc.gcp'.format(frame)) for frame in img_list] 84 | camera = cam_list 85 | frame_index = [None]*n 86 | img_list = cam_list 87 | gcp_factor = 8 88 | fl = [553846.153846]*n 89 | cx = [1280]*n 90 | cy = [540]*n 91 | dem = args.dem 92 | ht_datum = [malib.get_stats_dict(iolib.fn_getma(dem))['median']]*n # use this value for height where DEM has no-data 93 | gcp_std = [1]*n 94 | datum = ['WGS84']*n 95 | refdem = [dem]*n 96 | n_proc = 30 97 | #n_proc = cpu_count() 98 | cam_gen_log = p_map(asp.cam_gen,img_list,fl,cx,cy,pitch,ht_datum,gcp_std,out_fn,out_gcp,datum,refdem,camera,frame_index,num_cpus = n_proc) 99 | print("writing gcp with basename removed") 100 | # count expexted gcp 101 | print(f"Total expected GCP {gcp_factor*n}") 102 | asp.clean_gcp(out_gcp,outdir) 103 | # saving subprocess consolidated log file 104 | from datetime import datetime 105 | now = datetime.now() 106 | log_fn = os.path.join(outdir,'camgen_{}.log'.format(now)) 107 | print("saving subprocess camgen log at {}".format(log_fn)) 108 
| with open(log_fn,'w') as f: 109 | for log in cam_gen_log: 110 | f.write(log) 111 | print("Script is complete !") 112 | 113 | if __name__=="__main__": 114 | main() 115 | -------------------------------------------------------------------------------- /scripts/legacy/skysat_stereo_cli.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from skysat_stereo import asp_utils as asp 4 | from skysat_stereo import skysat 5 | import numpy as np 6 | import argparse 7 | import os,sys,glob 8 | from multiprocessing import cpu_count 9 | from p_tqdm import p_map 10 | from tqdm import tqdm 11 | 12 | def getparser(): 13 | parser = argparse.ArgumentParser(description='Script for performing stereo jobs, generalised for skysat video and triplet stereo products') 14 | modes = ['video', 'triplet'] 15 | parser.add_argument('-mode',default='video',choices=modes,help='choose Skysat product to work with') 16 | session_choices = ['rpc', 'nadirpinhole', 'rpcmaprpc', 'pinholemappinhole'] 17 | # mapprojecting inputs are faster to process, and generally more complete 18 | # (less holes) + accurate (less blunders in stereo matching) 19 | parser.add_argument('-threads',default=cpu_count(),type=int, 20 | help='number of threads to use for each stereo process, (default: %(default)s)') 21 | entry_choice = ['pprc','corr','rfne','fltr','tri'] 22 | parser.add_argument('-entry_point',type=str,default='pprc',help='start stereo from a particular stage (default: %(default)s)') 23 | parser.add_argument('-t',default='nadirpinhole',choices=session_choices,help='choose between pinhole and rpc mode (default: %(default)s)') 24 | parser.add_argument('-img',default=None,help='folder containing images',required=True) 25 | parser.add_argument('-cam',default=None,help='folder containing cameras, if using nadirpinhole/pinholemappinhole workflow',required=False) 26 | # note that the camera should contain similar names as images. 
We do a 27 | # simple string search to read appropriate camera. 28 | parser.add_argument('-ba_prefix',default=None, help='bundle adjust prefix for reading transforms from .adjust files, mainly for rpc runs, or for reading the correct cameras from a bundle adjustment directory containing multiple generations of pinhole cameras', required=False) 29 | parser.add_argument('-overlap_pkl',default=None,help='pkl dataframe containing entries of overlapping pairs for triplet run, obtained from skysat_overlap_parallel.py') 30 | parser.add_argument('-frame_index',default=None,help='Frame index csv file provided with L1A video products, will be used for determining stereo combinations') 31 | parser.add_argument('-sampling_interval',default=5,required=False,type=int,help='Sampling interval between stereo DEM input pairs, or the interval at which master images are picked for multiview stereo triangulation (default: %(default)s)') 32 | parser.add_argument('-dem',default=None,help='Reference DEM to be used in triangulation, if input images are mapprojected') 33 | texture_choices = ['low', 'normal'] 34 | parser.add_argument('-texture',default='normal',choices=texture_choices,help='keyword to adapt processing for low texture surfaces, for example in case of fresh snow (default: %(default)s)',required=False) 35 | crop_ops = [1,0] 36 | parser.add_argument('-crop_map',default=1,type=int,choices=crop_ops,help='To crop mapprojected images to same resolution and extent or not before stereo') 37 | parser.add_argument('-outfol', default=None, help='output folder where stereo outputs will be saved', required=True) 38 | mvs_choices = [1, 0] 39 | parser.add_argument('-mvs', default=0, type=int, choices=mvs_choices, help='1: Use multiview stereo triangulation for video data, do matching with next 20 slave for each master image/camera (defualt: %(default)s') 40 | parser.add_argument('-block', default=0, type=int, choices=mvs_choices, help='1: use block matching instead of default MGM (default: 
%(default)s') 41 | parser.add_argument('-full_extent',type=int,choices = mvs_choices,default=1, 42 | help='Selecting larger intervals can result in lower footprint output DEM, if 1: then DEMs with smaller interval image pairs will be padded at the begining and end of the video sequence (default: %(default)s)') 43 | parser.add_argument('-writeout_only', action='store_true', help='writeout_jobs to a text file, not run') 44 | parser.add_argument('-job_fn',type=str,help='text file to write stereo jobs to') 45 | parser.add_argument('-cross_track',action='store_true', help='attempt stereo for cross_track pairs as well') 46 | return parser 47 | 48 | def main(): 49 | parser = getparser() 50 | args = parser.parse_args() 51 | img = os.path.abspath(args.img) 52 | try: 53 | img_list = sorted(glob.glob(os.path.join(img, '*.tif'))) 54 | temp = img_list[1] 55 | except BaseException: 56 | img_list = sorted(glob.glob(os.path.join(img, '*.tiff'))) 57 | if len(img_list) == 0: 58 | print("No images in the specified folder, exiting") 59 | sys.exit() 60 | mode = args.mode 61 | session = args.t 62 | ba_prefix = args.ba_prefix 63 | overlap_list_fn = args.overlap_pkl 64 | frame_index = args.frame_index 65 | dem = args.dem 66 | texture = args.texture 67 | sampling_interval = args.sampling_interval 68 | if args.cam: 69 | cam_folder = args.cam 70 | if args.ba_prefix: 71 | ba_prefix = args.ba_prefix 72 | outfol = args.outfol 73 | if mode == 'video': 74 | # assume for now that we are still operating on a fixed image interval method 75 | # can accomodate different convergence angle function method here. 76 | frame_gdf = skysat.parse_frame_index(frame_index) 77 | # for now hardcording sgm,mgm,kernel params, should accept as inputs. 
78 | # Maybe discuss with David with these issues/decisions when the overall 79 | # system is in place 80 | if args.mvs == 1: 81 | job_list = skysat.video_mvs(img,t=session,cam_fol=args.cam,ba_prefix=args.ba_prefix,dem=args.dem,sampling_interval=sampling_interval,texture=texture,outfol=outfol, block=args.block,frame_index=frame_gdf) 82 | else: 83 | if args.full_extent == 1: 84 | full_extent = True 85 | else: 86 | full_extent=False 87 | job_list = skysat.prep_video_stereo_jobs(img,t=session,cam_fol=args.cam,ba_prefix=args.ba_prefix,dem=args.dem,sampling_interval=sampling_interval,texture=texture,outfol=outfol,block=args.block,frame_index=frame_gdf,full_extent=full_extent,entry_point=args.entry_point) 88 | elif mode == 'triplet': 89 | if args.crop_map == 1: 90 | crop_map = True 91 | else: 92 | crop_map = False 93 | job_list = skysat.triplet_stereo_job_list(cross_track=args.cross_track,t=args.t, 94 | threads = args.threads,overlap_list=args.overlap_pkl, img_list=img_list, ba_prefix=args.ba_prefix, cam_fol=args.cam, dem=args.dem, crop_map=crop_map,texture=texture, outfol=outfol, block=args.block,entry_point=args.entry_point) 95 | if not args.writeout_only: 96 | # decide on number of processes 97 | # if block matching, Plieades is able to handle 30-40 4 threaded jobs on bro node 98 | # if MGM/SGM, 25 . This stepup is arbitrariry, research on it more. 
99 | # next build should accept no of jobs and stereo threads as inputs 100 | 101 | print(job_list[0]) 102 | n_cpu = cpu_count() 103 | # no of parallel jobs with user specified threads per job 104 | jobs = int(n_cpu/args.threads) 105 | stereo_log = p_map(asp.run_cmd,['stereo']*len(job_list), job_list, num_cpus=jobs) 106 | stereo_log_fn = os.path.join(outfol,'stereo_log.log') 107 | print("Consolidated stereo log saved at {}".format(stereo_log_fn)) 108 | #with open(stereo_log_fn,'w') as f: 109 | # for logs in stereo_log: 110 | # f.write(logs) 111 | else: 112 | print(f"Writng jobs at {args.job_fn}") 113 | print(f"hey typr of job is {type(job_list)}") 114 | 115 | with open(args.job_fn,'w') as f: 116 | for idx,job in enumerate(tqdm(job_list)): 117 | try: 118 | job_str = 'stereo ' + ' '.join(job) + '\n' 119 | f.write(job_str) 120 | except: 121 | continue 122 | print("Script is complete") 123 | 124 | if __name__ == "__main__": 125 | main() 126 | -------------------------------------------------------------------------------- /scripts/optimise_raw_camera.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import os,sys,glob 4 | import pandas as pd 5 | import geopandas as gpd 6 | import numpy as np 7 | from skysat_stereo import asp_utils as asp 8 | from scipy.optimize import least_squares 9 | from pyquaternion import Quaternion 10 | import argparse 11 | from pygeotools.lib import iolib,geolib 12 | import logging 13 | from pyproj import Transformer 14 | 15 | def cam_solve(q1,q2,q3,q4,CX,CY,CZ,cu,cv,fu,fv,pitch,X,Y,Z): 16 | """ 17 | Forward Solver for simple pinhole camera model 18 | Parameters 19 | ----------- 20 | q1,q2,q3,q4: float 21 | quaternions 22 | CX,CY,CZ: float 23 | camera center position in ECEF system 24 | cx,cy: float 25 | position of optical center in pixel units 26 | fu,fv: float 27 | focal length in pixel units 28 | pitch: float 29 | camera pixel pitch in floats 30 | X,Y,Z: float 31 | 3D points in ECEF coordinate system 32 | Returns 33 | ------------ 34 | px,py: float 35 | points in image plane coordinates 36 | """ 37 | #print(q1,q2,q3,q4) 38 | quaternion = Quaternion(q1,q2,q3,q4) 39 | rot_mat = quaternion.rotation_matrix 40 | rot_mat_inv = np.linalg.inv(rot_mat) 41 | world_points = np.stack((X,Y,Z),axis=0) 42 | cam_cen = np.array([CX,CY,CZ]) 43 | cam_cord = np.matmul(rot_mat_inv,world_points) - np.reshape( 44 | np.matmul(rot_mat_inv,cam_cen),(3,1)) 45 | px = (fu*cam_cord[0])/(pitch*cam_cord[2]) + (cu/pitch) 46 | py = (fv*cam_cord[1])/(pitch*cam_cord[2]) + (cv/pitch) 47 | return px,py 48 | 49 | def reprojection_error(tpl,CX,CY,CZ,cu,cv,fu,fv,pitch,X,Y,Z,im_x,im_y): 50 | """ 51 | tpl: tuple 52 | tuple containing four quaternion (this will be optimized) 53 | CX,CY,CZ: float 54 | camera center position in ECEF system 55 | cx,cy: float 56 | position of optical center in pixel units 57 | fu,fv: float 58 | focal length in pixel units 59 | pitch: float 60 | camera pixel pitch in floats 61 | X,Y,Z: float 62 | 3D points in ECEF coordinate system from GCP 63 | im_x,im_y: float 64 | measured image pixel positions from GCP 65 | Returns 
66 | ---------- 67 | res: float 68 | residual between estimated and actual image coordinate 69 | """ 70 | #print(tpl) 71 | q1,q2,q3,q4 = tpl 72 | px,py = cam_solve(q1,q2,q3,q4,CX,CY,CZ,cu,cv,fu,fv,pitch,X,Y,Z) 73 | #res = (im_x-px)**2 + (im_y-py)**2 74 | res = np.array(list(im_x-px) + list(im_y-py)).ravel() 75 | return res 76 | 77 | def optimiser_quaternion(q1,q2,q3,q4,CX,CY,CZ,cu,cv,fu,fv,pitch,X,Y,Z,im_x,im_y): 78 | """ 79 | q1,q2,q3,q4: float 80 | initial guess four quaternion (this will be optimized) 81 | CX,CY,CZ: float 82 | camera center position in ECEF system 83 | cx,cy: float 84 | position of optical center in pixel units 85 | fu,fv: float 86 | focal length in pixel units 87 | pitch: float 88 | camera pixel pitch in floats 89 | X,Y,Z: float 90 | 3D points in ECEF coordinate system from GCP 91 | im_x,im_y: float 92 | measured image pixel positions from GCP 93 | Returns 94 | ---------- 95 | q1,q2,q3,q4: float 96 | optimised_quaternion 97 | """ 98 | print(q1) 99 | tpl_init = (q1,q2,q3,q4) 100 | error_func = lambda tpl: reprojection_error(tpl,CX,CY,CZ,cu,cv,fu,fv,pitch,X,Y,Z,im_x,im_y) 101 | print("Initial reprojection error {} RMSE px".format(np.sqrt(np.sum(error_func(tpl_init)**2)))) 102 | result = least_squares(error_func,tpl_init[:], 103 | bounds=((-1,-1,-1,-1),(1,1,1,1)),method='dogbox') 104 | #bounds are specified for the quaternions to normalise them 105 | Q1,Q2,Q3,Q4 = result.x 106 | print("Final reprojection error {} RMSE px".format(np.sqrt(np.sum(error_func((Q1,Q2,Q3,Q4))**2)))) 107 | return(Q1,Q2,Q3,Q4) 108 | 109 | def getparser(): 110 | parser = argparse.ArgumentParser( 111 | description="optimise the raw camera model from cam_gen to confirm to satellite telemetry information") 112 | parser.add_argument('-camera_folder',required=True, 113 | help='Folder containing cam_gen derived frame camera model') 114 | parser.add_argument('-gcp_folder',required=False,default=None, 115 | help='Folder containing corner gcps; if none, program looks for gcps in the 
camera folder') 116 | parser.add_argument('-frame_index',required=True, 117 | help='path to frame_index.csv file') 118 | parser.add_argument('-outfol',required=True, 119 | help='path to folder to save optimised camera model') 120 | return parser 121 | 122 | def main(): 123 | parser = getparser() 124 | args = parser.parse_args() 125 | f_index = args.frame_index 126 | if os.path.splitext(f_index)[1] == '.csv': 127 | frame_index = pd.read_csv(f_index) 128 | else: 129 | frame_index = pd.read_pickle(f_index) 130 | logging.info("sample fn {}".format(glob.glob(os.path.join(args.camera_folder, 131 | '*{}*.tsai'.format(frame_index['name'].values[0]))))) 132 | 133 | # cam_list = [glob.glob(os.path.join(args.camera_folder,'*{}*.tsai'.format(os.path.basename(frame))))[0] for frame in frame_index['name'].values] 134 | cam_list = [] 135 | for frame in frame_index['name'].values: 136 | try: 137 | cam_list.append(glob.glob(os.path.join(args.camera_folder,'*{}*.tsai'.format(os.path.basename(frame))))[0]) 138 | except: 139 | continue 140 | 141 | if not args.gcp_folder: 142 | gcp_folder = args.camera_folder 143 | else: 144 | gcp_folder = args.gcp_folder 145 | if not os.path.exists(args.outfol): 146 | os.makedirs(args.outfol) 147 | gcp_list = [glob.glob(os.path.join(gcp_folder,'*{}*.gcp'.format(os.path.basename(frame))))[0] for frame in frame_index['name'].values] 148 | CX,CY,CZ = [frame_index.x_sat_ecef_km.values*1000,frame_index.y_sat_ecef_km*1000,frame_index.z_sat_ecef_km*1000] 149 | rotation_matrices = [Quaternion(matrix=(np.reshape(asp.read_tsai_dict(x)['rotation_matrix'],(3,3)))) for x in cam_list] 150 | fu,fv = asp.read_tsai_dict(cam_list[0])['focal_length'] 151 | cu,cv = asp.read_tsai_dict(cam_list[0])['optical_center'] 152 | pitch = asp.read_tsai_dict(cam_list[0])['pitch'] 153 | q1 = [x[0] for x in rotation_matrices] 154 | q2 = [x[1] for x in rotation_matrices] 155 | q3 = [x[2] for x in rotation_matrices] 156 | q4 = [x[3] for x in rotation_matrices] 157 | for idx,row in 
frame_index.iterrows(): 158 | identifier = os.path.basename(row['name']) 159 | gcp = pd.read_csv(glob.glob(os.path.join(gcp_folder,'*{}*.gcp'.format(identifier)))[0],header=None,sep=' ') 160 | im_x,im_y = [gcp[8].values,gcp[9].values] 161 | lon,lat,ht = [gcp[2].values,gcp[1].values,gcp[3].values] 162 | ecef_proj = 'EPSG:4978' 163 | geo_proj = 'EPSG:4326' 164 | wgs2ecef = Transformer.from_crs(geo_proj,ecef_proj) 165 | X,Y,Z = wgs2ecef.transform(lat,lon,ht) 166 | CX_idx,CY_idx,CZ_idx = [CX[idx],CY[idx],CZ[idx]] 167 | q1_idx,q2_idx,q3_idx,q4_idx = [q1[idx],q2[idx],q3[idx],q4[idx]] 168 | #tpl_int = (q1_idx,q2_idx,q3_idx,q4_idx) 169 | print(idx) 170 | Q1,Q2,Q3,Q4 = optimiser_quaternion(q1_idx,q2_idx,q3_idx,q4_idx,CX_idx, 171 | CY_idx,CZ_idx,cu,cv,fu,fv,pitch,X,Y,Z,im_x,im_y) 172 | rot_mat = Quaternion([Q1,Q2,Q3,Q4]).rotation_matrix 173 | out_cam = os.path.join(args.outfol,'{}_scipy.tsai'.format(identifier)) 174 | asp.make_tsai(out_cam,cu,cv,fu,fv,rot_mat,[CX_idx,CY_idx,CZ_idx],pitch) 175 | logging.info("Successfully created optimised camera models at {}".format( 176 | args.outfol)) 177 | if __name__=='__main__': 178 | main() 179 | -------------------------------------------------------------------------------- /scripts/plot_disparity.py: -------------------------------------------------------------------------------- 1 | #! 
#! /usr/bin/env python
"""
Plot ASP stereo diagnostics for one or more stereo run directories.

For each directory given on the command line, load the filtered disparity
(*-F.tif), the warped left/right images (*-L.tif, *-R.tif), the triangulation
intersection-error raster (*In*.tif) and the output DEM (*-DEM.tif), and show
a 3x2 summary figure (left/right images, dx/dy disparity, error, hillshaded DEM).

Usage: plot_disparity.py stereo_dir1 [stereo_dir2 ...]
"""
import os
import sys
import glob

import numpy as np


def find_clim(im1, im2):
    """Return a symmetric (vmin, vmax) covering the joint 2-98 percentile
    stretch of both inputs, so a diverging colormap is centered on zero.

    Accepts plain or masked arrays; masked pixels are excluded (np.percentile
    would otherwise include masked fill values and skew the limits).
    """
    def _valid(im):
        # use only unmasked pixels for percentile computation
        return im.compressed() if np.ma.isMaskedArray(im) else np.asarray(im)

    perc1 = np.percentile(_valid(im1), (2, 98))
    perc2 = np.percentile(_valid(im2), (2, 98))
    lo = np.min([perc1[0], perc2[0]])
    hi = np.max([perc1[1], perc2[1]])
    abs_max = np.max(np.abs((lo, hi)))
    return (-abs_max, abs_max)


def main():
    # Heavy plotting/geospatial imports are deferred so the module can be
    # imported (e.g. for find_clim) without the full stack installed.
    import matplotlib.pyplot as plt
    from pygeotools.lib import iolib
    from imview.lib import pltlib

    dir_list = [os.path.abspath(x) for x in sys.argv[1:]]
    for work_dir in dir_list:
        # glob assumes exactly one match per pattern in each run directory
        disparity_file = glob.glob(os.path.join(work_dir, '*-F.tif'))[0]  # multichannel
        left_image_warped = glob.glob(os.path.join(work_dir, '*-L.tif'))[0]  # 1 channel
        right_image_warped = glob.glob(os.path.join(work_dir, '*-R.tif'))[0]  # 1 channel
        error_fn = glob.glob(os.path.join(work_dir, '*In*.tif'))[0]
        dem_fn = glob.glob(os.path.join(work_dir, '*-DEM.tif'))[0]
        dx = iolib.fn_getma(disparity_file, bnum=1)
        dy = iolib.fn_getma(disparity_file, bnum=2)
        img1 = iolib.fn_getma(left_image_warped)
        img2 = iolib.fn_getma(right_image_warped)
        error = iolib.fn_getma(error_fn)
        dem = iolib.fn_getma(dem_fn)
        dem_ds = iolib.fn_getds(dem_fn)

        fig, ax = plt.subplots(3, 2, figsize=(9, 6))
        pltlib.iv(img1, cmap='gray', title='Left', ax=ax[0, 0], clim=(0, 1))
        pltlib.iv(img2, cmap='gray', title='Right', ax=ax[0, 1], clim=(0, 1))
        # shared symmetric limits so dx/dy panels are directly comparable
        clim = find_clim(dx, dy)
        pltlib.iv(dx, cmap='RdBu', title='dx', clim=clim, ax=ax[1, 0])
        pltlib.iv(dy, cmap='RdBu', title='dy', clim=clim, ax=ax[1, 1])
        pltlib.iv(error, cmap='plasma', title='Intersection error (m)', ax=ax[2, 0])
        pltlib.iv(dem, ds=dem_ds, scalebar=True, title="Digital Elevation Model",
                  hillshade=True, ax=ax[2, 1])
        fig.tight_layout()
        # figure is shown interactively, not saved (saving was disabled upstream)
        plt.show()
    print('Script Complete!')


if __name__ == '__main__':
    main()
#! /usr/bin/env python
# --- scripts/prep_dense_ba_run.py ---
"""Copy/rename dense match files for bundle adjustment, optionally writing a
new overlap list when dense matches came from ortho-imagery stereo jobs."""

import os
import sys
import glob
import re
import shutil
import argparse

import numpy
import pandas as pd


def getparser():
    """Build the CLI parser for the dense-match preparation script."""
    parser = argparse.ArgumentParser(
        description='Script for copying/renaming dense match files, creating new overlap list if the dense matches were created on ortho imagery stereo jobs')
    parser.add_argument('-img', default=None, help='path to unmapped image folder', required=False)
    parser.add_argument('-orig_pickle', default=None,
                        help='path to original overlap pickle written by skysat_overlap.py (default: %(default)s)', required=False)
    parser.add_argument('-dense_match_pickle', default=None,
                        help='path to pickle file written by stereo file (default: %(default)s)', required=False)
    parser.add_argument('-stereo_dir', default=None,
                        help='master triplet stereo directory (default: %(default)s)', required=True)
    parser.add_argument('-ba_dir', default=None,
                        help='path to expected ba directory (default: %(default)s)', required=True)
    mode_opt = [1, 0]
    parser.add_argument('-modify_overlap', choices=mode_opt, type=int, default=0,
                        help='by default, copy match files only (0), if (1), then modify overlap (default: %(default)s)')
    parser.add_argument('-out_overlap_fn', default=None,
                        help='out overlap filename (default: %(default)s)', required=False)
    return parser


def main():
    parser = getparser()
    args = parser.parse_args()
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    stereo_master_dir = os.path.abspath(args.stereo_dir)
    ba_dir = os.path.abspath(args.ba_dir)
    if args.modify_overlap == 1:
        # These three inputs are only required in overlap-modification mode;
        # fail with a clear message instead of TypeError from abspath(None).
        missing = [name for name, val in (('-img', args.img),
                                          ('-orig_pickle', args.orig_pickle),
                                          ('-dense_match_pickle', args.dense_match_pickle))
                   if val is None]
        if missing:
            parser.error('{} required when -modify_overlap=1'.format(', '.join(missing)))
        img_fol = os.path.abspath(args.img)
        orig_pickle = os.path.abspath(args.orig_pickle)
        dense_match_pickle = os.path.abspath(args.dense_match_pickle)
    else:
        img_fol = None
        orig_pickle = None
        dense_match_pickle = None
    workflow.dense_match_wrapper(stereo_master_dir, ba_dir, modify_overlap=args.modify_overlap,
                                 img_fol=img_fol, orig_pickle=orig_pickle,
                                 dense_match_pickle=dense_match_pickle,
                                 out_overlap_fn=args.out_overlap_fn)
    print("Script is complete !")


# --- scripts/reformat_frameindex.py ---
# Light-weight script to reformat new versions of the Planet-provided
# frame_index file into the polygon convention ASP expects.

def getparser():  # noqa: F811 - separate script in the original repo
    """Build the CLI parser for the frame_index reformatting script."""
    parser = argparse.ArgumentParser(
        description='Light-weight script to reformat new versions of Planet provided frame_index file')
    parser.add_argument('-in_frameindex', type=str, help='path to original frame_index.csv')
    return parser


def _correct_geom(row):
    """Parse the WKT string in a frame_index row into a shapely geometry."""
    from shapely import wkt  # deferred: shapely only needed at run time
    return wkt.loads(row['geom'])


def main():  # noqa: F811 - separate script in the original repo
    from shapely.geometry.polygon import orient  # deferred, see _correct_geom

    parser = getparser()
    args = parser.parse_args()
    original_frame_fn = args.in_frameindex
    frame_index = pd.read_csv(original_frame_fn)
    frame_index['geom'] = frame_index.apply(_correct_geom, axis=1)

    # orient the polygon geometry clockwise (ASP convention)
    updated_geomlist_asp_convention = [orient(test_geom, -1)
                                       for test_geom in frame_index['geom'].values]

    # remove the space between POLYGON and ((
    # NOTE(review): assumes shapely's WKT output is "POLYGON ((..." — verify on upgrade
    updated_geomlist_asp_convention = [f"POLYGON(({str(test_geom).split(' ((')[1]}"
                                       for test_geom in updated_geomlist_asp_convention]

    # remove the repeated last coordinate (closing vertex)
    updated_geomlist_asp_convention = [','.join(test_geom.split(',')[:-1]) + '))'
                                       for test_geom in updated_geomlist_asp_convention]

    # update geometry column and write alongside the input
    frame_index['geom'] = updated_geomlist_asp_convention
    outfn = os.path.splitext(original_frame_fn)[0] + "_asp_convention.csv"
    frame_index.to_csv(outfn, index=False)


if __name__ == "__main__":
    main()
#PBS -S /bin/csh

# PBS batch wrapper that runs a list of precomputed skysat stereo/ba jobs
# through GNU parallel. The job list file MUST be supplied via the PBS
# environment, e.g.:  qsub -v job_fn=/path/to/job_list.txt skysat_ctrack.pbs

#Export environmental variables to batch job
#PBS -V

#Join stdout and stderr
#PBS -j oe

#Queue name
#PBS -q long

#Resources
#PBS -l select=1:model=bro_ele,walltime=6:00:00

#Mail options
#PBS -m abe

#Group ID (historical options kept for reference; all currently inactive)
# #PBS -W group_list=s1271   # NESSF
# #PBS -W group_list=s1768   # Arendt
# #PBS -W group_list=s3224   # FINESST
# #PBS -W group_list=s2407   # HMA2

#Rerunnable (y/n)
#PBS -r n

unalias cd

# Run from the submission directory (interactive runs fall back to cwd)
if ($?PBS_O_WORKDIR) then
    cd $PBS_O_WORKDIR
else
    setenv PBS_O_WORKDIR `pwd`
endif

# Per-run log file: <pair>_<timestamp>_<jobid>.log under ./log
set ts = `date +%Y%m%d_%H%M`
set logdir = $PBS_O_WORKDIR/log
if (! -d $logdir) mkdir $logdir
set jobid = `echo $PBS_JOBID | awk -F'.' '{print $1}'`
set pair = "skysat_ba"
set logfile = $logdir/${pair}_${ts}_${jobid}.log

echo $logfile

echo "Current directory is:"
pwd
echo

#This was for rerunning with updated rpcdem
if (! $?rpcdem) then
    set rpcdem = ""
endif

# FIX: job_fn was referenced below but never defined anywhere in this script;
# fail loudly instead of letting parallel die on an empty variable.
if (! $?job_fn) then
    echo "ERROR: job_fn is not set. Submit with: qsub -v job_fn=/path/to/job_list.txt"
    exit 1
endif

parallel --progress -j 40 < $job_fn >& $logfile
#! /usr/bin/env python
# --- scripts/skysat_dem_mos.py ---
"""Compute DEM composites (mosaics) from a triplet or video stereo output directory."""

import os
import sys
import glob
import shutil
import argparse
import time
import itertools
import ast
from itertools import combinations, compress
from multiprocessing import cpu_count

import numpy as np
import pandas as pd


def getparser():
    """CLI parser for the DEM mosaicking script."""
    parser = argparse.ArgumentParser(
        description='Script to compute DEM mosaics from triplet output directory')
    parser.add_argument('-DEM_folder', help='Folder containing subdirectories of DEM', required=True)
    parser.add_argument('-out_folder',
                        help='Where composite DEMs are to be saved, if none, creates a composite DEM directory in the input main directory',
                        required=False, default=None)
    parser.add_argument('-identifier',
                        help='if we want to mosaic individually aligned DEM which have been produced by skysat_pc_cam.py, place the identifiers here',
                        required=False, default=None)
    mode_ch = ['video', 'triplet']
    parser.add_argument('-mode', default='triplet', choices=mode_ch,
                        help="select if mosaicing video or triplet stereo output DEMs (default: %(default)s)")
    parser.add_argument('-tile_size', default=None,
                        help='Tile size for tiled processing, helpful on nodes with less memory or if num_dems are large')
    binary_ch = [1, 0]
    parser.add_argument('-filter_dem', choices=binary_ch, default=1, type=int,
                        help="filter video DEM composites using max NMAD and min count combination (default: %(default)s)")
    parser.add_argument('-min_video_count', type=float, default=2,
                        help='minimum DEM count to use in filtering (default: %(default)s)')
    parser.add_argument('-max_video_nmad', type=float, default=5,
                        help='maximum DEM NMAD variability to filter, if DEM count is also <= min_count (default: %(default)s)')
    return parser


def main():
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    parser = getparser()
    args = parser.parse_args()
    dem_dir = os.path.abspath(args.DEM_folder)
    workflow.dem_mosaic_wrapper(dem_dir, mode=args.mode, out_folder=args.out_folder,
                                identifier=args.identifier,
                                tile_size=args.tile_size, filter_dem=args.filter_dem,
                                min_video_count=args.min_video_count,
                                max_video_nmad=args.max_video_nmad)
    print("Script complete")


# --- scripts/skysat_orthorectify.py ---
# Orthorectify SkySat imagery (browse-quality or science-quality products).

def get_parser():
    """CLI parser for the orthorectification script."""
    parser = argparse.ArgumentParser(description='create browse image from input Skysat directory')
    parser.add_argument('-img_folder', help='Folder containing subdirectories of imagefiles', required=True)
    session_choice = ['rpc', 'pinhole']
    parser.add_argument('-session', choices=session_choice, default='rpc',
                        help='Session for mapproject (default: %(default)s)')
    parser.add_argument('-out_folder', help='Folder where output orthoimages will be stored', required=True)
    parser.add_argument('-tr', help='Output image resolution', default=None)
    parser.add_argument('-tsrs', help='Output crs as EPSG code, example EPSG:32610')
    parser.add_argument('-DEM', help='Optional DEM for mapprojecting', default='WGS84')
    # NOTE(review): any command-line value supplied here arrives as a truthy
    # string (argparse has no type/action set) — TODO confirm intended usage
    parser.add_argument('-delete_temporary_files',
                        help='Delete temporary individual mapprojected files written to disc', default=True)
    map_choices = ['science', 'browse']
    parser.add_argument('-mode', choices=map_choices, default='browse',
                        help='select mode for mapprojection default: %(default)s')
    parser.add_argument('-ba_prefix', default=None,
                        help='bundle adjust prefix for rpc, or joiner for bundle adjusted pinhole cameras', required=False)
    parser.add_argument('-cam', default=None,
                        help='camera folder containing list of tsai files for pinhole files', required=False)
    parser.add_argument('-frame_index', default=None,
                        help="frame index to read frame's actual Ground sampling distance", required=False)
    orthomosaic_choice = [1, 0]
    parser.add_argument('-orthomosaic', default=0, type=int, choices=orthomosaic_choice,
                        help="if mode is science, enabling this (1) will also produce a final orthomosaic (default: %(default)s)")
    parser.add_argument('-copy_rpc', default=0, type=int, choices=orthomosaic_choice,
                        help='if mode is science, enabling this (1) will copy rpc metadata in the orthoimage (default: %(default)s)')
    data_choices = ['video', 'triplet']
    parser.add_argument('-data', default='triplet', choices=data_choices,
                        help="select if mosaicing video or triplet product in science mode (default: %(default)s)")
    parser.add_argument('-overlap_list', default=None,
                        help='list containing pairs for which feature matching was restricted due during cross track bundle adjustment (not required during basic triplet processing)')
    return parser


def main():  # noqa: F811 - separate script in the original repo
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    parser = get_parser()
    args = parser.parse_args()
    tr = str(args.tr) if args.tr is not None else None
    img_dir = os.path.abspath(args.img_folder)
    outdir = os.path.abspath(args.out_folder)
    images = sorted(glob.glob(os.path.join(img_dir, '*.tif*')))
    if os.path.islink(images[0]):
        # NOTE(review): readlink may return a relative target — verify inputs
        images = [os.readlink(x) for x in images]
    # function name spelled "orhtorectification" in the workflow API (sic)
    workflow.execute_skysat_orhtorectification(images, outdir, data=args.data, dem=args.DEM,
                                               tr=tr, tsrs=args.tsrs,
                                               del_opt=args.delete_temporary_files,
                                               cam_folder=args.cam, ba_prefix=args.ba_prefix,
                                               mode=args.mode, session=args.session,
                                               overlap_list=args.overlap_list,
                                               frame_index_fn=args.frame_index,
                                               copy_rpc=args.copy_rpc,
                                               orthomosaic=args.orthomosaic)
    print("Script is complete!")


# --- scripts/skysat_overlap.py ---
# Build the list of overlapping image pairs above a user-defined overlap fraction.

def getparser():  # noqa: F811 - separate script in the original repo
    """CLI parser for the overlapping-pair computation script."""
    parser = argparse.ArgumentParser(
        description='Script to make overlapping pairs based on user defined minimum overlap percentage')
    parser.add_argument('-img_folder', help='Folder containing images with RPC information', required=True)
    parser.add_argument('-percentage', '--percentage',
                        help='percentage_overlap between 0 to 1', type=float, required=True)
    parser.add_argument('-outfn', '--out_fn',
                        help='Text file containing the overlapping pairs', type=str, required=True)
    parser.add_argument('-cross_track', action='store_true', help='Also make cross-track pairs')
    parser.add_argument('-aoi_bbox',
                        help='Return interesecting footprint within this aoi only', default=None)
    return parser


# Global var: geographic CRS used for footprint work
geo_crs = 'EPSG:4326'


def main():  # noqa: F811 - separate script in the original repo
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    init_time = time.time()
    parser = getparser()
    args = parser.parse_args()
    workflow.prepare_stereopair_list_rtree(args.img_folder, args.percentage, args.out_fn,
                                           args.aoi_bbox, cross_track=args.cross_track)
    print(f'Script completed in time {time.time()-init_time}')


if __name__ == "__main__":
    main()
#! /usr/bin/env python
# --- scripts/skysat_pc_cam.py ---
"""Grid point clouds and register DEMs/pinhole cameras to a reference DEM
using ASP's ICP alignment tools."""

import os
import sys
import glob
import re
import argparse
from datetime import datetime
from multiprocessing import cpu_count

import numpy as np
import pandas as pd


def get_parser():
    """CLI parser for the gridding/alignment utility."""
    parser = argparse.ArgumentParser(
        description="utility to grid and register DEMs, pinhole cameras to a referece DEM, using ASP's ICP algorithm")
    mode_choice = ['gridding_only', 'classic_dem_align', 'multi_align', 'align_cameras']
    parser.add_argument('-mode', help='operation mode', choices=mode_choice, required=True)
    # gridding only choices
    parser.add_argument('-tr', default=2, type=float,
                        help='DEM gridding resolution (default: %(default)s)')
    parser.add_argument('-tsrs', default=None,
                        help='Projection for gridded DEM if not using local UTM (default: %(default)s)')
    parser.add_argument('-point_cloud_list', nargs='*', help='List of pointclouds for gridding')
    # classic dem align options, also carried forward to multi_align
    align_choice = ['point-to-point', 'point-to-plane']
    parser.add_argument('-align', choices=align_choice, default='point-to-plane',
                        help='ICP Alignment algorithm (default: %(default)s)')
    parser.add_argument('-initial_align', default=None, type=str,
                        help='Alignment transform from initial PC align run')
    parser.add_argument('-max_displacement', default=100.0, type=float,
                        help='Maximum allowable displacement between two DEMs (default: %(default)s)')
    trans_choice = [0, 1]
    parser.add_argument('-trans_only', default=0, type=int, choices=trans_choice,
                        help='1: compute translation only, (default: %(default)s)')
    parser.add_argument('-outprefix', default=None, help='outprefix for alignment results')
    parser.add_argument('-refdem', default=None, help='DEM used as refrence for alignment')
    parser.add_argument('-source_dem', default=None, help='DEM to be aligned')
    parser.add_argument('-source_dem_list', nargs='*',
                        help='List of source DEMs to be aligned to a common reference, used in multi_align operation mode')
    # Camera align args
    parser.add_argument('-transform',
                        help='transfrom.txt file written by pc_align, used to align transform cameras to correct locations')
    parser.add_argument('-cam_list', nargs='*', help='list of cameras to be transformed')
    parser.add_argument('-outfol', help='folder where aligned cameras will be saved')
    parser.add_argument('-rpc', choices=trans_choice, default=0, type=int,
                        help='1: also write out updated RPC, (default: %(default)s)')
    # NOTE(review): default is the *string* 'None', not the None object —
    # downstream code presumably checks for that sentinel; verify before changing
    parser.add_argument('-dem', default='None', help='DEM used for generating RPC')
    parser.add_argument('-img_list', nargs='*', help='list of images for which RPC will be generated')
    return parser


def main():
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    parser = get_parser()
    args = parser.parse_args()
    mode = args.mode
    # dispatch on the (argparse-validated) operation mode
    if mode == 'gridding_only':
        workflow.gridding_wrapper(args.point_cloud_list, args.tr, args.tsrs)
    elif mode == 'classic_dem_align':
        workflow.alignment_wrapper_single(args.refdem, args.source_dem, args.max_displacement,
                                          args.outprefix, args.align, args.trans_only,
                                          initial_align=args.initial_align)
    elif mode == 'multi_align':
        workflow.alignment_wrapper_multi(args.refdem, args.source_dem_list, args.max_displacement,
                                         args.align, trans_only=args.trans_only,
                                         initial_align=args.initial_align)
    elif mode == 'align_cameras':
        workflow.align_cameras_wrapper(args.cam_list, args.transform, args.outfol, rpc=args.rpc,
                                       dem=args.dem, img_list=args.img_list)


# --- scripts/skysat_preprocess.py ---
# Initialise frame cameras for SkySat triplet/video products, with optional
# user-defined video subsampling.

def getparser():
    """CLI parser for the preprocessing/camera-initialisation script."""
    parser = argparse.ArgumentParser(
        description='Script for initialing frame cameras for Skysat triplet stereo and video, performing user defined video subsampling')
    modes = ['video', 'triplet']
    parser.add_argument('-mode', default='video', choices=modes,
                        help='choose Skysat product to work with')
    session_choices = ['rpc', 'pinhole']
    parser.add_argument('-t', default='pinhole', choices=session_choices,
                        help='choose between pinhole and rpc mode (default: %(default)s)')
    parser.add_argument('-img', default=None, help='folder containing images', required=True)
    sampling_mode_choices = ['sampling_interval', 'num_images']
    parser.add_argument('-video_sampling_mode', default='num_images', choices=sampling_mode_choices,
                        required=False,
                        help='Chose desired sampling procedure, either fixed sampling interval or by equally distributed user defined number of samples (default: %(default)s)')
    parser.add_argument('-sampler', default=5, type=int,
                        help='if video_sampling_mode: sampling_interval, this is the sampling interval, else this is the number of samples to be selected (default: %(default)s)')
    parser.add_argument('-outdir', default=None, required=True,
                        help='Output folder to save cameras and GCPs')
    parser.add_argument('-frame_index', default=None,
                        help='Frame index csv file provided with L1A video products, will be used for determining stereo combinations')
    parser.add_argument('-overlap_pkl', default=None,
                        help='pkl dataframe containing entries of overlapping pairs for triplet run, obtained from skysat_overlap_parallel.py')
    parser.add_argument('-dem', default=None,
                        help='Reference DEM to be used for frame camera initialisation')
    product_levels = ['l1a', 'l1b']
    parser.add_argument('-product_level', choices=product_levels, default='l1b', required=False,
                        help='Product level being processed, (default: %(default)s)')
    return parser


def main():  # noqa: F811 - separate script in the original repo
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    parser = getparser()
    args = parser.parse_args()
    img_folder = os.path.abspath(args.img)
    outdir = os.path.abspath(args.outdir)
    # pass the absolute outdir (the original passed raw args.outdir here while
    # the log file below used the absolute path — now consistent)
    cam_gen_log = workflow.skysat_preprocess(img_folder, args.mode,
                                             sampling=args.video_sampling_mode,
                                             frame_index_fn=args.frame_index,
                                             product_level=args.product_level,
                                             sampler=args.sampler,
                                             overlap_pkl=args.overlap_pkl, dem=args.dem,
                                             outdir=outdir)

    # save the cam_gen subprocess logs next to the outputs, timestamped
    now = datetime.now()
    log_fn = os.path.join(outdir, 'camgen_{}.log'.format(now))
    print("saving subprocess camgen log at {}".format(log_fn))
    with open(log_fn, 'w') as f:
        for log in cam_gen_log:
            f.write(log)
    print("Script is complete !")


if __name__ == "__main__":
    main()
#! /usr/bin/env python
"""Run stereo jobs for SkySat video and triplet products."""

import os
import sys
import glob
import argparse
from multiprocessing import cpu_count

import numpy as np


def getparser():
    """CLI parser for the generalised stereo driver."""
    parser = argparse.ArgumentParser(
        description='Script for performing stereo jobs, generalised for skysat video and triplet stereo products')
    modes = ['video', 'triplet']
    parser.add_argument('-mode', default='video', choices=modes,
                        help='choose Skysat product to work with')
    session_choices = ['rpc', 'nadirpinhole', 'rpcmaprpc', 'pinholemappinhole']
    # mapprojecting inputs are faster to process, and generally more complete
    # (less holes) + accurate (less blunders in stereo matching)
    parser.add_argument('-threads', default=cpu_count(), type=int,
                        help='number of threads to use for each stereo process, (default: %(default)s)')
    entry_choice = ['pprc', 'corr', 'rfne', 'fltr', 'tri']
    # FIX: entry_choice was defined but never wired in as choices
    parser.add_argument('-entry_point', type=str, default='pprc', choices=entry_choice,
                        help='start stereo from a particular stage (default: %(default)s)')
    parser.add_argument('-t', default='nadirpinhole', choices=session_choices,
                        help='choose between pinhole and rpc mode (default: %(default)s)')
    parser.add_argument('-img', default=None, help='folder containing images', required=True)
    # the camera folder should contain names similar to the images; a simple
    # string search is used downstream to pick the matching camera
    parser.add_argument('-cam', default=None,
                        help='folder containing cameras, if using nadirpinhole/pinholemappinhole workflow',
                        required=False)
    parser.add_argument('-ba_prefix', default=None,
                        help='bundle adjust prefix for reading transforms from .adjust files, mainly for rpc runs, or for reading the correct cameras from a bundle adjustment directory containing multiple generations of pinhole cameras',
                        required=False)
    parser.add_argument('-overlap_pkl', default=None,
                        help='pkl dataframe containing entries of overlapping pairs for triplet run, obtained from skysat_overlap_parallel.py')
    parser.add_argument('-frame_index', default=None,
                        help='Frame index csv file provided with L1A video products, will be used for determining stereo combinations')
    parser.add_argument('-sampling_interval', default=5, required=False, type=int,
                        help='Sampling interval between stereo DEM input pairs, or the interval at which master images are picked for multiview stereo triangulation (default: %(default)s)')
    parser.add_argument('-dem', default=None,
                        help='Reference DEM to be used in triangulation, if input images are mapprojected')
    texture_choices = ['low', 'normal']
    parser.add_argument('-texture', default='normal', choices=texture_choices,
                        help='keyword to adapt processing for low texture surfaces, for example in case of fresh snow (default: %(default)s)',
                        required=False)
    crop_ops = [1, 0]
    parser.add_argument('-crop_map', default=1, type=int, choices=crop_ops,
                        help='To crop mapprojected images to same resolution and extent or not before stereo')
    parser.add_argument('-outfol', default=None,
                        help='output folder where stereo outputs will be saved', required=True)
    mvs_choices = [1, 0]
    parser.add_argument('-mvs', default=0, type=int, choices=mvs_choices,
                        help='1: Use multiview stereo triangulation for video data, do matching with next 20 slave for each master image/camera (default: %(default)s)')
    parser.add_argument('-block', default=0, type=int, choices=mvs_choices,
                        help='1: use block matching instead of default MGM (default: %(default)s)')
    parser.add_argument('-full_extent', type=int, choices=mvs_choices, default=1,
                        help='Selecting larger intervals can result in lower footprint output DEM, if 1: then DEMs with smaller interval image pairs will be padded at the begining and end of the video sequence (default: %(default)s)')
    parser.add_argument('-writeout_only', action='store_true',
                        help='writeout_jobs to a text file, not run')
    parser.add_argument('-job_fn', type=str, help='text file to write stereo jobs to', default=None)
    parser.add_argument('-cross_track', action='store_true',
                        help='attempt stereo for cross_track pairs as well')
    return parser


def main():
    # deferred: heavy geospatial dependency only needed at run time
    from skysat_stereo import skysat_stereo_workflow as workflow

    parser = getparser()
    args = parser.parse_args()
    img = os.path.abspath(args.img)

    workflow.execute_skysat_stereo(img, args.outfol, args.mode, session=args.t,
                                   dem=args.dem, texture=args.texture,
                                   sampling_interval=args.sampling_interval,
                                   cam_folder=args.cam, ba_prefix=args.ba_prefix,
                                   writeout_only=args.writeout_only,
                                   mvs=args.mvs, block=args.block, crop_map=args.crop_map,
                                   full_extent=args.full_extent,
                                   entry_point=args.entry_point, threads=args.threads,
                                   overlap_pkl=args.overlap_pkl,
                                   frame_index=args.frame_index, job_fn=args.job_fn,
                                   cross_track=args.cross_track)

    print("Script is complete")


if __name__ == "__main__":
    main()
#!/usr/bin/env python
import subprocess
import argparse
from datetime import datetime
import os,sys,glob,shutil
from rpcm import geo
import numpy as np
import geopandas as gpd
from distutils.spawn import find_executable
from skysat_stereo import misc_geospatial as misc
from skysat_stereo import asp_utils as asp
from skysat_stereo import bundle_adjustment_lib as ba
from skysat_stereo import skysat_stereo_workflow as workflow

"""
Script for running the full triplet stereo pipeline based on the workflow
described in the ISPRS 2020 submission.
Requires an input L1B image folder and an input reference DEM.
"""
#TODO:
# Add an option of cleaning up the lots of intermediate files produced

def getparser():
    """Build the command-line argument parser for the triplet stereo workflow."""
    parser = argparse.ArgumentParser(description='Wrapper script to run full triplet stereo workflow')
    parser.add_argument('-in_img',default=None,type=str,help='path to Folder containing L1B imagery')
    parser.add_argument('-aoi_bbox',default=None,type=str,help='path to bounding box shapefile if limiting processing to a smaller aoi')
    parser.add_argument('-orthodem',default=None,type=str,help='path to Reference DEM to use in orthorectification and camera resection, if not provided, will use coregdem')
    parser.add_argument('-coregdem',default=None,type=str,help='path to reference DEM to use in coregisteration')
    parser.add_argument('-mask_dem',default=1,type=int,choices=[1,0],help='mask reference DEM for static surfaces before coreg (default: %(default)s)')
    mask_opt = ['glaciers','glaciers+nlcd']
    parser.add_argument('-mask_dem_opt',default='glaciers',choices=mask_opt,help='surfaces to mask if -mask_dem=1, default is glaciers which uses RGI polygons.\
        If processing in CONUS, the option of glaciers+nlcd also additionaly masks out forest surfaces')
    parser.add_argument('-ortho_workflow',default=1,type=int,choices=[1,0],help='option to orthorectify before stereo or not')
    parser.add_argument('-block_matching',default=0,type=int,choices=[1,0],help='whether to use block matching in final stereo matching, default is 0 (not)')
    parser.add_argument('-job_name',default=None,type=str,help='identifier for output folder and final composite products')
    parser.add_argument('-outfolder',default=None,type=str,help='path to output folder to save results in')
    bin_choice = [1,0]
    parser.add_argument('-full_workflow',choices=bin_choice,type=int,default=1,help='Specify 1 to run full workflow (default: %(default)s)')
    parser.add_argument('-partial_workflow_steps',nargs='*',help='specify steps of workflow to run')
    return parser

def main():
    """Run the (full or partial) triplet stereo workflow end to end."""
    parser = getparser()
    args = parser.parse_args()
    img_folder = args.in_img
    coreg_dem = args.coregdem
    if args.orthodem is not None:
        ortho_dem = args.orthodem
    else:
        ortho_dem = coreg_dem

    # Check for input files
    img_list = glob.glob(os.path.join(img_folder,'*.tif'))+glob.glob(os.path.join(img_folder,'*.tiff'))
    if len(img_list) < 2:
        print(f"Only {len(img_list)} images detected, exiting")
        sys.exit()
    if not os.path.exists(coreg_dem):
        print(f"Coreg dem {coreg_dem} could not be located, exiting")
        sys.exit()
    if not os.path.exists(ortho_dem):
        print(f"Ortho dem {ortho_dem} could not be located, exiting")
        sys.exit()

    # structure for output folder
    out_fol = os.path.join(args.outfolder,'proc_out')
    job_name = args.job_name

    # Universal Args
    # (renamed from `map` to avoid shadowing the builtin)
    use_map = args.ortho_workflow == 1
    if use_map:
        init_stereo_session = 'rpcmaprpc'
        init_ortho_session = 'rpc'
        final_stereo_session = 'pinholemappinhole'
        final_ortho_session = 'pinhole'
    else:
        init_stereo_session = 'rpc'
        init_ortho_session = 'rpc'
        final_stereo_session, final_ortho_session = ['nadirpinhole','pinhole']

    # For consistency, lets hardcode expected file names,folder names here :)
    # step1 outputs
    overlap_full_txt = os.path.join(out_fol,'overlap.txt')
    overlap_full_pkl = os.path.splitext(overlap_full_txt)[0]+'_with_overlap_perc.pkl'
    overlap_stereo_pkl = os.path.splitext(overlap_full_pkl)[0]+'_stereo_only.pkl'
    overlap_stereo_txt = os.path.splitext(overlap_full_pkl)[0]+'_stereo_only.txt'
    bound_fn = os.path.splitext(overlap_full_txt)[0]+'_bound.gpkg'
    bound_buffer_fn = os.path.splitext(bound_fn)[0]+'_1km_buffer.gpkg'

    # step2 outputs
    cam_gcp_directory = os.path.join(out_fol,'camgen_cam_gcp')

    # step3 outputs
    init_ortho_dir = os.path.join(out_fol,'init_rpc_ortho')
    init_stereo_dir = os.path.join(out_fol,'init_rpc_stereo')

    # step4 bundle_adjust dense matches
    init_ba = os.path.join(out_fol,'ba_pinhole')
    ba_prefix = os.path.join(init_ba,'run')

    # step5 stereo_args
    intermediate_ortho_dir = os.path.join(out_fol,'intermediate_pinhole_ortho')
    final_stereo_dir = os.path.join(out_fol,'final_pinhole_stereo')

    # step 6, dem gridding and mosaicing
    mos_dem_dir = os.path.join(final_stereo_dir,'composite_dems')

    # step 7. dem_alignment
    alignment_dir = os.path.join(out_fol,'georegistered_dem_mos')

    # step 8, camera alignment
    aligned_cam_dir = os.path.join(out_fol,'georegistered_cameras')

    # step 9, final orthorectification
    final_ortho_dir = os.path.join(out_fol,'georegistered_orthomosaics')

    # step 10, plot figure
    final_figure = os.path.join(out_fol,f"{job_name}_result.jpg")

    # step 11, experimental rpc production

    if args.full_workflow == 1:
        steps2run = np.arange(0,11)  # run the entire workflow (steps 1-10)
    else:
        steps2run = np.array(args.partial_workflow_steps).astype(int)

    #workflow_steps
    # create output directory
    if not os.path.exists(out_fol):
        os.makedirs(out_fol)

    # copy reference DEM(s) to refdem directory
    # if parallel runs on different nodes use the same DEM, then will have issues
    refdem_dir = os.path.join(out_fol,'refdem')
    if not os.path.exists(refdem_dir):
        os.makedirs(refdem_dir)
    shutil.copy2(coreg_dem,os.path.join(refdem_dir,os.path.basename(coreg_dem)))
    if coreg_dem != ortho_dem:
        diff_dem = True
        shutil.copy2(ortho_dem,os.path.join(refdem_dir,os.path.basename(ortho_dem)))
    else:
        diff_dem = False
    # replace old variable names
    coreg_dem = os.path.join(refdem_dir,os.path.basename(coreg_dem))
    ortho_dem = os.path.join(refdem_dir,os.path.basename(ortho_dem))

    if 1 in steps2run:
        print("Computing overlapping pairs")
        # Step 1 Compute overlapping pairs
        # Inputs: Image directory, minimum overlap percentage
        overlap_perc = 0.01  # 1 percent essentially
        workflow.prepare_stereopair_list_rtree(img_folder,overlap_perc,overlap_full_txt,
                                               aoi_bbox=args.aoi_bbox)

    print("Computing Target UTM zones for orthorectification")
    gdf = gpd.read_file(bound_fn)
    clon,clat = [gdf.centroid.x.values,gdf.centroid.y.values]
    epsg_code = f'EPSG:{geo.compute_epsg(clon,clat)}'
    print(f"Detected UTM zone is {epsg_code}")
    if not os.path.exists(bound_buffer_fn):
        print("Creating buffered shapefile")
        gdf_proj = gdf.to_crs(epsg_code)
        gdf_proj['geometry'] = gdf_proj.buffer(1000)
        gdf_proj.to_file(bound_buffer_fn,driver='GPKG')

    print("Cropping reference DEMs to extent of SkySat footprint + 1 km buffer")
    misc.clip_raster_by_shp_disk(coreg_dem,bound_buffer_fn)
    misc.ndvtrim_function(os.path.splitext(coreg_dem)[0]+'_shpclip.tif')
    coreg_dem = os.path.splitext(coreg_dem)[0]+'_shpclip_trim.tif'

    if diff_dem:
        misc.clip_raster_by_shp_disk(ortho_dem,bound_buffer_fn)
        misc.ndvtrim_function(os.path.splitext(ortho_dem)[0]+'_shpclip.tif')
        ortho_dem = os.path.splitext(ortho_dem)[0]+'_shpclip_trim.tif'
    else:
        ortho_dem = coreg_dem

    if 2 in steps2run:
        print("Generating Frame Cameras")
        cam_gen_log = workflow.skysat_preprocess(img_folder,mode='triplet',
            product_level='l1b',overlap_pkl=overlap_stereo_pkl,dem=ortho_dem,
            outdir=cam_gcp_directory)
        now = datetime.now()
        log_fn = os.path.join(cam_gcp_directory,'camgen_{}.log'.format(now))
        print("saving subprocess camgen log at {}".format(log_fn))
        with open(log_fn,'w') as f:
            for log in cam_gen_log:
                f.write(log)

    if 3 in steps2run:
        # specify whether to run using maprojected sessions or not
        if use_map:
            # orthorectify all the images first
            print("Orthorectifying images using RPC camera")
            workflow.execute_skysat_orhtorectification(images=img_list,data='triplet',session=init_ortho_session,
                outdir=init_ortho_dir,tsrs=epsg_code,dem=ortho_dem,mode='science',
                overlap_list=overlap_stereo_txt,copy_rpc=1,orthomosaic=0)
            init_stereo_input_img_folder = init_ortho_dir
        else:
            init_stereo_input_img_folder = img_folder
        print("Running stereo using RPC cameras")
        # Note crop_map = 0 option, this does not do warping to common extent and resolution
        # for orthoimages before stereo, because we want to preserve this crucial information
        # for correctly unwarped dense match points
        workflow.execute_skysat_stereo(init_stereo_input_img_folder,init_stereo_dir,
            mode='triplet',session=init_stereo_session,
            dem=ortho_dem,texture='normal',writeout_only=False,
            block=1,crop_map=0,threads=2,overlap_pkl=overlap_stereo_pkl,
            cross_track=False)

        # copy dense match file to ba directory
        workflow.dense_match_wrapper(stereo_master_dir=os.path.abspath(init_stereo_dir),
                                     ba_dir=os.path.abspath(init_ba),modify_overlap=0)

    if 4 in steps2run:
        # this is bundle adjustment step
        # we use dense files copied from previous step
        ba_prefix = os.path.join(init_ba,'run')
        print("running bundle adjustment")
        ba.bundle_adjust_stable(img=img_folder,ba_prefix=ba_prefix,cam=os.path.abspath(cam_gcp_directory),
                                session='nadirpinhole',overlap_list=overlap_stereo_txt,
                                num_iter=700,num_pass=2,mode='full_triplet')

    if 5 in steps2run:
        # this is where final stereo will take place
        # first we orthorectify again, if use_map = True
        if use_map:
            print("Running intermediate orthorectification with bundle adjusted pinhole cameras")
            workflow.execute_skysat_orhtorectification(images=img_list,data='triplet',session=final_ortho_session,
                outdir=intermediate_ortho_dir,tsrs=epsg_code,dem=ortho_dem,
                ba_prefix=ba_prefix+'-run',mode='science',overlap_list=overlap_stereo_txt,
                copy_rpc=1,orthomosaic=0)
            final_stereo_input_img_folder = intermediate_ortho_dir
        else:
            final_stereo_input_img_folder = img_folder
        # now run stereo
        print("Running final stereo reconstruction")
        workflow.execute_skysat_stereo(final_stereo_input_img_folder,
            final_stereo_dir,ba_prefix=ba_prefix+'-run',
            mode='triplet',session=final_stereo_session,
            dem=ortho_dem,texture='normal',writeout_only=False,
            block=args.block_matching,crop_map=1,threads=2,overlap_pkl=overlap_stereo_pkl,
            cross_track=False)

    if 6 in steps2run:
        pc_list = sorted(glob.glob(os.path.join(final_stereo_dir,'20*/2*/run-PC.tif')))
        print(f"Identified {len(pc_list)} clouds")
        # this is dem gridding followed by mosaicing
        workflow.gridding_wrapper(pc_list,tr=2)
        print("Mosaicing DEMs")
        workflow.dem_mosaic_wrapper(dir=os.path.abspath(final_stereo_dir),mode='triplet',
                                    out_folder=os.path.abspath(mos_dem_dir))

    if 7 in steps2run:
        # this is DEM alignment step
        # add option to mask coreg_dem for static surfaces
        # might want to remove glaciers, forest et al. before coregisteration
        # this can potentially be done in asp_utils step
        # actually use dem_mask.py with options of nlcd, nlcd_filter (not_forest) and of course RGI glacier polygons
        if args.mask_dem == 1:
            # this might change for non-US sites, best to use bareground files
            # (fixed typo: was args.msak_dem_opt, which raised AttributeError and
            # left mask_list unbound for the glaciers+nlcd choice)
            if args.mask_dem_opt == 'glaciers':
                mask_list = ['glaciers']
            elif args.mask_dem_opt == 'glaciers+nlcd':
                mask_list = ['nlcd','glaciers']
            print("Masking reference DEM to static surfaces")
            misc.dem_mask_disk(mask_list,os.path.abspath(coreg_dem))
            coreg_dem = os.path.splitext(coreg_dem)[0]+'_ref.tif'

        # now perform alignment
        median_mos_dem = glob.glob(os.path.join(mos_dem_dir,'multiview_*_median_mos.tif'))[0]
        print("Aligning DEMs")
        workflow.alignment_wrapper_single(coreg_dem,source_dem=median_mos_dem,max_displacement=40,
                                          outprefix=os.path.join(alignment_dir,'run'))

    if 8 in steps2run:
        # this steps aligns the frame camera models
        camera_list = sorted(glob.glob(os.path.join(init_ba,'run-run-*.tsai')))
        print(f"Detected {len(camera_list)} cameras to be registered to DEM")
        alignment_vector = glob.glob(os.path.join(alignment_dir,'alignment_vector.txt'))[0]
        if not os.path.exists(aligned_cam_dir):
            os.makedirs(aligned_cam_dir)
        print("Aligning cameras")
        workflow.align_cameras_wrapper(input_camera_list=camera_list,transform_txt=alignment_vector,
                                       outfolder=aligned_cam_dir)

    if 9 in steps2run:
        # this produces final georegistered orthomosaics
        georegistered_median_dem = glob.glob(os.path.join(alignment_dir,'run-trans_*DEM.tif'))[0]
        print("Running final orthomsaic creation")
        workflow.execute_skysat_orhtorectification(images=img_list,data='triplet',session=final_ortho_session,
            outdir=final_ortho_dir,tsrs=epsg_code,dem=georegistered_median_dem,
            ba_prefix=os.path.join(aligned_cam_dir,'run-run'),mode='science',
            overlap_list=overlap_stereo_txt,copy_rpc=0,orthomosaic=1)

    if 10 in steps2run:
        # this produces a final plot of orthoimage,DEM, NMAD and countmaps
        ortho = glob.glob(os.path.join(final_ortho_dir,'*finest_orthomosaic.tif'))[0]
        count = glob.glob(os.path.join(mos_dem_dir,'*count*.tif'))[0]
        nmad = glob.glob(os.path.join(mos_dem_dir,'*nmad*.tif'))[0]
        georegistered_median_dem = glob.glob(os.path.join(alignment_dir,'run-trans_*DEM.tif'))[0]
        print("plotting final figure")
        misc.plot_composite_fig(ortho,georegistered_median_dem,count,nmad,outfn=final_figure)

if __name__ == '__main__':
    main()
#!/usr/bin/env python
import subprocess
import argparse
import os,sys,glob,shutil
from rpcm import geo
import numpy as np
import geopandas as gpd
from distutils.spawn import find_executable
from skysat_stereo import misc_geospatial as misc
from skysat_stereo import asp_utils as asp
from skysat_stereo import skysat
from skysat_stereo import bundle_adjustment_lib as ba
from skysat_stereo import skysat_stereo_workflow as workflow

"""
Script for running the full video pipeline based on the workflow described in
the ISPRS 2020 submission.
Requires an input L1A image folder, frame_index.csv and an input reference DEM.
"""

def getparser():
    """Build the command-line argument parser for the video workflow."""
    parser = argparse.ArgumentParser(description='Wrapper script to run full video workflow')
    parser.add_argument('-in_img',default=None,type=str,help='path to Folder containing L1A imagery')
    parser.add_argument('-frame_index',default=None,type=str,help='path to frame_index.csv containing atitude and ephmeris information')
    parser.add_argument('-orthodem',default=None,type=str,help='path to Reference DEM to use in orthorectification and camera resection, if not provided, will use coregdem')
    parser.add_argument('-produce_low_res_for_ortho',type=int,choices=[1,0],default = 1,
        help='use hole-filled low res DEM produced from bundle-adjusted camera for orthorectification, (default: %(default)s)')
    parser.add_argument('-coregdem',default=None,type=str,help='path to reference DEM to use in coregisteration')
    parser.add_argument('-mask_dem',default=1,type=int,choices=[1,0],help='mask reference DEM for static surfaces before coreg (default: %(default)s)')
    mask_opt = ['glaciers','glaciers+nlcd']
    parser.add_argument('-mask_dem_opt',default='glaciers',choices=mask_opt,help='surfaces to mask if -mask_dem=1, default is glaciers which uses RGI polygons.\
        If processing in CONUS, the option of glaciers+nlcd also additionaly masks out forest surfaces')
    parser.add_argument('-ortho_workflow',default=1,type=int,choices=[1,0],help='option to orthorectify before stereo or not')
    parser.add_argument('-block_matching',default=0,type=int,choices=[1,0],help='whether to use block matching in final stereo matching, default is 0 (not)')
    parser.add_argument('-mvs', default=0, type=int, choices=[1,0], help='1: Use multiview stereo triangulation for video data\
        , do matching with next 20 slave for each master image/camera (defualt: %(default)s')
    parser.add_argument('-job_name',default=None,type=str,help='identifier for output folder and final composite products')
    parser.add_argument('-outfolder',default=None,type=str,help='path to output folder to save results in')
    bin_choice = [1,0]
    parser.add_argument('-full_workflow',choices=bin_choice,type=int,default=1,help='Specify 1 to run full workflow (default: %(default)s)')
    parser.add_argument('-partial_workflow_steps',nargs='*',help='specify steps of workflow to run')
    return parser

def main():
    """Run the (full or partial) video workflow end to end."""
    parser = getparser()
    args = parser.parse_args()
    img_folder = args.in_img
    coreg_dem = args.coregdem
    if args.orthodem is not None:
        ortho_dem = args.orthodem
    else:
        ortho_dem = coreg_dem
    frame_index = args.frame_index

    # Check for input files
    img_list = glob.glob(os.path.join(img_folder,'*.tif'))+glob.glob(os.path.join(img_folder,'*.tiff'))
    if len(img_list) < 2:
        print(f"Only {len(img_list)} images detected, exiting")
        sys.exit()
    if not os.path.exists(coreg_dem):
        print(f"Coreg dem {coreg_dem} could not be located, exiting")
        sys.exit()
    if not os.path.exists(ortho_dem):
        print(f"Ortho dem {ortho_dem} could not be located, exiting")
        sys.exit()
    if not os.path.exists(frame_index):
        print(f"Input frame index file {frame_index} file could not be located, exiting")
        sys.exit()

    # structure for output folder
    out_fol = os.path.join(args.outfolder,'proc_out')
    job_name = args.job_name

    # Universal Args
    # (renamed from `map` to avoid shadowing the builtin)
    use_map = args.ortho_workflow == 1

    # For consistency, lets hardcode expected file names,folder names here
    bound_fn = os.path.join(out_fol,job_name+'_bound_2km.gpkg')

    # step1 outputs
    # this is preprocessing step
    cam_gcp_directory = os.path.join(out_fol,'camgen_cam_gcp')

    # step2 outputs
    # this is bundle_adjustment step
    init_ba = os.path.join(out_fol,'ba_pinhole')
    ba_prefix = os.path.join(init_ba,'run')

    # step3 outputs
    # this is stereo reconstruction step
    init_ortho_dir = os.path.join(out_fol,'init_ortho')
    init_stereo_dir = os.path.join(out_fol,'init_block_stereo')
    intermediate_ortho_dir = os.path.join(out_fol,'intermediate_ortho')
    final_stereo_dir = os.path.join(out_fol,'final_pinhole_stereo')

    # step4, dem gridding and mosaicing
    mos_dem_dir = os.path.join(final_stereo_dir,'composite_dems')

    # step5, dem_alignment
    alignment_dir = os.path.join(out_fol,'georegistered_dem_mos')

    # step6, camera alignment
    aligned_cam_dir = os.path.join(out_fol,'georegistered_cameras')

    # step7, final orthorectification
    final_ortho_dir = os.path.join(out_fol,'georegistered_orthomosaics')

    # step 8, plot figure
    final_figure = os.path.join(out_fol,f"{job_name}_result.jpg")

    # step 10, experimental rpc production

    if args.full_workflow == 1:
        steps2run = np.arange(0,10)  # run the entire workflow (steps 1-8)
    else:
        steps2run = np.array(args.partial_workflow_steps).astype(int)

    #workflow_steps
    # create output directory
    if not os.path.exists(out_fol):
        os.makedirs(out_fol)
    # copy coreg_dem and ortho_dem to folder
    # if parallel runs on different nodes use the same DEM, then will have issues
    refdem_dir = os.path.join(out_fol,'refdem')
    if not os.path.exists(refdem_dir):
        os.makedirs(refdem_dir)
    shutil.copy2(coreg_dem,os.path.join(refdem_dir,os.path.basename(coreg_dem)))
    if coreg_dem != ortho_dem:
        diff_dem = True
        shutil.copy2(ortho_dem,os.path.join(refdem_dir,os.path.basename(ortho_dem)))
    else:
        diff_dem = False
    # replace old variable names
    coreg_dem = os.path.join(refdem_dir,os.path.basename(coreg_dem))
    ortho_dem = os.path.join(refdem_dir,os.path.basename(ortho_dem))

    print("Computing Target UTM zones for orthorectification")
    gdf_frame_index = skysat.parse_frame_index(frame_index)
    gdf_buffer = gpd.GeoDataFrame({'idx':[0],'geometry':gdf_frame_index.unary_union},crs={'init':'epsg:4326'})
    clon,clat = [gdf_buffer.centroid.x.values[0],gdf_buffer.centroid.y.values[0]]
    epsg_code = f'EPSG:{geo.compute_epsg(clon,clat)}'
    print(f"Detected UTM zone is {epsg_code}")
    if not os.path.exists(bound_fn):
        print("Creating buffered shapefile")
        gdf_proj = gdf_buffer.to_crs(epsg_code)
        # buffer by 2 km
        gdf_proj['geometry'] = gdf_proj.buffer(2000)
        gdf_proj.to_file(bound_fn,driver='GPKG')

    print("Cropping reference DEMs to extent of SkySat footprint + 2 km buffer")
    misc.clip_raster_by_shp_disk(coreg_dem,bound_fn)
    misc.ndvtrim_function(os.path.splitext(coreg_dem)[0]+'_shpclip.tif')
    coreg_dem = os.path.splitext(coreg_dem)[0]+'_shpclip_trim.tif'
    if diff_dem:
        # fixed: was bound_buffer_fn, which is never defined in this script
        # and raised NameError whenever coregdem != orthodem
        misc.clip_raster_by_shp_disk(ortho_dem,bound_fn)
        misc.ndvtrim_function(os.path.splitext(ortho_dem)[0]+'_shpclip.tif')
        ortho_dem = os.path.splitext(ortho_dem)[0]+'_shpclip_trim.tif'
    else:
        ortho_dem = coreg_dem

    if 1 in steps2run:
        print("Sampling video sequence and generating Frame Cameras")
        cam_gen_log = workflow.skysat_preprocess(img_folder,mode='video',product_level='l1a',
            outdir=cam_gcp_directory,sampler=60,sampling='num_images',frame_index_fn=frame_index,
            dem=ortho_dem)
        # read the frame_index.csv which contains the info for sampled scenes only
        print(cam_gcp_directory)
        # now point to the subsampled frame_index file
        frame_index = glob.glob(os.path.join(cam_gcp_directory,'*frame*.csv'))[0]

    if 2 in steps2run:
        # this is bundle adjustment step
        print("Running bundle adjustment for the input video sequence")
        ba.bundle_adjust_stable(img=img_folder,ba_prefix=ba_prefix,cam=cam_gcp_directory,
            session='nadirpinhole',num_iter=2000,num_pass=3,gcp=cam_gcp_directory,
            frame_index=frame_index,mode='full_video')

    if 3 in steps2run:
        # this is stereo step
        # we need to check for 2 steps
        # is use_map turned to true ?
        # if use_map true, is low resolution block matching DEM to be used in stereo ?
        # so lets process first assuming use_map is untrue
        if not use_map:
            if args.mvs == 1:
                print("MVS not implemented on non-orthorectified scenes, exiting")
                sys.exit()
            workflow.execute_skysat_stereo(img_folder,final_stereo_dir,
                ba_prefix=ba_prefix+'-run',mode='video',threads=2,
                session='nadirpinhole',frame_index=frame_index,sampling_interval=10,
                full_extent=1)
        else:
            if args.produce_low_res_for_ortho == 1:
                # will need to produce low res dem using block matching on L1A images and bundle adjusted cameras
                # this was used for the 2 St. Helen's case studies in SkySat stereo manuscript
                print("Running stereo with block matching for producing consistent orthorectification DEM")
                workflow.execute_skysat_stereo(img_folder,init_stereo_dir,
                    ba_prefix=ba_prefix+'-run',mode='video',threads=2,
                    session='nadirpinhole',frame_index=frame_index,sampling_interval=10,
                    full_extent=1,block=1,texture='low')

                # query point clouds
                pc_list = sorted(glob.glob(os.path.join(init_stereo_dir,'12*/run-PC.tif')))
                # grid into DEMs
                print("Gridding block matching point clouds")
                workflow.gridding_wrapper(pc_list,tr=4,tsrs=epsg_code)
                dem_list = sorted(glob.glob(os.path.join(init_stereo_dir,'12*/run-DEM.tif')))
                hole_filled_low_res_dem = os.path.join(init_stereo_dir,'block_matching_hole_filled_dem_mos.tif')
                workflow.dem_mosaic_holefill_wrapper(input_dem_list=dem_list,
                    output_dem_path=hole_filled_low_res_dem)
                dem_for_ortho = hole_filled_low_res_dem
            else:
                # this argument will use input orhtodem (used for camera resection) as input for orthorectification
                dem_for_ortho = ortho_dem

            print("Running intermediate orthorectification")
            # NOTE(review): this call passes outfolder= while step 7 passes
            # out_folder= -- confirm against execute_skysat_orhtorectification's
            # signature which keyword is correct
            workflow.execute_skysat_orhtorectification(images=img_folder,session='pinhole',
                outfolder=intermediate_ortho_dir,frame_index_fn=frame_index,tsrs=epsg_code,
                dem=dem_for_ortho,mode='science',data='video',ba_prefix=ba_prefix+'-run')

            ## Now run final stereo
            print("Running final stereo reconstruction")
            workflow.execute_skysat_stereo(intermediate_ortho_dir,final_stereo_dir,
                ba_prefix=ba_prefix+'-run',mode='video',session='pinholemappinhole',
                frame_index=frame_index,sampling_interval=10,full_extent=1,
                dem=dem_for_ortho,mvs=args.mvs,block=args.block_matching)

    if 4 in steps2run:
        pc_list = sorted(glob.glob(os.path.join(final_stereo_dir,'12*/run-PC.tif')))
        print(f"Identified {len(pc_list)} clouds")
        # this is dem gridding followed by mosaicing
        workflow.gridding_wrapper(pc_list,tr=2,tsrs=epsg_code)
        print("Mosaicing DEMs")
        workflow.dem_mosaic_wrapper(final_stereo_dir,mode='video',out_folder=mos_dem_dir)

    if 5 in steps2run:
        # this is DEM alignment step
        # add option to mask coreg_dem for static surfaces
        # might want to remove glaciers, forest et al. before coregisteration
        # this can potentially be done in asp_utils step
        # actually use dem_mask.py with options of nlcd, nlcd_filter (not_forest) and of course RGI glacier polygons
        if args.mask_dem == 1:
            # this might change for non-US sites, best to use bareground files
            # (fixed typo: was args.msak_dem_opt, which raised AttributeError and
            # left mask_list unbound for the glaciers+nlcd choice)
            if args.mask_dem_opt == 'glaciers':
                mask_list = ['glaciers']
            elif args.mask_dem_opt == 'glaciers+nlcd':
                mask_list = ['nlcd','glaciers']
            print("Masking reference DEM to static surfaces")
            misc.dem_mask_disk(mask_list,os.path.abspath(coreg_dem))
            coreg_dem = os.path.splitext(coreg_dem)[0]+'_ref.tif'

        # now perform alignment
        median_mos_dem = glob.glob(os.path.join(mos_dem_dir,'video_median_mos.tif'))[0]
        # use the composite filtered by count and NMAD metrics
        median_mos_dem_filt = glob.glob(os.path.join(mos_dem_dir,'video_median_mos_filt*.tif'))[0]
        print("Aligning DEMs")
        workflow.alignment_wrapper_single(coreg_dem,source_dem=median_mos_dem_filt,
            max_displacement=100,outprefix=os.path.join(alignment_dir,'run'))

    if 6 in steps2run:
        # this steps aligns the frame camera models
        camera_list = sorted(glob.glob(os.path.join(init_ba,'run-run-*.tsai')))
        print(f"Detected {len(camera_list)} cameras to be registered to DEM")
        alignment_vector = glob.glob(os.path.join(alignment_dir,'alignment_vector.txt'))[0]
        if not os.path.exists(aligned_cam_dir):
            os.makedirs(aligned_cam_dir)
        print("Aligning cameras")
        workflow.align_cameras_wrapper(input_camera_list=camera_list,transform_txt=alignment_vector,
            outfolder=aligned_cam_dir)

    if 7 in steps2run:
        # this produces final georegistered orthomosaics
        georegistered_median_dem = glob.glob(os.path.join(alignment_dir,'run-trans_*DEM.tif'))[0]
        print("Running final orthomsaic creation")
        workflow.execute_skysat_orhtorectification(images=img_folder,session='pinhole',
            out_folder=final_ortho_dir,tsrs=epsg_code,dem=georegistered_median_dem,
            mode='science',orthomosaic=1,data='video',ba_prefix=os.path.join(aligned_cam_dir,'run-run'))

    if 8 in steps2run:
        # this produces a final plot of orthoimage,DEM, NMAD and countmaps
        ortho = glob.glob(os.path.join(final_ortho_dir,'*median_orthomosaic.tif'))[0]
        count = glob.glob(os.path.join(mos_dem_dir,'*count*.tif'))[0]
        nmad = glob.glob(os.path.join(mos_dem_dir,'*nmad*.tif'))[0]
        georegistered_median_dem = glob.glob(os.path.join(alignment_dir,'run-trans_*DEM.tif'))[0]
        print("plotting final figure")
        misc.plot_composite_fig(ortho,georegistered_median_dem,count,nmad,outfn=final_figure,product='video')

if __name__ == '__main__':
    main()

# ------------------------------------------------------------------ /setup.py
#!/usr/bin/env python

from distutils.core import setup

setup(name='skysat_stereo',
      version='0.1',
      description='library for DEM generation workflows from Planet SkySat-C imagery ',
      author='Shashank Bhushan and Team 3D',
      author_email='sbaglapl@uw.edu',
      license='MIT',
      long_description=open('README.md').read(),
      url='https://github.com/uw-cryo/skysat_stereo.git',
      packages=['skysat_stereo'],
      install_requires=['requests']
      )
# ------------------------------------------------- /skysat_stereo/__init__.py
#! /usr/bin/env python

__all__=['asp_utils','skysat','misc_geospatial','skysat_stereo_workflow','bundle_adjustment_lib']

# ------------------------------------- /skysat_stereo/bundle_adjustment_lib.py
#! /usr/bin/env python
import os,sys,glob,shutil
import subprocess
import argparse
from distutils.spawn import find_executable
from pygeotools.lib import iolib,malib
import geopandas as gpd
import numpy as np
from datetime import datetime
import pandas as pd
from multiprocessing import cpu_count

def run_cmd(bin, args, **kw):
    """
    Run an ASP binary as a subprocess.

    Parameters
    ----------
    bin: str
        name of the ASP executable to run (e.g. 'bundle_adjust')
    args: list
        list of command-line arguments passed to the executable
    **kw
        optional 'msg' keyword used to label the step in the failure message

    Raises
    ------
    SystemExit
        if the executable cannot be found on PATH
    Exception
        if the executable exits with a non-zero return code
    """
    # from dshean/vmap.py
    # fixed: binpath was hard-coded to one user's scratch-dir bundle_adjust
    # binary, which ignored the `bin` argument and made the None check below
    # dead code; resolve the requested executable from PATH instead
    binpath = find_executable(bin)
    if binpath is None:
        msg = ("Unable to find executable %s\n"
               "Install ASP and ensure it is in your PATH env variable\n"
               "https://ti.arc.nasa.gov/tech/asr/intelligent-robotics/ngt/stereo/" % bin)
        sys.exit(msg)
    call = [binpath, ]
    call.extend(args)
    print(' '.join(call))
    try:
        code = subprocess.call(call, shell=False)
    except OSError as e:
        raise Exception('%s: %s' % (binpath, e))
    if code != 0:
        # fixed: kw['msg'] raised KeyError when callers (e.g.
        # bundle_adjust_stable) did not pass msg; fall back to the binary name
        raise Exception('ASP step ' + kw.get('msg', bin) + ' failed')


def get_ba_opts(ba_prefix, ip_per_tile=4000,camera_weight=None,translation_weight=0.4,rotation_weight=0,fixed_cam_idx=None,overlap_list=None, robust_threshold=None, overlap_limit=None, initial_transform=None,
                input_adjustments=None, flavor='general_ba', session='nadirpinhole', gcp_transform=False,num_iterations=2000,num_pass=2,lon_lat_limit=None,elevation_limit=None):
    """
    Assemble the command-line option list for ASP's bundle_adjust.

    Returns
    -------
    ba_opt: list
        list of option strings to be appended with image/camera arguments
        and passed to run_cmd('bundle_adjust', ...)
    """
    ba_opt = []
    # allow CERES to use multi-threads
    ba_opt.extend(['--threads', str(cpu_count())])
    #ba_opt.extend(['--threads', '1'])
    ba_opt.extend(['-o', ba_prefix])

    # keypoint-finding args
    # relax triangulation error based filters to account for initial camera errors
    ba_opt.extend(['--min-matches', '4'])
    ba_opt.extend(['--disable-tri-ip-filter'])
    ba_opt.extend(['--force-reuse-match-files'])
    ba_opt.extend(['--ip-per-tile', str(ip_per_tile)])
    ba_opt.extend(['--ip-inlier-factor', '0.2'])
    ba_opt.extend(['--ip-num-ransac-iterations', '1000'])
    ba_opt.extend(['--skip-rough-homography'])
    ba_opt.extend(['--min-triangulation-angle', '0.0001'])

    # Save control network created from match points
    ba_opt.extend(['--save-cnet-as-csv'])

    # Individually normalize images to properly stretch contrast
    # Helpful in keypoint detection
    ba_opt.extend(['--individually-normalize'])

    if robust_threshold is not None:
        # make the solver focus more on minimizing very high reprojection errors
        ba_opt.extend(['--robust-threshold', str(robust_threshold)])

    if camera_weight is not None:
        # this generally assigns weight to penalise movement of camera parameters (Default:0)
        ba_opt.extend(['--camera-weight', str(camera_weight)])
    else:
        # this is more fine grained, will penalize translation but allow rotation parameters update
        ba_opt.extend(['--translation-weight',str(translation_weight)])
        ba_opt.extend(['--rotation-weight',str(rotation_weight)])

    if fixed_cam_idx is not None:
        # parameters for cameras at the specified indices will not be floated during optimisation
        ba_opt.extend(['--fixed-camera-indices',' '.join(fixed_cam_idx.astype(str))])
    ba_opt.extend(['-t', session])

    # filter points based on reprojection errors before running a new pass
    ba_opt.extend(['--remove-outliers-params', '75 3 5 6'])

    # How about adding num random passes here ? Think about it, it might help if we are getting stuck in local minima :)
    if session == 'nadirpinhole':
        # write out a new camera model file with updated parameters
        ba_opt.extend(['--inline-adjustments'])

    # specify number of passes and maximum iterations per pass
    ba_opt.extend(['--num-iterations', str(num_iterations)])
    ba_opt.extend(['--num-passes', str(num_pass)])
    #ba_opt.extend(['--parameter-tolerance','1e-14'])

    if gcp_transform:
        ba_opt.extend(['--transform-cameras-using-gcp'])

    if initial_transform:
        ba_opt.extend(['--initial-transform', initial_transform])
    if input_adjustments:
        ba_opt.extend(['--input-adjustments', input_adjustments])

    # these 2 parameters determine which image pairs to use for feature matching
    # only the selected pairs are used in formation of the bundle adjustment control network
    # video is a sequence of overlapping scenes, so we use an overlap limit
    # triplet stereo uses list of overlapping pairs
    if overlap_limit:
        ba_opt.extend(['--overlap-limit',str(overlap_limit)])
    if overlap_list:
        ba_opt.extend(['--overlap-list', overlap_list])

    # these two params are not used generally.
    if lon_lat_limit:
        ba_opt.extend(['--lon-lat-limit',str(lon_lat_limit[0]),str(lon_lat_limit[1]),str(lon_lat_limit[2]),str(lon_lat_limit[3])])
    if elevation_limit:
        ba_opt.extend(['--elevation-limit',str(elevation_limit[0]),str(elevation_limit[1])])

    return ba_opt

def bundle_adjust_stable(img,ba_prefix,cam=None,session='rpc',initial_transform=None,
                         input_adjustments=None,overlap_list=None,gcp=None,
                         mode='full_triplet',bound=None,camera_param2float='trans+rot',
                         dem=None,num_iter=2000,num_pass=2,frame_index=None):
    """
    """
    img_list = sorted(glob.glob(os.path.join(img,'*.tif')))
    if len(img_list) < 2:
        img_list = sorted(glob.glob(os.path.join(img, '*.tiff')))
    #img_list = [os.path.basename(x) for x in img_list]
    if os.path.islink(img_list[0]):
        img_list = [os.readlink(x) for x in img_list]
    if overlap_list is not None:
        # need to remove images and cameras which are not optimised during bundle adjustment
        # read pairs from input overlap list
        initial_count = len(img_list)
        with open(overlap_list) as f:
            content = f.readlines()
        content = [x.strip() for x in content]
        l_img = [x.split(' ')[0] for x in content]
        r_img = [x.split(' ')[1] for x in content]
        total_img = l_img + r_img
        uniq_idx = np.unique(total_img, return_index=True)[1]
        img_list = [total_img[idx] for idx in sorted(uniq_idx)]
        print(f"Out of the initial {initial_count} images, {len(img_list)} will be orthorectified using adjusted cameras")

    if cam is not None:
        #cam = os.path.abspath(cam)
        if 'run' in os.path.basename(cam):
            cam_list = [glob.glob(cam+'-'+os.path.splitext(os.path.basename(x))[0]+'*.tsai')[0] for x in img_list]
            print("No of cameras is {}".format(len(cam_list)))
        else:
            cam_list = [glob.glob(os.path.join(cam,os.path.splitext(os.path.basename(x))[0]+'*.tsai'))[0] for x in img_list]
153 | if gcp is not None: 154 | gcp_list = sorted(glob.glob(os.path.join(args.gcp, '*.gcp'))) 155 | if bound: 156 | bound = gpd.read_file(args.bound) 157 | geo_crs = {'init':'epsg:4326'} 158 | if bound.crs is not geo_crs: 159 | bound = bound.to_crs(geo_crs) 160 | lon_min,lat_min,lon_max,lat_max = bound.total_bounds 161 | if camera_param2float == 'trans+rot': 162 | cam_wt = 0 163 | else: 164 | # this will invoke adjustment with rotation weight of 0 and translation weight of 0.4 165 | cam_wt = None 166 | print(f"Camera weight is {cam_wt}") 167 | 168 | if dem: 169 | dem = iolib.fn_getma(dem) 170 | dem_stats = malib.get_stats_dict(dem) 171 | min_elev,max_elev = [dem_stats['min']-500,dem_stats['max']+500] 172 | dem = None 173 | if mode == 'full_triplet': 174 | if overlap_list is None: 175 | print( 176 | "Attempted bundle adjust will be expensive, will try to find matches in each and every pair") 177 | # the concept is simple 178 | #first 3 cameras, and then corresponding first three cameras from next collection are fixed in the first go 179 | # these serve as a kind of #GCP, preventing a large drift in the triangulated points/camera extrinsics during optimization 180 | img_time_identifier_list = np.array([os.path.basename(img).split('_')[1] for img in img_list]) 181 | img_time_unique_list = np.unique(img_time_identifier_list) 182 | second_collection_list = np.where(img_time_identifier_list == img_time_unique_list[1])[0][[0,1,2]] 183 | fix_cam_idx = np.array([0,1,2]+list(second_collection_list)) 184 | print(type(fix_cam_idx)) 185 | round1_opts = get_ba_opts( 186 | ba_prefix, session=session,num_iterations=num_iter,num_pass=num_pass, 187 | fixed_cam_idx=fix_cam_idx,overlap_list=overlap_list,camera_weight=cam_wt) 188 | # enter round2_opts here only ? 
189 | if session == 'nadirpinhole': 190 | ba_args = img_list+ cam_list 191 | else: 192 | ba_args = img_list 193 | print("Running round 1 bundle adjustment for given triplet stereo combination") 194 | run_cmd('bundle_adjust', round1_opts+ba_args) 195 | 196 | # Save the first and foremost bundle adjustment reprojection error file 197 | init_residual_fn_def = sorted(glob.glob(ba_prefix+'*initial*residuals*pointmap*.csv'))[0] 198 | init_residual_fn = os.path.splitext(init_residual_fn_def)[0]+'_initial_reproj_error.csv' 199 | init_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*initial_residuals_raw_pixels.txt'))[0] 200 | init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0]+'_initial_per_cam_reproj_error.txt' 201 | init_cam_stats = sorted(glob.glob(ba_prefix+'-*initial_residuals_stats.txt'))[0] 202 | init_cam_stats_disk = os.path.splitext(init_cam_stats)[0]+'_initial_camera_stats.txt' 203 | shutil.copy2(init_residual_fn_def,init_residual_fn) 204 | shutil.copy2(init_per_cam_reproj_err,init_per_cam_reproj_err_disk) 205 | shutil.copy2(init_cam_stats,init_cam_stats_disk) 206 | 207 | if session == 'nadirpinhole': 208 | identifier = os.path.basename(cam_list[0]).split('_',14)[0][:2] 209 | print(ba_prefix+'-{}*.tsai'.format(identifier)) 210 | cam_list = sorted(glob.glob(os.path.join(ba_prefix+ '-{}*.tsai'.format(identifier)))) 211 | ba_args = img_list+cam_list 212 | fixed_cam_idx2 = np.delete(np.arange(len(img_list),dtype=int),fix_cam_idx) 213 | round2_opts = get_ba_opts(ba_prefix, overlap_list=overlap_list,session=session, 214 | fixed_cam_idx=fixed_cam_idx2,camera_weight=cam_wt) 215 | else: 216 | # round 1 is adjust file 217 | # Only camera model parameters for the first three stereo pairs float in this round 218 | input_adjustments = ba_prefix 219 | round2_opts = get_ba_opts( 220 | ba_prefix, overlap_limit, input_adjustments=ba_prefix, flavor='2round_gcp_2', session=session, 221 | 
elevation_limit=[min_elev,max_elev],lon_lat_limit=[lon_min,lat_min,lon_max,lat_max]) 222 | ba_args = img_list+gcp_list 223 | 224 | 225 | print("running round 2 bundle adjustment for given triplet stereo combination") 226 | run_cmd('bundle_adjust', round2_opts+ba_args) 227 | 228 | # Save state for final condition reprojection errors for the sparse triangulated points 229 | final_residual_fn_def = sorted(glob.glob(ba_prefix+'*final*residuals*pointmap*.csv'))[0] 230 | final_residual_fn = os.path.splitext(final_residual_fn_def)[0]+'_final_reproj_error.csv' 231 | shutil.copy2(final_residual_fn_def,final_residual_fn) 232 | final_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*final_residuals_raw_pixels.txt'))[0] 233 | final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0]+'_final_per_cam_reproj_error.txt' 234 | final_cam_stats = sorted(glob.glob(ba_prefix+'-*final_residuals_stats.txt'))[0] 235 | final_cam_stats_disk = os.path.splitext(final_cam_stats)[0]+'_final_camera_stats.txt' 236 | shutil.copy2(final_per_cam_reproj_err,final_per_cam_reproj_err_disk) 237 | shutil.copy2(final_cam_stats,final_cam_stats_disk) 238 | 239 | elif mode == 'full_video': 240 | df = pd.read_csv(frame_index) 241 | # block to determine automatically overlap limit of 40 seconds for computing match points 242 | df['dt'] = [datetime.strptime(date.split('+00:00')[0],'%Y-%m-%dT%H:%M:%S.%f') for date in df.datetime.values] 243 | delta = (df.dt.values[1]-df.dt.values[0])/np.timedelta64(1, 's') 244 | # i hardocde overlap limit to have 40 seconds coverage 245 | overlap_limit = np.int(np.ceil(40/delta)) 246 | print("Calculated overlap limit as {}".format(overlap_limit)) 247 | img_list = [glob.glob(os.path.join(img,'*{}*.tiff'.format(x)))[0] for x in df.name.values] 248 | cam_list = [glob.glob(os.path.join(cam,'*{}*.tsai'.format(x)))[0] for x in df.name.values] 249 | gcp_list = [glob.glob(os.path.join(gcp,'*{}*.gcp'.format(x)))[0] for x in df.name.values] 250 | #also append the 
clean gcp here 251 | print(os.path.join(gcp,'*clean*_gcp.gcp')) 252 | gcp_list.append(glob.glob(os.path.join(gcp,'*clean*_gcp.gcp'))[0]) 253 | round1_opts = get_ba_opts( 254 | ba_prefix, overlap_limit=overlap_limit, flavor='2round_gcp_1', session=session,ip_per_tile=4000, 255 | num_iterations=num_iter,num_pass=num_pass,camera_weight=cam_wt,fixed_cam_idx=None,robust_threshold=None) 256 | print("Running round 1 bundle adjustment for input video sequence") 257 | if session == 'nadirpinhole': 258 | ba_args = img_list+cam_list 259 | else: 260 | ba_args = img_list 261 | # Check if this command executed till last 262 | print('Running bundle adjustment round1') 263 | run_cmd('bundle_adjust', round1_opts+ba_args) 264 | 265 | # Make files used to evaluate solution quality 266 | init_residual_fn_def = sorted(glob.glob(ba_prefix+'*initial*residuals*pointmap*.csv'))[0] 267 | init_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*initial_residuals_*raw_pixels.txt'))[0] 268 | init_per_cam_reproj_err_disk = os.path.splitext(init_per_cam_reproj_err)[0]+'_initial_per_cam_reproj_error.txt' 269 | init_residual_fn = os.path.splitext(init_residual_fn_def)[0]+'_initial_reproj_error.csv' 270 | shutil.copy2(init_residual_fn_def,init_residual_fn) 271 | shutil.copy2(init_per_cam_reproj_err,init_per_cam_reproj_err_disk) 272 | 273 | # Copy final reprojection error files before transforming cameras 274 | final_residual_fn_def = sorted(glob.glob(ba_prefix+'*final*residuals*pointmap*.csv'))[0] 275 | final_residual_fn = os.path.splitext(final_residual_fn_def)[0]+'_final_reproj_error.csv' 276 | final_per_cam_reproj_err = sorted(glob.glob(ba_prefix+'-*final_residuals*_raw_pixels.txt'))[0] 277 | final_per_cam_reproj_err_disk = os.path.splitext(final_per_cam_reproj_err)[0]+'_final_per_cam_reproj_error.txt' 278 | shutil.copy2(final_residual_fn_def,final_residual_fn) 279 | shutil.copy2(final_per_cam_reproj_err,final_per_cam_reproj_err_disk) 280 | 281 | if session == 'nadirpinhole': 282 | # prepare for 
second run to apply a constant transform to the self-consistent models using initial ground footprints 283 | identifier = os.path.basename(cam_list[0]).split(df.name.values[0])[0] 284 | print(ba_prefix+identifier+'-{}*.tsai'.format(df.name.values[0])) 285 | cam_list = [glob.glob(ba_prefix+identifier+'-{}*.tsai'.format(img))[0] for img in df.name.values] 286 | print(len(cam_list)) 287 | ba_args = img_list+cam_list+gcp_list 288 | 289 | round2_opts = get_ba_opts( 290 | ba_prefix, overlap_limit = overlap_limit, flavor='2round_gcp_2', session=session, gcp_transform=True,camera_weight=0, 291 | num_iterations=0,num_pass=1) 292 | 293 | print("running round 2 bundle adjustment for input video sequence") 294 | run_cmd('bundle_adjust', round2_opts+ba_args) 295 | -------------------------------------------------------------------------------- /skysat_stereo/misc_geospatial.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | import matplotlib 3 | matplotlib.use('Agg') 4 | 5 | import os,sys,glob 6 | import numpy as np 7 | import pandas as pd 8 | import geopandas as gpd 9 | from imview import pltlib 10 | import matplotlib.pyplot as plt 11 | from pygeotools.lib import iolib,geolib,warplib,malib 12 | from demcoreg import dem_mask 13 | 14 | def shp_merger(shplist): 15 | """ 16 | merge multiple geopandas shapefiles into 1 multi-row shapefile 17 | Parameters 18 | ------------ 19 | shplist: list 20 | list of geopandas shapefiles 21 | Returns 22 | ------------ 23 | gpd_merged: geopandas geodataframe 24 | merged multirow shapefile 25 | """ 26 | #Taken from here: "https://stackoverflow.com/questions/48874113/concat-multiple-shapefiles-via-geopandas" 27 | gpd_merged = pd.concat([shp for shp in shplist]).pipe(gpd.GeoDataFrame) 28 | return gpd_merged 29 | 30 | def plot_composite_fig(ortho,dem,count,nmad,outfn,product='triplet'): 31 | """ 32 | Plot the gallery figure for final DEM products 33 | Parameters 34 | ------------ 35 | 
def clip_raster_by_shp_disk(r_fn,shp_fn,extent='raster',invert=False,out_fn=None):
    """
    Clip a raster with a polygon shapefile and write the result to disk.

    Lightweight port of pygeotools/clip_raster_by_shp.py intended to avoid
    extra subprocess calls.
    """
    if not os.path.exists(r_fn):
        sys.exit("Unable to find r_fn: %s" % r_fn)
    if not os.path.exists(shp_fn):
        sys.exit("Unable to find shp_fn: %s" % shp_fn)
    # perform the clip in memory
    clipped_ma, clipped_ds = geolib.raster_shpclip(r_fn, shp_fn,extent=extent,invert=invert)
    if not out_fn:
        out_fn = os.path.splitext(r_fn)[0]+'_shpclip.tif'
    iolib.writeGTiff(clipped_ma, out_fn, clipped_ds)

def ndvtrim_function(src_fn):
    """
    Trim nodata collars from a raster, writing a *_trim.tif alongside the input.

    Direct functional port of pygeotools/trim_ndv.py.
    """
    if not iolib.fn_check(src_fn):
        sys.exit("Unable to find src_fn: %s" % src_fn)

    # thin wrapper around gdal.Open()
    src_ds = iolib.fn_getds(src_fn)
    src_gt = src_ds.GetGeoTransform()

    print("Loading input raster into masked array")
    src_ma = iolib.ds_getma(src_ds)

    print("Computing min/max indices for mask")
    # (row_min, row_max, col_min, col_max) of the valid-data envelope
    edge_env = malib.edgefind2(src_ma, intround=True)

    print("Updating output geotransform")
    trimmed_gt = list(src_gt)
    # edge_env values are integer multiples and the initial gt values refer to
    # the upper-left pixel corner, so a simple shift is sufficient
    trimmed_gt[0] = src_gt[0] + src_gt[1]*edge_env[2]   # shift UL_X
    trimmed_gt[3] = src_gt[3] + src_gt[5]*edge_env[0]   # shift UL_Y (src_gt[5] < 0)
    trimmed_gt = tuple(trimmed_gt)

    out_fn = os.path.splitext(src_fn)[0]+'_trim.tif'
    print("Writing out: %s" % out_fn)
    # +1 on the upper indices keeps the valid row/col on the right and bottom edges
    trimmed = src_ma[edge_env[0]:edge_env[1]+1, edge_env[2]:edge_env[3]+1]
    iolib.writeGTiff(trimmed, out_fn, src_ds, gt=trimmed_gt)
    src_ma = None

def dem_mask_disk(mask_list,dem_fn):
    """
    Mask a DEM to stable surfaces and write a *_ref.tif for co-registration.

    Lightweight version ported for convenience from
    https://github.com/dshean/demcoreg/blob/master/demcoreg/dem_mask.py
    """
    dem_ds = iolib.fn_getds(dem_fn)
    print(dem_fn)
    # load the DEM as a masked array
    dem = iolib.ds_getma(dem_ds)
    print("%i valid pixels in original input tif" % dem.count())
    newmask = dem_mask.get_mask(dem_ds,mask_list,dem_fn=dem_fn)
    # apply mask to the original DEM - these surfaces are used for co-registration
    masked_dem = np.ma.array(dem, mask=newmask)
    # sanity thresholds for pixel count and spread
    min_validpx_count = 100
    min_validpx_std = 10
    validpx_count = masked_dem.count()
    validpx_std = masked_dem.std()
    print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
    print("%0.2f std in masked output tif to be used as ref" % validpx_std)
    # the stricter condition (also requiring validpx_std > min_validpx_std) is
    # intentionally disabled here
    if (validpx_count > min_validpx_count):
        out_fn = os.path.splitext(dem_fn)[0]+"_ref.tif"
        iolib.writeGTiff(masked_dem, out_fn, src_ds=dem_ds)
    else:
        print("Not enough valid pixels!")
def parse_frame_index(frame_index,df_only=False):
    """
    Parse L1A frame_index.csv as a geodataframe/dataframe
    Parameters
    ----------
    frame_index: str
        Path to frame_index.csv file
    df_only: bool
        if True, the function returns a dataframe, else, it returns a geodataframe
    Returns
    ----------
    out: GeoPandas GeoDataframe/ Pandas Dataframe
        output type depends on input to df_only argument
    """
    df = pd.read_csv(frame_index)
    # BUGFIX: proj4-style {'init':'epsg:4326'} dicts are deprecated in pyproj>=2
    # and emit warnings/errors; use the authority string, consistent with
    # skysat_footprint elsewhere in this module
    geo_crs = 'EPSG:4326'
    df_rec = df.copy()
    # frame_index ships malformed WKT; repair each polygon before building geometry
    df_rec['geom'] = df_rec['geom'].apply(fix_polygon_wkt)
    gdf = gpd.GeoDataFrame(df_rec,geometry='geom',crs=geo_crs)
    if df_only:
        out = df
    else:
        out = gdf
    return out

def fix_polygon_wkt(string):
    # from Scott Henderson's notebook
    """
    returns shapely geometry from reformatted WKT
    Parameters
    ----------
    string: str
        wkt string to be formatted
    Returns
    ----------
    out: shapely geometry
        geometry parsed from the fixed wkt string
    """
    # drop the trailing '))' and close the ring by repeating the first point
    pre = string[:-2]
    first_point = string.split(',')[0].split('(')[-1]
    fixed = '{},{}))'.format(pre,first_point)
    return wkt.loads(fixed)
def copy_rpc(in_img,out_img):
    """
    Copy rpc info from 1 image to target image
    Parameters
    ----------
    in_img: str
        Path to input image from which RPC will be copied
    out_img: str
        Path to output image to which RPC will be copied
    """
    rpc_fn = in_img
    non_rpc_fn = out_img
    rpc_img = gdal.Open(rpc_fn, gdalconst.GA_ReadOnly)
    # open target for in-place metadata update
    non_rpc_img = gdal.Open(non_rpc_fn, gdalconst.GA_Update)
    rpc_data = rpc_img.GetMetadata('RPC')
    non_rpc_img.SetMetadata(rpc_data,'RPC')
    print("Copying rpc from {} to {}".format(in_img,out_img))
    # close datasets so the metadata is flushed to disk
    del(rpc_img)
    del(non_rpc_img)

def crop_sim_res_extent(img_list, outfol, vrt=False,rpc=False):
    """
    Warp images to common 'finest' resolution and intersecting extent
    This is useful for stereo processing with mapprojected imagery with the skysat pairs

    Parameters
    ----------
    img_list: list
        list containing two images
    outfol: str
        path to folder where warped images will be saved
    vrt: bool
        Produce warped VRT instead of geotiffs if True
    rpc: bool
        Copy RPC information to warped images if True
    Returns
    ----------
    out: list or None
        list containing the two warped images, first entry (left image) is the
        image which was of finer resolution (more nadir) initially.
        BUGFIX(doc): if the images do not intersect (or warping fails), a single
        None is returned — not a list of two Nones as previously documented.
    """
    #resample_alg = 'lanczos'
    resample_alg = 'cubic'
    img1 = img_list[0]
    img2 = img_list[1]
    img1_ds = iolib.fn_getds(img1)
    img2_ds = iolib.fn_getds(img2)
    res1 = geolib.get_res(img1_ds, square=True)[0]
    res2 = geolib.get_res(img2_ds, square=True)[0]
    # set left image as higher resolution, this is repeated for video, but
    # good for triplet with no gsd information
    if res1 < res2:
        l_img = img1
        r_img = img2
    else:
        l_img = img2
        r_img = img1
    # ASP stereo command expects the input to be .tif/.tiff, complains for .vrt,
    # so VRT output is saved with a .tif extension
    l_img_warp = os.path.join(outfol, os.path.splitext(os.path.basename(l_img))[0] + '_warp.tif')
    r_img_warp = os.path.join(outfol, os.path.splitext(os.path.basename(r_img))[0] + '_warp.tif')
    # NOTE(review): only the left warp is checked for existence; presumably both
    # are always written together — confirm before relying on partial reruns
    if not (os.path.exists(l_img_warp)):
        # can turn on verbose during qa/qc
        # Better to turn off during large runs, writing takes time
        verbose = False
        if not os.path.exists(outfol):
            os.makedirs(outfol)
        try:
            # this will simply break and continue if the images do not intersect
            ds_list = warplib.memwarp_multi_fn([l_img,r_img], r=resample_alg, verbose=verbose, res='min', extent = 'intersection')
            if vrt:
                extent = geolib.ds_extent(ds_list[0])
                res = geolib.get_res(ds_list[0], square=True)
                vrt_options = gdal.BuildVRTOptions(resampleAlg='average',resolution='user',xRes=res[0],yRes=res[1],outputBounds=tuple(extent))
                l_vrt = gdal.BuildVRT(l_img_warp, [l_img, ], options=vrt_options)
                r_vrt = gdal.BuildVRT(r_img_warp, [r_img, ], options=vrt_options)
                # close vrt to save to disk
                l_vrt = None
                r_vrt = None
                out = [l_img_warp, r_img_warp]
            else:
                # opting out of writing vrt, to prevent correlation artifacts;
                # GeoTiffs will be written out instead
                l_img_ma = iolib.ds_getma(ds_list[0])
                r_img_ma = iolib.ds_getma(ds_list[1])
                iolib.writeGTiff(l_img_ma, l_img_warp, ds_list[0])
                iolib.writeGTiff(r_img_ma, r_img_warp, ds_list[1])
                out = [l_img_warp, r_img_warp]
            del(ds_list)
            if rpc:
                copy_rpc(l_img,l_img_warp)
                copy_rpc(r_img,r_img_warp)
        # BUGFIX: was `except BaseException`, which also swallows
        # KeyboardInterrupt/SystemExit; Exception still covers the intended
        # "images do not intersect" failure from memwarp
        except Exception:
            out = None
    else:
        out = [l_img_warp, r_img_warp]
    return out
def video_mvs(img_folder,t,cam_fol=None,ba_prefix=None,dem=None,sampling_interval=None,texture=None,crop_map=False,outfol=None,frame_index=None,block=0):
    """
    Builds subprocess job list for video collection multiview implementation adapted from dAngelo 2016
    - each input master view is jointly triangulated with the next 20 views
    - sampling_interval selects the number of equally spaced master views
    Note: ASP's multiview only supports homography alignment, which will likely
    fail over steep terrain given the small skysat footprint; mapprojected
    images with alignment=None are preferred and are the only evaluated path.

    Parameters
    ----------
    img_folder: str
        Path to image folder
    t: str
        Session to use for stereo processing
    cam_fol: str
        Folder containing tsai camera models (None if using bundle adjusted cameras via ba_prefix)
    ba_prefix: str
        ba_prefix for locating the refined tsai camera models
    dem: str
        Path to DEM used for mapprojection
    sampling_interval: int
        Number of equally spaced master views to be selected
    texture: str
        'low' for low-texture imagery, otherwise normal; controls correlation/refinement kernels
    crop_map: bool
        crop images to map extent if True. Should always be False for video dataset
    outfol: str
        Path to master output folder where the stereo results will be saved
    frame_index: Geopandas GeoDataframe or Pandas Dataframe
        truncated frame_index written by skysat_preprocess.py; determines images to process
    block: int
        0 for default MGM matching, 1 for block matching

    Returns
    ----------
    job_list: list
        list of stereo jobs built from the given parameters
    """
    # only experimented with frame camera models
    job_list = []
    img_list = [glob.glob(os.path.join(img_folder,'{}*.tif'.format(frame)))[0] for frame in frame_index.name.values]
    # Read cameras
    if ba_prefix:
        cam_list = [glob.glob(ba_prefix + '-' + frame + '*.tsai')[0] for frame in frame_index.name.values]
    else:
        cam_list = [glob.glob(os.path.join(cam_fol, frame + '*.tsai'))[0] for frame in frame_index.name.values]
    num_pairs = 20  # can be accepted as input
    # Compute equally spaced indices for the master images to be chosen
    # BUGFIX: dtype=np.int was removed in NumPy>=1.24; use the builtin int
    ref_idx = np.linspace(0,len(img_list)-1-num_pairs,sampling_interval,dtype=int)
    # list of lists of source ids for each corresponding reference id
    source_idexs = [list(np.arange(idx+1,idx+1+num_pairs)) for idx in ref_idx]
    symlink = os.path.islink(img_list[0])
    # This loop prepares the jobs
    for i in tqdm(range(0, len(ref_idx))):
        if symlink:
            # BUGFIX: was os.path.islink(...), which returns a bool, not a path;
            # resolve the link target with os.readlink instead
            ref_image = os.readlink(img_list[ref_idx[i]])
            source_images = [os.readlink(img_list[source_idexs[i][k]]) for k in range(len(source_idexs[i]))]
        else:
            ref_image = img_list[ref_idx[i]]
            source_images = [img_list[source_idexs[i][k]] for k in range(len(source_idexs[i]))]
        ref_camera = cam_list[ref_idx[i]]
        source_cameras = [cam_list[source_idexs[i][k]] for k in range(len(source_idexs[i]))]
        print('Number of source images: {}'.format(len(source_images)))
        ref_prefix = os.path.splitext(os.path.basename(ref_image))[0]
        outstr = ref_prefix + '_mvs'
        outfolder = os.path.join(outfol, outstr)
        source_prefixes = [os.path.splitext(os.path.basename(x))[0] for x in source_images]
        if 'map' in ref_prefix:
            # strip the mapprojection suffix from the prefixes
            ref_prefix = ref_prefix.split('_map', 15)[0]
            source_prefixes = [x.split('_map', 15)[0] for x in source_prefixes]
        ba = None
        stereo_args = [ref_image] + source_images + [ref_camera] + source_cameras
        if block == 1:
            spm = 2
            stereo_mode = 'asp_bm'
            corr_tile_size = 1024
            if texture == 'low':
                rfne_kernel = [21, 21]
                corr_kernel = [35, 35]
                lv = 5
            else:
                rfne_kernel = [15, 15]
                corr_kernel = [21, 21]
                lv = 5
        else:
            spm = 2
            stereo_mode = 'asp_mgm'
            corr_tile_size = 6400
            if texture == 'low':
                rfne_kernel = [21, 21]
                corr_kernel = [9, 9]
                lv = 5
            else:
                rfne_kernel = [15, 15]
                corr_kernel = [7, 7]
                lv = 5
        stereo_opt = asp_utils.get_stereo_opts(session=t,align='None',lv=lv,corr_kernel=corr_kernel,rfne_kernel=rfne_kernel,stereo_mode=stereo_mode,spm=spm,cost_mode=4,corr_tile_size=corr_tile_size,mvs=True)
        outfolder = outfolder + '/run'
        print(outfolder)
        stereo_args = stereo_args + [outfolder]
        if 'map' in t:
            stereo_args = stereo_args + [dem]
        job_list.append(stereo_opt + stereo_args)
    return job_list
def prep_video_stereo_jobs(img_folder,t,threads=4,cam_fol=None,ba_prefix=None,dem=None,sampling_interval=None,texture=None,crop_map=False,outfol=None,frame_index=None,block=0,full_extent=False,entry_point='pprc'):
    """
    Builds subprocess job list for video collection pairwise implementation

    Parameters
    ----------
    img_folder: str
        Path to image folder
    t: str
        Session to use for stereo processing
    threads: int
        number of threads to use for each stereo step
    cam_fol: str
        Folder containing tsai camera models (None if using bundle adjusted cameras via ba_prefix)
    ba_prefix: str
        ba_prefix for locating the refined tsai camera models
    dem: str
        Path to DEM used for mapprojection
    sampling_interval: int
        interval at which source images are chosen for sequential reference images
    texture: str
        'low' for low-texture imagery, otherwise normal; controls correlation/refinement kernels
    crop_map: bool
        crop images to common resolution and extent for mapprojected sessions
    outfol: str
        Path to master output folder where the stereo results will be saved
    frame_index: Geopandas GeoDataframe or Pandas Dataframe
        truncated frame_index written by skysat_preprocess.py; determines images to process
    block: int
        0 for default MGM matching, 1 for block matching
    full_extent: bool
        if True and the chosen interval exceeds ~10 s, smaller-baseline pairs are
        padded at the beginning and end of the video sequence
    entry_point: str
        ASP processing stage to start from (pprc,corr,rfne,fltr,tri)

    Returns
    ----------
    job_list: list
        list of stereo jobs built from the given parameters
    """
    # prefer .tiff, fall back to .tif
    try:
        img_list = [glob.glob(os.path.join(img_folder,'{}*.tiff'.format(frame)))[0] for frame in frame_index.name.values]
    # BUGFIX: was a bare except:; only the failed glob lookup (empty result
    # indexed with [0]) should trigger the fallback
    except IndexError:
        img_list = [glob.glob(os.path.join(img_folder,'{}*.tif'.format(frame)))[0] for frame in frame_index.name.values]
    if 'pinhole' in t:
        if ba_prefix:
            cam_list = [glob.glob(ba_prefix + '-' + frame + '*.tsai')[0] for frame in frame_index.name.values]
        else:
            cam_list = [glob.glob(os.path.join(cam_fol, frame + '*.tsai'))[0] for frame in frame_index.name.values]
    print("Sampling interval is {}".format(sampling_interval))
    # find baseline between first image and its paired image, in seconds
    dt_list = [datetime.strptime(date.split('+00:00')[0],'%Y-%m-%dT%H:%M:%S.%f') for date in frame_index.datetime.values]
    min_sec = (dt_list[sampling_interval]-dt_list[0]).total_seconds()  # interval between an image and its stereo pair
    succesive_sec = (dt_list[1]-dt_list[0]).total_seconds()  # interval between 2 successive images

    end_point = len(img_list)-1-sampling_interval
    ref_idx = np.linspace(0,end_point,num=end_point+1,dtype=int)
    source_idx = ref_idx+sampling_interval
    print("seleceted {} stereo pairs by default".format(len(source_idx)))
    # if the sampling interval forms pairs at a large time separation and
    # full_extent is requested, pad shorter-baseline pairs at both ends so the
    # output DEM covers the full video extent (see docstring)
    if (min_sec > 10) & full_extent:
        # need to maintain a 10 second minimum interval;
        # stereo results are poor for intervals shorter than that
        # BUGFIX: np.int was removed in NumPy>=1.24; use the builtin int
        secondary_interval = int(np.round(10/succesive_sec))
        print("will buffer start and end frames with interval of {}".format(secondary_interval))
        end_point1 = source_idx[0]-secondary_interval
        ref_1 = np.linspace(ref_idx[0],end_point1,num=end_point1-ref_idx[0]+1,dtype=int)
        source_1 = ref_1+secondary_interval
        end_point2 = source_idx[-1]-secondary_interval
        ref_2 = np.linspace(ref_idx[-1],end_point2,num=end_point2-ref_idx[-1]+1,dtype=int)
        source_2 = ref_2+secondary_interval
        ref_idx = list(ref_idx)+list(ref_1)+list(ref_2)
        source_idx = list(source_idx)+list(source_1)+list(source_2)
        print("added additional {} stereo pairs".format(len(source_1)+len(source_2)))
    job_list = []
    symlink = os.path.islink(img_list[0])
    # Build jobs
    for i in tqdm(range(0, len(ref_idx))):
        if symlink:
            # BUGFIX: was img_list[ref_idx] / img_list[source_idx] — indexing with
            # the whole index array instead of the i-th entry (TypeError at runtime)
            in_img_1 = os.readlink(img_list[ref_idx[i]])
            in_img_2 = os.readlink(img_list[source_idx[i]])
        else:
            in_img_1 = img_list[ref_idx[i]]
            in_img_2 = img_list[source_idx[i]]
        img1 = os.path.basename(in_img_1)
        img2 = os.path.basename(in_img_2)
        pref_1 = os.path.splitext(img1)[0]
        pref_2 = os.path.splitext(img2)[0]
        if 'map' in t:
            # strip mapprojection suffix so the frame_index lookup matches
            pref_1 = pref_1.split('_PAN', 15)[0] + '_PAN'
            pref_2 = pref_2.split('_PAN', 15)[0] + '_PAN'
        mask1 = frame_index['name'] == pref_1
        mask2 = frame_index['name'] == pref_2
        df_img1 = frame_index[mask1]
        df_img2 = frame_index[mask2]
        gsd1 = df_img1.gsd.values[0]
        gsd2 = df_img2.gsd.values[0]
        if 'pinhole' in t:
            cam_1 = cam_list[ref_idx[i]]
            cam_2 = cam_list[source_idx[i]]
        # always map stereo disparity with finer resolution image as reference
        if gsd1 < gsd2:
            in_img1 = in_img_1
            in_img2 = in_img_2
            pref1 = pref_1
            pref2 = pref_2
            if 'pinhole' in t:
                cam1 = cam_1
                cam2 = cam_2
        else:
            in_img1 = in_img_2
            in_img2 = in_img_1
            pref1 = pref_2
            pref2 = pref_1
            if 'pinhole' in t:
                cam2 = cam_1
                cam1 = cam_2
        convergence = np.round(asp_utils.convergence_angle(df_img1.sat_az.values[0],df_img1.sat_elev.values[0],df_img2.sat_az.values[0],df_img2.sat_elev.values[0]),2)
        outstr = '{}_{}_{}'.format(pref1,pref2,convergence)
        outfolder = os.path.join(outfol, outstr)
        if 'pinhole' in t:
            if t == 'pinholemappinhole':
                # crop mapprojected pair to common resolution/extent
                in_img1, in_img2 = crop_sim_res_extent(
                    [in_img1, in_img2], outfolder)
                ba = None
            outfolder = outfolder + '/run'
            stereo_args = [in_img1, in_img2, cam1, cam2, outfolder]
        else:
            stereo_args = [in_img1, in_img2, outfolder]
        if ba_prefix:
            ba = ba_prefix
        else:
            ba = None
        if 'map' in t:
            align = 'None'
            stereo_args.append(dem)
        else:
            align = 'AffineEpipolar'
        if block == 1:
            print("Performing block matching")
            spm = 2  # Bayes EM
            stereo_mode = 'asp_bm'  # Block matching
            cost_mode = 2  # NCC
            corr_tile_size = 1024
            if texture == 'low':
                rfne_kernel = [21, 21]
                corr_kernel = [35, 35]
                lv = 2
            else:
                rfne_kernel = [15, 15]
                corr_kernel = [21, 21]
                lv = 5
        else:
            cost_mode = 4  # preferred MGM cost-mode
            spm = 2  # Bayes EM
            stereo_mode = 'asp_mgm'  # MGM
            corr_tile_size = 6400
            if texture == 'low':
                rfne_kernel = [21, 21]
                corr_kernel = [9, 9]
                lv = 2
            else:
                rfne_kernel = [15, 15]
                corr_kernel = [7, 7]
                lv = 5
        stereo_opt = asp_utils.get_stereo_opts(session=t,threads=threads,ba_prefix=ba,align=align,lv=lv,corr_kernel=corr_kernel,rfne_kernel=rfne_kernel,stereo_mode=stereo_mode,spm=spm,cost_mode=cost_mode,corr_tile_size=corr_tile_size)
        print(stereo_opt + stereo_args)
        job_list.append(stereo_opt + stereo_args)
    return job_list
for mapprojection 511 | texture: str 512 | use option 'low' input image texture is low, 'normal' for normal textured images. This is used for determining the correlation and refinement kernel 513 | crop_map: bool 514 | crop images to map extent if True. Cropping to common resolution and extent should give best results in mapprojected images 515 | block: int 516 | Select 0 for the defualt MGM matching, 1 for block matching 517 | entry_point: str 518 | Select stage from which to start ASP processing (pprc,corr,rfne,fltr,tri) 519 | """ 520 | 521 | IMG1 = os.path.splitext(os.path.basename(img1))[0] 522 | IMG2 = os.path.splitext(os.path.basename(img2))[0] 523 | out = outfolder + '/' + IMG1 + '__' + IMG2 524 | 525 | # camera session 526 | if 'rpc' in t: 527 | rpc = True 528 | else: 529 | rpc = False 530 | # https://www.geeksforgeeks.org/python-finding-strings-with-given-substring-in-list/ 531 | try: 532 | img1 = [x for x in img_list if re.search(IMG1, x)][0] 533 | img2 = [x for x in img_list if re.search(IMG2, x)][0] 534 | 535 | 536 | except BaseException: 537 | print("Images not found") 538 | return 539 | 540 | if 'map' in t: 541 | out = out + '_map' 542 | try: 543 | if crop_map: 544 | 545 | in_img1, in_img2 = crop_sim_res_extent([img1, img2], out,rpc=rpc) 546 | 547 | else: 548 | in_img1, in_img2 = [img1,img2] 549 | 550 | except BaseException: 551 | return 552 | 553 | else: 554 | in_img1 = img1 555 | in_img2 = img2 556 | 557 | out = os.path.join(out, 'run') 558 | IMG1 = os.path.splitext(os.path.basename(in_img1))[0] 559 | IMG2 = os.path.splitext(os.path.basename(in_img2))[0] 560 | if 'map' in t: 561 | IMG1 = IMG1.split('_map',15)[0] 562 | IMG2 = IMG2.split('_map',15)[0] 563 | 564 | # look for camera files 565 | if 'pinhole' in t: 566 | if ba_prefix: 567 | cam1 = glob.glob( 568 | os.path.abspath(ba_prefix) + '-' + IMG1 + '*.tsai')[0] 569 | cam2 = glob.glob( 570 | os.path.abspath(ba_prefix) + '-' + IMG2 + '*.tsai')[0] 571 | else: 572 | cam1 = 
glob.glob(os.path.join(os.path.abspath(cam_fol),'*'+IMG1 + '*.tsai'))[0] 573 | cam2 = glob.glob(os.path.join(os.path.abspath(cam_fol),'*'+IMG2 + '*.tsai'))[0] 574 | stereo_args = [in_img1, in_img2, cam1, cam2, out] 575 | align = 'AffineEpipolar' 576 | ba = None 577 | 578 | # for rpc model 579 | elif 'rpc' in t: 580 | stereo_args = [in_img1, in_img2, out] 581 | align = 'AffineEpipolar' 582 | if ba_prefix: 583 | ba = os.path.abspath(ba_prefix) 584 | else: 585 | ba = None 586 | 587 | # add DEM if orhtorectified 588 | if 'map' in t: 589 | stereo_args.append(dem) 590 | align = 'None' 591 | 592 | # set stereo parameters 593 | if block == 1: 594 | print("Performing block matching") 595 | xcorr = 2 596 | spm = 2 597 | stereo_mode = 'asp_bm' 598 | cost_mode = 2 599 | corr_tile_size = 1024 600 | if texture == 'low': 601 | rfne_kernel = [21, 21] 602 | corr_kernel = [35, 35] 603 | lv = 5 604 | else: 605 | rfne_kernel = [15, 15] 606 | corr_kernel = [21, 21] 607 | lv = 5 608 | 609 | else: 610 | xcorr = -1 611 | cost_mode = 4 612 | spm = 9 613 | stereo_mode = 'asp_mgm' 614 | corr_tile_size = 6400 615 | if texture == 'low': 616 | rfne_kernel = [21, 21] 617 | corr_kernel = [9, 9] 618 | lv = 5 619 | else: 620 | rfne_kernel = [15, 15] 621 | corr_kernel = [7, 7] 622 | lv = 5 623 | 624 | # entry_point logic 625 | if entry_point == 'pprc': 626 | ep = 0 627 | elif entry_point == 'corr': 628 | ep = 1 629 | elif entry_point == 'rfne': 630 | ep = 3 631 | elif entry_point == 'fltr': 632 | ep = 4 633 | elif entry_point == 'tri': 634 | ep = 5 635 | 636 | # Prepare stereo options 637 | stereo_opt = asp_utils.get_stereo_opts(session=t,ep = ep, threads=threads,ba_prefix=ba, 638 | align=align,corr_kernel=corr_kernel,lv=lv,rfne_kernel=rfne_kernel,stereo_mode=stereo_mode, 639 | spm=spm,cost_mode=cost_mode,corr_tile_size=corr_tile_size,xcorr=xcorr) 640 | #print(stereo_opt) 641 | return stereo_opt + stereo_args 642 | 643 | 644 | def 
triplet_stereo_job_list(overlap_list,t,img_list,threads=4,ba_prefix=None,cam_fol=None,dem=None,texture='high',crop_map=True,outfol=None,block=0,entry_point='pprc',cross_track=False): 645 | """ 646 | Builds subprocess job list for triplet collection pairwise implementation 647 | 648 | Parameters 649 | ---------- 650 | overlap_list: str 651 | path to pkl file containing the overlap list and overlap percentage 652 | t: str 653 | Session to use for stereo processing 654 | threads: int 655 | number of threads to use for 1 processing 656 | img_list: list 657 | List of paths of input images 658 | ba_prefix: str 659 | ba_prefix for locating the refined tsai camera models, or for locating the *.adjust files for RPC bundle adjusted cameras 660 | cam_fol: str 661 | Folder containing tsai camera models (None if using RPC models or using bundle adjusted tsai cameras 662 | dem: str 663 | Path to DEM used for mapprojection 664 | texture: str 665 | use option 'low' input image texture is low, 'normal' for normal textured images. This is used for determining the correlation and refinement kernel 666 | crop_map: bool 667 | crop images to map extent if True. 
Cropping to common resolution and extent should give best results in mapprojected images 668 | outfol: str 669 | Path to master output folder where the stereo results will be saved 670 | block: int 671 | Select 0 for the defualt MGM matching, 1 for block matching 672 | entry_point: str 673 | Select stage from which to start ASP processing (pprc,corr,rfne,fltr,tri) 674 | 675 | Returns 676 | ---------- 677 | job_list: list 678 | list of stereo jobs build on the given parameters 679 | """ 680 | 681 | job_list = [] 682 | print(img_list) 683 | l_img_list = [] 684 | r_img_list = [] 685 | triplet_df = prep_trip_df(overlap_list,cross_track=cross_track) 686 | if not os.path.exists(outfol): 687 | os.makedirs(outfol) 688 | df_list = [x for _, x in triplet_df.groupby('identifier_text')] 689 | for df in df_list: 690 | outfolder = os.path.join(outfol, df.iloc[0]['identifier_text']) 691 | img1_list = df.img1.values 692 | img2_list = df.img2.values 693 | print("preparing stereo jobs") 694 | num_img = len(img1_list) 695 | job_list_ = p_map(prepare_stereo_jobs_wrapper,img1_list,img2_list,[img_list]*num_img, 696 | [outfolder]*num_img,[t]*num_img,[threads]*num_img,[crop_map]*num_img,[ba_prefix]*num_img, 697 | [cam_fol]*num_img,[dem]*num_img,[block]*num_img,[texture]*num_img,[entry_point]*num_img) 698 | 699 | print(type(job_list_)) 700 | job_list.append(job_list_) 701 | 702 | 703 | 704 | return list(itertools.chain.from_iterable(job_list)) 705 | 706 | 707 | def prep_trip_df(overlap_list, true_stereo=True,cross_track=False): 708 | """ 709 | Prepare dataframe from input plckle file containing overlapping images with percentages 710 | Parameters 711 | ---------- 712 | overlap_list: str 713 | Path to pickle file containing overlapping images produced from skysat_overlap_parallel.py 714 | true_stereo: bool 715 | True means output dataframe has only pairs fromed by scenes from different views 716 | Returns 717 | ---------- 718 | df: Pandas Dataframe 719 | dataframe cotianing list of 
plausible overlapping stereo pairs 720 | """ 721 | # check date, if date not equal drop 722 | # then check time, if time equal drop 723 | # if satellite unequal, drop 724 | # then check overlap percent 725 | # then make different folders for different time period 726 | # to add timestamp/convergence angle filter, as list grows 727 | df = pd.read_pickle(overlap_list) 728 | sat = os.path.basename(df.iloc[0]['img1']).split('_',15)[2].split('d',15)[0] 729 | ccd = os.path.basename(df.iloc[0]['img1']).split('_',15)[2].split('d',15)[1] 730 | date = os.path.basename(df.iloc[0]['img1']).split('_', 15)[0] 731 | time = os.path.basename(df.iloc[0]['img1']).split('_', 15)[1] 732 | df['sat1'] = [os.path.basename(x).split('_', 15)[2].split('d', 15)[0] for x in df.img1.values] 733 | df['sat2'] = [os.path.basename(x).split('_', 15)[2].split('d', 15)[0] for x in df.img2.values] 734 | df['date1'] = [os.path.basename(x).split('_', 15)[0] for x in df.img1.values] 735 | df['date2'] = [os.path.basename(x).split('_', 15)[0] for x in df.img2.values] 736 | df['time1'] = [os.path.basename(x).split('_', 15)[1] for x in df.img1.values] 737 | df['time2'] = [os.path.basename(x).split('_', 15)[1] for x in df.img2.values] 738 | if true_stereo: 739 | # returned df has only those pairs which form true stereo 740 | df = df[df['time1'] != df['time2']] 741 | if not cross_track: 742 | df = df[df['date1'] == df['date2']] 743 | df = df[df['sat1'] == df['sat2']] 744 | # filter to overlap percentage of around 5% 745 | df['overlap_perc'] = df['overlap_perc'] * 100 746 | df = df[(df['overlap_perc'] > 2)] 747 | df['identifier_text'] = df['date1'] + '_' + df['time1'] + '_' + df['date2'] + '_' + df['time2'] 748 | print("Number of pairs over which stereo will be attempted are {}".format(len(df))) 749 | return df 750 | 751 | def frame_intsec(img_list,proj,min_overlap): 752 | """ 753 | Compute overlapping pairs with overlap percentage 754 | 755 | Parameters 756 | ---------- 757 | img_list: list 758 | List 
containing paths to the two images 759 | proj: str 760 | proj4 string to transform the frames before computing overlap percentage 761 | min_overlap: float 762 | minimum overlap percentage to consider (between 0 to 1) 763 | 764 | Returns 765 | ---------- 766 | valid: bool 767 | True if frame intersect as per user defined minimum overlap percentage 768 | perc_intsect: float 769 | Float value returning percentage of overlap ( ranges from 0: no overlap to 1: full overlap) 770 | """ 771 | 772 | #shplist contains shp1,shp2 773 | img1 = img_list[0] 774 | img2 = img_list[1] 775 | shp1 = skysat_footprint(img1,proj) 776 | shp2 = skysat_footprint(img2,proj) 777 | if shp1.intersects(shp2)[0]: 778 | intsect = gpd.overlay(shp1,shp2, how='intersection') 779 | area_shp1 = shp1['geometry'].area.values[0] 780 | area_shp2 = shp2['geometry'].area.values[0] 781 | area_intsect = intsect['geometry'].area.values[0] 782 | perc_intsect = area_intsect/area_shp1 #we should be fine here with only 1 as skysat collects are mostly uniform ? 
783 | if perc_intsect>=min_overlap: 784 | valid=True 785 | else: 786 | valid=False 787 | else: 788 | valid=False 789 | perc_intsect=0.0 790 | return valid,perc_intsect 791 | 792 | def sort_img_list(img_list): 793 | """ 794 | sort triplet stereo imagery into forward, nadir and aft (in the given order) 795 | The function is simple, just uses info in the filename string for now 796 | Parameters 797 | ---------- 798 | img_list: list 799 | list of triplet stereo images 800 | Returns 801 | ---------- 802 | sorted_img_list: list 803 | list of list containing filenames for images captured at 1 pushframe timestamp 804 | time_list: list 805 | list of strings containing timestamps 806 | """ 807 | #list of unique image acquisition time list 808 | time_list = sorted(list(np.unique(np.array([os.path.basename(img).split('_',15)[0]+'_'+os.path.basename(img).split('_',15)[1] for img in img_list])))) 809 | 810 | #make seperate image list 811 | #https://stackoverflow.com/questions/2152898/filtering-a-list-of-strings-based-on-contents 812 | sorted_img_list = [] 813 | for time in time_list: 814 | sorted_img_list.append([k for k in img_list if time in k]) 815 | 816 | return (sorted_img_list,time_list) 817 | 818 | def res_sort(img_list): 819 | """ 820 | sort images based on resolution, finest resolution on top 821 | Parameters 822 | ---------- 823 | img_list: list 824 | list of images to be sorted 825 | Returns 826 | ---------- 827 | sorted_img_list: list 828 | list of sorted images with finest resolution on top 829 | """ 830 | ds_list = [iolib.fn_getds(img) for img in img_list] 831 | res_list = [geolib.get_res(ds,square=True)[0] for ds in ds_list] 832 | #https://www.geeksforgeeks.org/python-sort-values-first-list-using-second-list 833 | zipped_pairs = zip(res_list, img_list) 834 | sorted_img_list = [x for _,x in sorted(zipped_pairs)] 835 | return sorted_img_list 836 | 837 | 838 | def filter_video_dem_by_nmad(ds_list,min_count=2,max_nmad=5): 839 | """ 840 | Filter Video DEM composites 
using NMAD and count stats 841 | This function will look for and eliminate pixels in median DEM where less than 842 | pairwise DEMs contributed and their vertical variability (NMAD) is higher than 843 | Parameters 844 | ----------- 845 | ds_list: list 846 | list of gdal datasets, containing median, count and nmad composites in order 847 | min_count: numeric 848 | minimum count to use in filtering 849 | max_nmad: numeric 850 | maximum NMAD variability to filter, if count is also <= min_count 851 | Returns 852 | ----------- 853 | dem_filt: masked array 854 | filtered DEM 855 | count_filt_c: masked array 856 | filtered NMAD map 857 | nmad_filt_c: masked array 858 | filtered count map 859 | """ 860 | 861 | dem = iolib.ds_getma(ds_list[0]) 862 | count = iolib.ds_getma(ds_list[1]) 863 | nmad = iolib.ds_getma(ds_list[2]) 864 | 865 | nmad_filt = np.ma.masked_where(nmad>5,nmad) 866 | count_filt = np.ma.masked_where(count<=2,count) 867 | print(type(nmad_filt.mask)) 868 | invalid_mask = np.logical_and(nmad_filt.mask,count_filt.mask) 869 | nmad_filt_c = np.ma.array(nmad_filt,mask = invalid_mask) 870 | count_filt_c = np.ma.array(count_filt,mask = invalid_mask) 871 | dem_filt = np.ma.array(dem,mask = invalid_mask) 872 | return dem_filt,count_filt_c,nmad_filt_c 873 | 874 | 875 | def modernize_frame_index(frame_index_fn,return_frame_index=True,outfn=None): 876 | """ 877 | Update frame_index to what ASP understands currently, i.e., 878 | Update name columns and geometry columns 879 | 880 | Parameters 881 | ------------- 882 | frame_index_fn: string 883 | path to frame_index 884 | outfn (Optional): string 885 | path to output frame_index filename 886 | """ 887 | from shapely import wkt 888 | from shapely.geometry.polygon import orient 889 | 890 | def _correct_geom(row): 891 | return wkt.loads(row['geom']) 892 | frame_index = pd.read_csv(frame_index_fn) 893 | frame_index['geom'] = frame_index.apply(_correct_geom,axis=1) 894 | 895 | # orient the Polygon geometry 896 | 
updated_geomlist_asp_convention = [orient(test_geom,-1) for test_geom in frame_index['geom'].values] 897 | 898 | # remove the space between POLYGON and ((# 899 | # this might not be required in the new release 900 | updated_geomlist_asp_convention = [f"POLYGON(({str(test_geom).split(' ((')[1]}" for test_geom in updated_geomlist_asp_convention] 901 | 902 | # remove the repeated last coordinate 903 | updated_geomlist_asp_convention = [','.join(test_geom.split(',')[:-1])+'))' for test_geom in updated_geomlist_asp_convention] 904 | 905 | # update geometry column 906 | frame_index['geom'] = updated_geomlist_asp_convention 907 | 908 | # update name 909 | frame_index['name'] = [os.path.splitext(name)[0] for name in frame_index.filename.values] 910 | print(os.path.splitext(frame_index.filename.values[0])[0]) 911 | 912 | # writeout 913 | if not outfn: 914 | outfn = os.path.splitext(frame_index_fn)[0] + '_with_orient.csv' 915 | frame_index.to_csv(outfn,index=False) 916 | 917 | if return_frame_index: 918 | return frame_index 919 | 920 | 921 | --------------------------------------------------------------------------------