├── .gitignore ├── CoastSat_PS.py ├── LICENSE ├── README.md ├── alt_environments ├── environment_alt_AUG_24_explicit.yml ├── environment_alt_AUG_24_general.yml ├── environment_alt_MAR_21_explicit.yml └── environment_alt_MAR_21_general.yml ├── coastsat_ps ├── classifier │ ├── classifier_functions.py │ ├── models │ │ └── NN_4classes_PS_NARRA.pkl │ ├── train_new_classifier.py │ └── training_data │ │ └── CoastSat_PS_training_set_NARRA_50000.pkl ├── data_import.py ├── extract_shoreline.py ├── interactive.py ├── plotting.py ├── postprocess.py ├── preprocess.py ├── preprocess_tools.py └── shoreline_tools.py ├── environment.yml ├── example_jupyter_notebook.ipynb ├── readme_files ├── extraction.png └── timeseries.png └── user_inputs ├── NARRA_polygon.kml ├── NARRA_tides.csv └── NARRA_transects.geojson /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .DS_Store 3 | *checkpoint.ipynb 4 | 5 | *.tif 6 | *.jpg 7 | *.txt 8 | *.html 9 | *.xml 10 | *.json 11 | 12 | *.png 13 | !extraction.png 14 | !timeseries.png 15 | 16 | *.pkl 17 | !NN_4classes_PS_NARRA.pkl 18 | !CoastSat_PS_training_set_NARRA_50000.pkl 19 | 20 | *.kml 21 | !NARRA_polygon.kml 22 | 23 | *.geojson 24 | !NARRA_transects.geojson 25 | 26 | *.csv 27 | !NARRA_tides.csv 28 | 29 | -------------------------------------------------------------------------------- /CoastSat_PS.py: -------------------------------------------------------------------------------- 1 | # CoastSat for PlanetScope Dove Imagery 2 | 3 | # load coastsat modules - NOTE - ensure working directory is the coastsat_ps folder 4 | from coastsat_ps.data_import import initialise_settings 5 | from coastsat_ps.extract_shoreline import extract_shorelines, compute_intersection 6 | from coastsat_ps.interactive import filter_shorelines 7 | from coastsat_ps.preprocess import (data_extract, pre_process, select_ref_image, 8 | add_ref_features) 9 | from coastsat_ps.postprocess import tidal_correction, ts_plot_single 10 | 11 | 12 | #%% 0) User Input Settings 13 | 14 | settings = { 15 | 16 | ### General Settings ### 17 | # Site name (for output folder and files) 18 | 'site_name': 'NARRA', 19 | # Maximum image cloud cover percentage threshold 20 | 'cloud_threshold': 10, # Default 10 21 | # Minimum image AOI cover percentage threshold 22 | 'extent_thresh': 80, # Default 80 23 | # Desired output shoreline epsg 24 | 'output_epsg': '28356', 25 | 26 | ### Reference files (in "...CoastSat.PlanetScope/user_inputs/") ### 27 | # Area of interest file (save as .kml file from geojson.io website) 28 | 'aoi_kml': 'NARRA_polygon.kml', 29 | # Transects in geojson file (ensure same epsg as output_epsg) 30 | 'transects': 'NARRA_transects.geojson', # False 31 | # If False boolean given, popup window will allow for manual drawing of transects 32 | # Tide csv file in MSL and UTC 33 | 'tide_data': 'NARRA_tides.csv', 34 | # Local folder planet imagery downloads location (provide full folder path) 35 | 'downloads_folder': '.../USER_PLANET_DOWNLOADS_FOLDER', 36 | 37 | ### Processing settings ### 38 | # Machine learning classifier filename (in "...CoastSat.PlanetScope/classifier/models") 39 | # A new classifier may be re-trained after step 1.3. Refer "...CoastSat.PlanetScope/classifier/train_new_classifier.py" for instructions. 
40 | 'classifier': 'NN_4classes_PS_NARRA.pkl', 41 | # Image co-registration choice ['Coreg Off', 'Local Coreg', 'Global Coreg'] 42 | 'im_coreg': 'Global Coreg', # refer https://pypi.org/project/arosics/ for details on Local vs Global coreg. Local recommended but slower. 43 | # Coregistration land mask - when set to False, a new land mask is calculated for each image (slower but more accurate for large geolocation errors or where the land area changes significantly) 44 | 'generic_land_mask': True, 45 | # Set as True to keep shorelines along thin/sand-free sections of beach - increases region of allowable shoreline contours 46 | 'thin_beach_fix': False, 47 | 48 | ### Advanced settings ### 49 | # Buffer size around masked cloud pixels [in metres] 50 | 'cloud_buffer': 9, # Default 9 (3 pixels) 51 | # Max distance from reference shoreline for valid shoreline [in metres] 52 | 'max_dist_ref': 75, # Default 75 53 | # Minimum area (m^2) for an object to be labelled as a beach 54 | 'min_beach_area': 150*150, # Default 22500 55 | # Minimum length for identified contour line to be saved as a shoreline [in metres] 56 | 'min_length_sl': 500, # Default 500 57 | # GDAL location setting (update path to match GDAL path; update 'coastsat_ps' to chosen environment name; example provided is for Mac) 58 | 'GDAL_location': '/Users/USER_NAME/.conda/envs/coastsat_ps/bin/', 59 | # for Windows - update 'anaconda2' to 'anaconda3' depending on installation version. 60 | # 'GDAL_location': r'C:\ProgramData\Anaconda3\envs\coastsat_ps\Library\bin', 61 | # coastsat_ps environment folder can be found with mamba using the command "mamba info -e" 62 | 63 | #### Additional advanced settings can be found in "...CoastSat.PlanetScope/coastsat_ps/data_import.py" 64 | 65 | } 66 | 67 | 68 | # Import data and update settings based on user input 69 | outputs = initialise_settings(settings) 70 | 71 | 72 | #%% 1.1) Pre-processing - TOA conversion and mask extraction 73 | 74 | data_extract(settings, outputs) 75 | 76 | 77 | #%% 1.2) Pre-processing - Select reference image for co-registration 78 | 79 | select_ref_image(settings, 80 | # set as True to replace previously selected ref_im 81 | replace_ref_im = False) 82 | 83 | # If the land mask region is poor, try selecting another reference image by setting replace_ref_im = True 84 | 85 | # If the land mask covers thin land regions (i.e. barrier islands), try adjusting the following settings: 86 | # - reduce min_beach_area (in cell 0) 87 | # - reduce land_mask_smoothing_1 and 2 (in data_import.py) 88 | 89 | # If the land mask is still poor, try retraining the classifier for your site to improve pixel classification 90 | 91 | 92 | #%% 1.3) Pre-processing - image coregistration and scene merging 93 | 94 | raise Exception('Run cell 1.3 manually') 95 | 96 | # Due to a spyder issue, select the code below and press F9 to run it, rather than running the individual cell 97 | pre_process(settings, outputs, 98 | # del_files_int = True will delete intermediate coregistration files to save space 99 | del_files_int = True, 100 | # set as True to replace previously run preprocessing 101 | rerun_preprocess = False) 102 | 103 | # If coregistration is performing poorly, the following may help: 104 | # - try a new reference image 105 | # - reduce tie-point grid size and tie-point window size (in data_import.py) 106 | # - adjust the 'generic_land_mask' switch 107 | # - adjust the coregistration method (global or local) 108 | 109 | 110 | #%% 2.1) Select georectified/merged image for classification, reference shoreline and
transects 111 | 112 | add_ref_features(settings, plot = True, redo_features = False) 113 | 114 | 115 | #%% 2.2) Extract shoreline data 116 | 117 | # Note that output shoreline .geojson file for use in GIS software is not todally corrected 118 | 119 | shoreline_data = extract_shorelines(outputs, settings, 120 | 121 | # del_index = True will delete water index .tif files once used to save space 122 | del_index = True, 123 | 124 | # set as true to replace previously extracted shorelines 125 | rerun_shorelines = False, 126 | 127 | # reclassify = True will reclassify images if they have been classified previously 128 | # useful when running again with a new classifier 129 | # use False to save time on re-runs with the same classifier to save processing time 130 | reclassify = False) 131 | 132 | # Plot parameters and layout can be adjusted in ...coastsat_ps/plotting.py 133 | 134 | 135 | #%% 3) Manual error detection 136 | 137 | # Option 1 - All images pass, creates a csv in the outputs folder: ...CoastSat.PlanetScope/outputs/SITE/shoreline outputs/COREG/NmB/Peak Fraction/shoreline_filter.csv" 138 | # manual_filter & load_csv = False 139 | 140 | # Option 2 - DEFAULT - popup window to keep or discard images (saves choices as a csv to file from option 1): 141 | # manual_filter = True & load_csv = False 142 | 143 | # Option 3 - loads and applies the csv saved from option 1 or 2. This file can be manually updated with a text editor prior to running. 144 | # manual_filter = False & load_csv = True 145 | 146 | shoreline_data = filter_shorelines(settings, 147 | manual_filter = True, load_csv = False) 148 | 149 | 150 | #%% 4) Shoreline transect intersction and csv export 151 | 152 | sl_csv = compute_intersection(shoreline_data, settings) 153 | 154 | 155 | #%% 5) Tidal Correction & filtering 156 | 157 | tide_settings = { 158 | # select beach slope as a generic value, or list of values corresponding to each transect 159 | # Transect specific beach slope values can be extracted with the CoastSat beach slope tool https://github.com/kvos/CoastSat.slope 160 | 'beach_slope': [0.085, 0.075, 0.08, 0.08, 0.1], #0.1 - Can be found using CoastSat.Slope toolbox 161 | 162 | # Reference elevation contour 163 | 'contour': 0.7, 164 | # Tidal correction weighting 165 | 'weighting': 1, 166 | # Offset correction (+ve value corrects sl seaward, ie. 
157 | tide_settings = { 158 | # select beach slope as a generic value, or a list of values corresponding to each transect 159 | # Transect-specific beach slope values can be extracted with the CoastSat beach slope tool https://github.com/kvos/CoastSat.slope 160 | 'beach_slope': [0.085, 0.075, 0.08, 0.08, 0.1], #0.1 - Can be found using the CoastSat.Slope toolbox 161 | 162 | # Reference elevation contour 163 | 'contour': 0.7, 164 | # Tidal correction weighting 165 | 'weighting': 1, 166 | # Offset correction (+ve value corrects sl seaward, i.e. increases chainage) 167 | 'offset': 0, 168 | 169 | # Date filter (minimum) 170 | 'date_min':'2016-01-01', 171 | # Date filter (maximum) 172 | 'date_max':'2024-01-01' 173 | } 174 | 175 | sl_csv_tide = tidal_correction(settings, tide_settings, sl_csv) 176 | 177 | 178 | #%% 6) Plot transects 179 | 180 | for transect in settings['transects_load'].keys(): 181 | ts_plot_single(settings, sl_csv_tide, transect, 182 | 183 | # set savgol = True to plot a 15 day moving average shoreline position 184 | # Requires a > 15 day shoreline timeseries range 185 | savgol = True, 186 | 187 | # set x_scale for x-axis labels ['days', 'months', 'years'] 188 | x_scale = 'years') 189 | 190 | 191 | #%% Approximate times (for ~1000 downloaded images) 192 | # 1.1) 20min 193 | # 1.3) 2.5h coregistration, 35min merge 194 | # 2.2) 1h classification, 50min shoreline extraction 195 | 196 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CoastSat.PlanetScope 2 | 3 | Yarran Doherty, UNSW Water Research Laboratory 4 | 5 | First release 01/2021, usability updates 06/2024 6 | 7 | ## **Description** 8 | 9 | CoastSat.PlanetScope is an open-source extension to the CoastSat Python toolkit enabling users to extract time-series of shoreline position from PlanetScope Dove satellite imagery. Similar to CoastSat, the CoastSat.PlanetScope extension utilises a machine-learning shoreline detection algorithm to classify images into sand, water, whitewater and other pixel classes prior to a sub-pixel shoreline extraction process. An additional co-registration step is implemented to minimise the impact of geolocation errors. Transect intersection and a tidal correction based on a generic beach slope are then applied to provide a timeseries of shoreline position. 10 | 11 | ![](readme_files/extraction.png) 12 | 13 | Output files include: 14 | - Shoreline timeseries .geojson file for use in GIS software (no tidal correction) 15 | - Tidally corrected shoreline transect intersection timeseries csv 16 | - Image shoreline extraction plots 17 | - Tidally corrected transect timeseries plots 18 | 19 | 20 | ## **Installation** 21 | 22 | For users of CoastSat, it is possible to run the CoastSat.PlanetScope toolkit in the original CoastSat environment once the following packages are installed: 23 | - [Rasterio](https://rasterio.readthedocs.io/en/latest/installation.html) 24 | - [AROSICS](https://danschef.git-pages.gfz-potsdam.de/arosics/doc/installation.html) 25 | 26 | It is recommended, however, to create a dedicated coastsat_ps environment using the provided environment.yml file. The advised method of installation is using [Mamba](https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html). [Anaconda](https://www.anaconda.com/) may also be used, however this method is slower and more prone to package conflicts. Once Mamba or Anaconda is installed, open the Mamba/Anaconda prompt and navigate to the locally downloaded CoastSat.PlanetScope repo folder by entering "cd /d C:\add\filepath\here\to\CoastSat.PlanetScope".
Once this has been done, enter the following commands one by one to install the coastsat_ps environment from the provided .yml file (replace mamba with conda below if using Anaconda): 27 | 28 | ``` 29 | mamba env create -f environment.yml -n coastsat_ps 30 | 31 | mamba activate coastsat_ps 32 | 33 | spyder 34 | ``` 35 | 36 | If installation fails using the default environment.yml, several alternate yaml file options are provided in the alt_environments folder. If you are having issues installing and opening spyder, see further recommendations in the 'Known Issues & Workarounds' section below. 37 | 38 | Once spyder is open, navigate to the CoastSat.PlanetScope folder to set the working directory (top right hand box in spyder) and open the CoastSat_PS.py file to begin the example run-through. Note that every time you want to run the code, you will need to activate the coastsat_ps environment and open spyder using the last two lines of code above. 39 | 40 | 41 | ## **Data Requirements** 42 | 43 | PlanetScope images must be manually downloaded by the user. 44 | - Access to PlanetScope imagery can be obtained through a [free trial](https://www.planet.com/trial/), [research license](https://www.planet.com/markets/education-and-research/) or [paid subscription](https://www.planet.com/contact-sales/#contact-sales). 45 | - The required PlanetScope file type is '4-band multispectral Analytic Ortho Scene'. Using the QGIS plugin, filter for "PlanetScope Scene" and download "Analytic Radiance (TOAR) 4-band GeoTiff" images. It is recommended to select the 'clip to AOI' option to reduce file size. Development was done with an AOI of ~5 km². 46 | 47 | - To run CoastSat.PlanetScope, keep all downloaded images and associated metadata in a single folder and provide this folder's filepath in the CoastSat_PS.py settings. 48 | 49 | All user input files (area of interest polygon, transects & tide data) should be saved in the folder "...CoastSat.PlanetScope/user_inputs" 50 | - The analysis region of interest .kml file may be selected and downloaded using [this tool](http://geojson.io). 51 | - The transects .geojson file (optional) should match the user input settings epsg. If skipped, transects may be drawn manually with an interactive popup. Alternatively, the provided NARRA_transects.geojson file may be manually modified in a text editor to add/remove/update transect names, coordinates and epsg. 52 | - The tide data .csv for tidal correction (optional) should be in UTC time and local mean sea level (MSL) elevation. See NARRA_tides.csv for csv data and column name formatting. 53 | 54 | Beach slopes for the tidal correction (step 5) can be extracted using the [CoastSat.Slope toolkit](https://github.com/kvos/CoastSat.slope) 55 | 56 | 57 | ## **Usage** 58 | 59 | ![](readme_files/timeseries.png) 60 | 61 | It is recommended the toolkit be run in spyder. Ensure the spyder graphics backend is set to 'automatic' for proper interactive plot rendering. 62 | - Preferences - iPython console - Graphics - Graphics Backend - Backend - Automatic 63 | 64 | CoastSat.PlanetScope is run from the CoastSat_PS.py file. 65 | - Instructions and comments are provided in this file for each step. 66 | - It is recommended that first-time users run steps as individual cells. 67 | 68 | Settings and interactive steps are based on the CoastSat workflow and will be familiar to users of CoastSat. 69 | 70 | Interactive popup window steps include: 71 | - Raw PlanetScope reference image selection for co-registration [step 1.2.]
 72 | - Top of Atmosphere merged reference image selection for shoreline extraction [step 2.1.] 73 | - Reference shoreline digitisation (refer to the 'Reference shoreline' section of the CoastSat readme for an example) - [step 2.1.] 74 | - Transect digitisation (optional - only if no transects.geojson file provided) - [step 2.1.] 75 | - Manual error detection (optional - keep/discard popup window as per CoastSat) - [step 3.] 76 | 77 | Results and plots are saved in '...CoastSat.PlanetScope/outputs/site_name/shoreline outputs'. 78 | 79 | 80 | ## **Training Neural-Network Classifier** 81 | 82 | Due to the preliminary stage of testing, validation was primarily completed at Narrabeen-Collaroy beach in Sydney, Australia. As such, the NN classifier is optimised for this site and may perform poorly at alternate sites with differing sediment composition. It is recommended a new classifier be trained for such regions. 83 | 84 | Steps are provided in "...CoastSat.PlanetScope/coastsat_ps/classifier/train_new_classifier.py". 85 | - Instructions are in this file and based on the CoastSat classifier training [methods](https://github.com/kvos/CoastSat/blob/master/doc/train_new_classifier.md). 86 | - CoastSat.PlanetScope must be run up to/including step 1.3. on a set of images to extract co-registered and top of atmosphere corrected scenes for classifier training. 87 | 88 | 89 | ## **Validation Results** 90 | 91 | - Accuracy was validated against in-situ RTK-GPS survey data at Narrabeen-Collaroy beach in the Northern Beaches of Sydney, Australia, with an RMSE of 3.5m (n=438). 92 | - An equivalent validation study at Duck, North Carolina, USA gave an observed RMSE of 4.7m (n=167). 93 | 94 | Detailed results and methodology are outlined in: 95 | 96 | Doherty Y., Harley M.D., Splinter K.D., Vos K. (2022). A Python Toolkit to Monitor Sandy Shoreline Change Using High-Resolution PlanetScope Cubesats. Environmental Modelling & Software. https://doi.org/10.1016/j.envsoft.2022.105512 97 | 98 | As a starting point for user validation studies, an example Jupyter notebook comparing CoastSat (Landsat/Sentinel-2) shorelines against in-situ survey data can be found on the main [CoastSat](https://github.com/kvos/CoastSat) repo for Narrabeen-Collaroy beach. Note that CoastSat.PlanetScope results will require re-structuring to match the CoastSat validation input format. 99 |
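As a minimal, hypothetical sketch of that restructuring (the file name and column layout below are placeholders - substitute those from your own output csv), pandas can reshape a one-column-per-transect timeseries into a long format for comparison:

```python
import pandas as pd

# placeholder file name - use the tidally corrected csv from your own run
sl = pd.read_csv('transect_timeseries_tidally_corrected.csv',
                 index_col=0, parse_dates=True)

# reshape from one column per transect to long format (date, transect, chainage)
sl_long = sl.melt(ignore_index=False, var_name='transect',
                  value_name='chainage').reset_index()
```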
100 | 101 | ## **Development Opportunities** 102 | - Currently the Planet-provided udm2 usable pixel filter is not supported and a conversion into the old udm format is used. An updated udm2 processing step may improve cloud and sensor error detection. 103 | - The PSB.SD sensor type (see [here](https://developers.planet.com/docs/apis/data/sensors)) was released while this project was in its final stages of development. Utilisation of these additional 4 image bands may be an opportunity to further improve shoreline accuracy. 104 | - Integration of existing CoastSat tools: 105 | - Automated extraction of FES2022 tide data 106 | - Automated integration with CoastSat.Slope 107 | - Add example site validation code 108 | - Add post-processing and mapping/visualisation codes similar to [this](https://ci-folium-web-map.s3.ap-southeast-2.amazonaws.com/UNSW-WRL-CHIS/aus0206_Narrabeen.html). 109 | - Additional validation and comparison studies: 110 | - Comparison between the three PlanetScope Dove sensor types (PS2, PS2.SD and PSB.SD) 111 | - Validation and testing at additional sites globally 112 | - Testing along non-sandy coastlines 113 | 114 | 115 | ## **Known Issues & Workarounds** 116 | 117 | The following issues have been identified by users and workarounds are presented below. My availability to maintain and update this repo is limited, so user feedback, bug fixes and developments are encouraged! 118 | 119 | #### **Environment installation** 120 | - Environment installation issues and package conflicts - see [here](https://github.com/ydoherty/CoastSat.PlanetScope/issues/2#issuecomment-830543064). This seems to be resolved for both Mac and Windows. Unexpected installation issues may still persist, so alternate installation environments are provided in the event the standard yaml file does not work. If installation fails for all of the provided yaml files, try editing the default environment.yml file in a text editor to remove spyder and try again. If you do this you will either have to install spyder separately, or run CoastSat.PlanetScope from an alternate IDE (e.g. VSCode). 121 | 122 | #### **Spyder crashing after opening** 123 | - Spyder does not play well with complex environments. In my experience, running "mamba uninstall spyder", then "mamba install spyder" will generally fix the issue. Failing that, the fastest method is usually to remove your environment with "mamba env remove -n coastsat_ps" and try installing one of the provided alternate environments such as "environment_alt_AUG_24_explicit.yml". Trying to debug an environment can take hours, so it's usually much faster to start from scratch. 124 | - It is also possible to manually install a standalone version of spyder and then link it to a python environment that doesn't have spyder installed. To use this method, download spyder from [here](https://docs.spyder-ide.org/5/installation.html#install-standalone). After this, edit the environment.yml file to replace 'spyder' with 'spyder-kernels' and then create the python environment as per the installation instructions above. Using this method, spyder needs to be opened manually rather than through the command line. Steps to link spyder to your environment are outlined [here](https://docs.spyder-ide.org/current/faq.html#using-packages-installer) and [here](https://docs.spyder-ide.org/current/faq.html#using-existing-environment). 125 | - If all else fails and you are unable to resolve your environment and/or open spyder, try installing a new environment without spyder (steps outlined in the 'Environment installation' bullet point above). CoastSat.PlanetScope can be run directly from the command line or using any alternate IDE (such as VSCode). 126 | 127 | 128 | #### **General** 129 | - If the toolbox repeatedly crashes on an individual image, inspect the file in GIS software to ensure there are no issues with the image. For impacted images, the easiest workaround is to delete the image (and all associated files) from the downloads folder and re-run the toolbox. 130 | - To remove all files created by the toolbox in order to start fresh, simply delete the CoastSat.PlanetScope > outputs > RUN_NAME folder. 131 | - If there are errors associated with file names, confirm input files (image tif, udm tif, image xml and image json) have the correct naming convention. The format should have the prefix "YYYYMMDD_HHMMSS_" followed by "_3B_AnalyticMS_clip.tif", "_3B_AnalyticMS_DN_udm_clip.tif", "_3B_AnalyticMS_metadata_clip.xml" and "_metadata.json". For example, 20230523_153456_101b_3B_AnalyticMS_clip.tif. A rough filename check is sketched after this list. 132 | - Unresolved issues may exist when using large AOIs but this has not been tested. Split large regions into smaller subsets for processing if this is the case. 133 |
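As a rough illustrative check (a sketch only, not part of the toolkit - the pattern below assumes the 4-band TOAR naming described above and may need adjusting for other download types), the snippet flags files in the downloads folder that don't match the expected convention:

```python
import os
import re

# placeholder path - use the same folder as the 'downloads_folder' setting
downloads_folder = '.../USER_PLANET_DOWNLOADS_FOLDER'

# YYYYMMDD_HHMMSS_<satellite id> prefix followed by one of the expected suffixes
pattern = re.compile(r'^\d{8}_\d{6}_\w+?('
                     r'_3B_AnalyticMS_clip\.tif|'
                     r'_3B_AnalyticMS_DN_udm_clip\.tif|'
                     r'_3B_AnalyticMS_metadata_clip\.xml|'
                     r'_metadata\.json)$')

for file_name in sorted(os.listdir(downloads_folder)):
    if not pattern.match(file_name):
        print('Unexpected file name:', file_name)
```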
134 | #### **Co-registration** 135 | - Arosics does not run cell 1.3 (image co-registration) in spyder on Windows. This appears to be due to the way arosics handles multiprocessing on the Windows operating system. A workaround is to copy and run the code directly in the spyder console, or to run it using a selection and F9. Instructions are provided in cell 1.3. 136 | - Arosics raises errors when performing co-registration from cell 1.3. Sometimes this is due to the selected reference image from cell 1.2. Delete the files "SITE_NAME_im_ref.tif" and "SITE_NAME_land_mask.tif" in the folder CoastSat.PlanetScope > outputs > SITE_NAME > input_data and re-run cell 1.2 to select a new reference image. 137 | - Most images are flagged as failing co-registration. Coregistration requires aligning the images based on land pixels only. If the land mask is poorly defined, try adjusting the settings mentioned in cell 1.2 or train a new classifier on your site. You can also try setting 'generic_land_mask' = True in data_import.py. This uses a single mask to align all images rather than calculating a new mask for each image (which is necessary when sites change significantly through time). Trying global coregistration may help for some sites, or try adjusting advanced coregistration settings in data_import.py such as tie-point grid spacing and window_size. 138 | - For regions where downloaded images do not all have the same CRS/epsg, the arosics co-registration step will fail and provide an error message. If this occurs, edit the advanced settings in the CoastSat.PlanetScope > coastsat_ps > data_import.py file (line 37) to change the arosics_reproject setting to True. 139 | - Inability to select a reference image. Comment out [this](https://github.com/ydoherty/CoastSat.PlanetScope/issues/2#issuecomment-828644872) section of code. See [here](https://github.com/ydoherty/CoastSat.PlanetScope/issues/2#issuecomment-840894375) for an explanation. 140 | 141 | #### **Plots** 142 | - Poor figure rendering in the interactive error checking from cell 3. The aspect ratio for plotted figures in the "Shoreline plots" outputs folder is hard coded. Figures will vary depending on the AOI shape and may look poor for certain configurations. The user can edit plotting settings manually in the coastsat_ps > plotting.py file in the initialise_plot function. Plotting is for error checking/visual purposes only, and poor figure rendering does not impact shoreline extraction in any way. 143 | - Figure window is white with a tiny dot for the shoreline image. This may be caused when the input transect geojson file is in the wrong coordinate reference system. The figure will still show the image and transects even if they have a different CRS; the blank space between them in the figure will be white. This can be fixed by updating the transect file, or by choosing the option to manually draw transects. 144 | 145 | #### **Non-rectangular crop regions** 146 | - When the code was written it was only possible to download images from Planet Explorer cropped to a rectangular AOI. Downloading images cropped to a non-rectangular polygon may cause issues.
See [this thread](https://github.com/ydoherty/CoastSat.PlanetScope/issues/14#issuecomment-2028491134) for more background and a potential workaround. If you are unable to use rectangular cropped images, you may need to reduce the 'extent_thresh' setting for the coregistration steps to work (try 5% to see if it works, and then ramp it back up). This should not impact shoreline extraction. If anyone finds and tests a coded workaround, let me know! 147 | 148 | -------------------------------------------------------------------------------- /alt_environments/environment_alt_AUG_24_explicit.yml: -------------------------------------------------------------------------------- 1 | name: coastsat_ps 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - affine=2.4.0=pyhd8ed1ab_0 7 | - alabaster=0.7.13=pyhd8ed1ab_0 8 | - anyio=3.7.1=pyhd8ed1ab_0 9 | - appdirs=1.4.4=pyh9f0ad1d_0 10 | - argon2-cffi=23.1.0=pyhd8ed1ab_0 11 | - argon2-cffi-bindings=21.2.0=py37hcc03f2d_2 12 | - arosics=1.4.1=py37h03978a9_0 13 | - arrow=1.2.3=pyhd8ed1ab_0 14 | - astroid=2.14.2=py37haa95532_0 15 | - astropy=4.3.1=py37hec80d1f_2 16 | - atomicwrites=1.4.1=pyhd8ed1ab_0 17 | - attrs=24.2.0=pyh71513ae_0 18 | - autopep8=1.6.0=pyhd8ed1ab_1 19 | - babel=2.14.0=pyhd8ed1ab_0 20 | - backcall=0.2.0=pyh9f0ad1d_0 21 | - backports=1.0=pyhd8ed1ab_4 22 | - backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 23 | - bcrypt=3.2.2=py37hcc03f2d_0 24 | - beautifulsoup4=4.12.3=pyha770c72_0 25 | - binaryornot=0.4.4=py_1 26 | - black=21.5b2=pyhd8ed1ab_0 27 | - bleach=6.1.0=pyhd8ed1ab_0 28 | - bokeh=2.4.3=pyhd8ed1ab_3 29 | - branca=0.7.2=pyhd8ed1ab_0 30 | - brotli=1.1.0=hcfcfb64_1 31 | - brotli-bin=1.1.0=hcfcfb64_1 32 | - brotli-python=1.0.9=py37hf2a7229_7 33 | - bzip2=1.0.8=h2466b09_7 34 | - ca-certificates=2024.7.2=haa95532_0 35 | - cartopy=0.17.0=py37hde43876_1009 36 | - certifi=2022.12.7=py37haa95532_0 37 | - cffi=1.15.1=py37ha95fbe2_1 38 | - cfitsio=3.470=h0af3d06_7 39 | - chardet=5.0.0=py37h03978a9_0 40 | - charset-normalizer=3.3.2=pyhd8ed1ab_0 41 | - click=7.1.2=pyh9f0ad1d_0 42 | - click-plugins=1.1.1=py_0 43 | - cligj=0.7.2=pyhd8ed1ab_1 44 | - cloudpickle=2.2.1=pyhd8ed1ab_0 45 | - cmocean=2.0=py_3 46 | - colorama=0.4.6=pyhd8ed1ab_0 47 | - colorcet=3.1.0=pyhd8ed1ab_0 48 | - colorspacious=1.1.2=pyh24bf2e0_0 49 | - cookiecutter=2.6.0=pyhca7485f_0 50 | - cryptography=38.0.2=py37h953a470_1 51 | - curl=8.8.0=h0dd56e1_1 52 | - cycler=0.11.0=pyhd8ed1ab_0 53 | - cytoolz=0.12.0=py37hcc03f2d_0 54 | - dask-core=2022.2.0=pyhd8ed1ab_0 55 | - dataclasses=0.8=pyhc8e2a94_3 56 | - debugpy=1.6.3=py37hf2a7229_0 57 | - decorator=5.1.1=pyhd8ed1ab_0 58 | - defusedxml=0.7.1=pyhd8ed1ab_0 59 | - diff-match-patch=20230430=pyhd8ed1ab_0 60 | - dill=0.3.8=pyhd8ed1ab_0 61 | - docutils=0.19=py37h03978a9_0 62 | - entrypoints=0.4=pyhd8ed1ab_0 63 | - exceptiongroup=1.2.2=pyhd8ed1ab_0 64 | - expat=2.6.2=h63175ca_0 65 | - fftw=3.3.10=nompi_h89e6982_110 66 | - fiona=1.8.13=py37hb7fdc2d_0 67 | - flake8=4.0.1=pyhd8ed1ab_2 68 | - folium=0.16.0=pyhd8ed1ab_0 69 | - fonttools=4.38.0=py37h51bd9d9_0 70 | - freetype=2.12.1=hdaf720e_2 71 | - freexl=1.0.6=h67ca5e6_1 72 | - fsspec=2023.1.0=pyhd8ed1ab_0 73 | - geoarray=0.14.3=py37h03978a9_0 74 | - geojson=3.1.0=pyhd8ed1ab_0 75 | - geopandas=0.10.2=pyhd8ed1ab_1 76 | - geopandas-base=0.10.2=pyha770c72_1 77 | - geos=3.8.0=he025d50_1 78 | - geotiff=1.7.0=h4545760_1 79 | - gettext=0.22.5=h5728263_3 80 | - gettext-tools=0.22.5=h5a7288d_3 81 | - glib=2.80.2=h0df6a38_0 82 | - glib-tools=2.80.2=h2f9d560_0 83 | - 
gst-plugins-base=1.18.5=he07aa86_3 84 | - gstreamer=1.18.5=hdff456e_3 85 | - hdf4=4.2.13=h0e5069d_1005 86 | - hdf5=1.10.6=nompi_he0bbb20_101 87 | - holoviews=1.17.1=pyhd8ed1ab_0 88 | - icu=58.2=ha925a31_3 89 | - idna=3.8=pyhd8ed1ab_0 90 | - imagecodecs-lite=2019.12.3=py37h0b711f8_5 91 | - imageio=2.35.1=pyh12aca89_0 92 | - imagesize=1.4.1=pyhd8ed1ab_0 93 | - importlib-metadata=4.11.4=py37h03978a9_0 94 | - importlib_metadata=4.11.4=hd8ed1ab_0 95 | - importlib_resources=6.0.0=pyhd8ed1ab_0 96 | - inflection=0.5.1=pyh9f0ad1d_0 97 | - intel-openmp=2024.2.1=h57928b3_1083 98 | - intervaltree=3.1.0=pyhd8ed1ab_1 99 | - ipykernel=6.16.2=pyh025b116_0 100 | - ipython=7.33.0=py37h03978a9_0 101 | - ipython_genutils=0.2.0=pyhd8ed1ab_1 102 | - isort=5.11.5=pyhd8ed1ab_0 103 | - jaraco.classes=3.4.0=pyhd8ed1ab_0 104 | - jedi=0.18.2=pyhd8ed1ab_0 105 | - jellyfish=0.9.0=py37hcc03f2d_1 106 | - jinja2=3.1.4=pyhd8ed1ab_0 107 | - jpeg=9e=hcfcfb64_3 108 | - jsonschema=4.17.3=pyhd8ed1ab_0 109 | - jupyter_client=7.4.9=pyhd8ed1ab_0 110 | - jupyter_core=4.11.1=py37h03978a9_0 111 | - jupyter_server=1.23.4=pyhd8ed1ab_0 112 | - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1 113 | - kealib=1.4.14=h96bfa42_2 114 | - keyring=23.9.3=py37h03978a9_0 115 | - kiwisolver=1.4.4=py37h8c56517_0 116 | - krb5=1.21.3=hdf4eb48_0 117 | - lazy-object-proxy=1.7.1=py37hcc03f2d_1 118 | - lerc=4.0.0=h63175ca_0 119 | - libasprintf=0.22.5=h5728263_3 120 | - libasprintf-devel=0.22.5=h5728263_3 121 | - libblas=3.9.0=23_win64_mkl 122 | - libbrotlicommon=1.1.0=hcfcfb64_1 123 | - libbrotlidec=1.1.0=hcfcfb64_1 124 | - libbrotlienc=1.1.0=hcfcfb64_1 125 | - libcblas=3.9.0=23_win64_mkl 126 | - libclang=12.0.1=default_h81446c8_4 127 | - libcurl=8.8.0=hd5e4a3a_1 128 | - libdeflate=1.17=hcfcfb64_0 129 | - libexpat=2.6.2=h63175ca_0 130 | - libffi=3.4.2=h8ffe710_5 131 | - libgdal=3.0.2=hc12e7b7_6 132 | - libgettextpo=0.22.5=h5728263_3 133 | - libgettextpo-devel=0.22.5=h5728263_3 134 | - libglib=2.80.2=h0df6a38_0 135 | - libiconv=1.17=hcfcfb64_2 136 | - libintl=0.22.5=h5728263_3 137 | - libintl-devel=0.22.5=h5728263_3 138 | - liblapack=3.9.0=23_win64_mkl 139 | - libnetcdf=4.8.1=h6685c40_2 140 | - libogg=1.3.5=h2466b09_0 141 | - libpng=1.6.43=h19919ed_0 142 | - libpq=12.15=h906ac69_0 143 | - libsodium=1.0.18=h8d14728_1 144 | - libspatialindex=1.9.3=h5a68840_5 145 | - libspatialite=4.3.0a=he9c6e69_1033 146 | - libsqlite=3.46.0=h2466b09_0 147 | - libssh2=1.11.0=h7dfc565_0 148 | - libtiff=4.5.0=hf8721a0_2 149 | - libvorbis=1.3.7=h0e60522_0 150 | - libwebp=1.2.4=hcfcfb64_3 151 | - libwebp-base=1.2.4=h8ffe710_0 152 | - libxml2=2.10.4=h0ad7f3c_2 153 | - libxslt=1.1.37=h2bbff1b_1 154 | - libzip=1.10.1=h1d365fa_3 155 | - libzlib=1.2.13=h2466b09_6 156 | - locket=1.0.0=pyhd8ed1ab_0 157 | - lxml=4.9.1=py37hcc03f2d_0 158 | - lz4-c=1.9.3=h8ffe710_1 159 | - m2w64-expat=2.1.1=2 160 | - m2w64-gcc-libgfortran=5.3.0=6 161 | - m2w64-gcc-libs=5.3.0=7 162 | - m2w64-gcc-libs-core=5.3.0=7 163 | - m2w64-gettext=0.19.7=2 164 | - m2w64-gmp=6.1.0=2 165 | - m2w64-libiconv=1.14=6 166 | - m2w64-libwinpthread-git=5.0.0.4634.697f757=2 167 | - m2w64-xz=5.2.2=2 168 | - mapclassify=2.5.0=pyhd8ed1ab_1 169 | - markdown=3.6=pyhd8ed1ab_0 170 | - markdown-it-py=2.2.0=pyhd8ed1ab_0 171 | - markupsafe=2.1.1=py37hcc03f2d_1 172 | - matplotlib=3.5.3=py37h03978a9_2 173 | - matplotlib-base=3.5.3=py37hbaab90a_2 174 | - matplotlib-inline=0.1.7=pyhd8ed1ab_0 175 | - mccabe=0.6.1=py_1 176 | - mdurl=0.1.2=pyhd8ed1ab_0 177 | - mistune=3.0.2=pyhd8ed1ab_0 178 | - mkl=2024.1.0=h66d3029_694 179 | - 
more-itertools=10.0.0=pyhd8ed1ab_0 180 | - msys2-conda-epoch=20160418=1 181 | - munch=2.5.0=py_0 182 | - munkres=1.1.4=pyh9f0ad1d_0 183 | - mypy_extensions=1.0.0=pyha770c72_0 184 | - nbclassic=1.1.0=pyhd8ed1ab_0 185 | - nbclient=0.7.0=pyhd8ed1ab_0 186 | - nbconvert=7.6.0=pyhd8ed1ab_0 187 | - nbconvert-core=7.6.0=pyhd8ed1ab_0 188 | - nbconvert-pandoc=7.6.0=pyhd8ed1ab_0 189 | - nbformat=5.8.0=pyhd8ed1ab_0 190 | - nest-asyncio=1.6.0=pyhd8ed1ab_0 191 | - networkx=2.6.3=pyhd8ed1ab_1 192 | - notebook=6.5.7=pyha770c72_0 193 | - notebook-shim=0.2.4=pyhd8ed1ab_0 194 | - numpy=1.21.6=py37h2830a78_0 195 | - numpydoc=1.6.0=pyhd8ed1ab_0 196 | - openjpeg=2.5.0=ha2aaf27_2 197 | - openssl=3.3.1=h2466b09_3 198 | - owslib=0.29.2=pyhd8ed1ab_0 199 | - packaging=23.2=pyhd8ed1ab_0 200 | - pandas=1.3.5=py37h9386db6_0 201 | - pandoc=3.3=h57928b3_0 202 | - pandocfilters=1.5.0=pyhd8ed1ab_0 203 | - panel=0.14.4=pyhd8ed1ab_0 204 | - param=1.13.0=pyh1a96a4e_0 205 | - paramiko=3.4.1=pyhd8ed1ab_0 206 | - parso=0.8.4=pyhd8ed1ab_0 207 | - partd=1.4.1=pyhd8ed1ab_0 208 | - pathspec=0.12.1=pyhd8ed1ab_0 209 | - pcre2=10.43=h17e33f8_0 210 | - pexpect=4.9.0=pyhd8ed1ab_0 211 | - pickleshare=0.7.5=py_1003 212 | - pillow=9.4.0=py37hd77b12b_0 213 | - pip=24.0=pyhd8ed1ab_0 214 | - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1 215 | - platformdirs=4.0.0=pyhd8ed1ab_0 216 | - plotly=5.23.0=pyhd8ed1ab_0 217 | - pluggy=1.0.0=py37h03978a9_3 218 | - ply=3.11=py37_0 219 | - proj=6.2.1=ha7a8c7b_0 220 | - prometheus_client=0.17.1=pyhd8ed1ab_0 221 | - prompt-toolkit=3.0.47=pyha770c72_0 222 | - psutil=5.9.3=py37h51bd9d9_0 223 | - pthreads-win32=2.9.1=hfa6e2cd_3 224 | - ptyprocess=0.7.0=pyhd3deb0d_0 225 | - py-tools-ds=0.16.9=py37h03978a9_0 226 | - pycodestyle=2.8.0=pyhd8ed1ab_0 227 | - pycparser=2.21=pyhd8ed1ab_0 228 | - pyct=0.4.6=py_0 229 | - pyct-core=0.4.6=py_0 230 | - pydocstyle=6.2.0=pyhd8ed1ab_0 231 | - pyepsg=0.4.0=py_0 232 | - pyerfa=2.0.0.1=py37h3a130e4_2 233 | - pyfftw=0.13.0=py37hdb909a5_2 234 | - pyflakes=2.4.0=pyhd8ed1ab_0 235 | - pygments=2.17.2=pyhd8ed1ab_0 236 | - pykdtree=1.3.5=py37h0b711f8_0 237 | - pykrige=1.7.0=py37hcc03f2d_0 238 | - pylint=2.16.4=pyhd8ed1ab_0 239 | - pyls-spyder=0.4.0=pyhd8ed1ab_0 240 | - pynacl=1.5.0=py37ha54c9ec_1 241 | - pyparsing=3.1.4=pyhd8ed1ab_0 242 | - pyproj=2.4.2.post1=py37h9716dcf_0 243 | - pyqt=5.15.7=py37hd77b12b_0 244 | - pyqt5-sip=12.11.0=py37hd77b12b_0 245 | - pyqtwebengine=5.15.7=py37hd77b12b_0 246 | - pyrsistent=0.18.1=py37hcc03f2d_1 247 | - pyshp=2.3.1=pyhd8ed1ab_0 248 | - pysocks=1.7.1=py37h03978a9_5 249 | - python=3.7.12=h900ac77_100_cpython 250 | - python-dateutil=2.9.0=pyhd8ed1ab_0 251 | - python-fastjsonschema=2.20.0=pyhd8ed1ab_0 252 | - python-lsp-black=1.1.0=pyhd8ed1ab_0 253 | - python-lsp-jsonrpc=1.0.0=pyhd8ed1ab_0 254 | - python-lsp-server=1.3.3=pyhd8ed1ab_0 255 | - python-slugify=8.0.4=pyhd8ed1ab_0 256 | - python_abi=3.7=4_cp37m 257 | - pytoolconfig=1.2.5=pyhd8ed1ab_0 258 | - pytz=2024.1=pyhd8ed1ab_0 259 | - pyviz_comms=3.0.3=pyhd8ed1ab_0 260 | - pywavelets=1.3.0=py37h3a130e4_1 261 | - pywin32=303=py37hcc03f2d_0 262 | - pywin32-ctypes=0.2.0=py37h03978a9_1005 263 | - pywinpty=2.0.8=py37h7f67f24_0 264 | - pyyaml=6.0=py37hcc03f2d_4 265 | - pyzmq=24.0.1=py37h7347f05_0 266 | - qdarkstyle=3.0.2=pyhd8ed1ab_0 267 | - qstylizer=0.2.3=pyhd8ed1ab_0 268 | - qt-main=5.15.2=he8e5bd7_7 269 | - qt-webengine=5.15.9=h5bd16bc_7 270 | - qtawesome=1.3.1=pyh9208f05_0 271 | - qtconsole=5.2.2=pyhd8ed1ab_1 272 | - qtconsole-base=5.2.2=pyhd8ed1ab_1 273 | - qtpy=2.4.1=pyhd8ed1ab_0 274 | - 
qtwebkit=5.212=h2bbfb41_5 275 | - rasterio=1.1.2=py37h2617b1b_0 276 | - regex=2022.9.13=py37h51bd9d9_0 277 | - requests=2.32.2=pyhd8ed1ab_0 278 | - rich=13.7.1=pyhd8ed1ab_0 279 | - rope=1.11.0=pyhd8ed1ab_0 280 | - rtree=1.0.1=py37h13cc57e_0 281 | - scikit-image=0.19.3=py37h3182a2c_1 282 | - scikit-learn=0.20.3=py37h3d241f0_1 283 | - scipy=1.7.3=py37hb6553fb_0 284 | - send2trash=1.8.3=pyh5737063_0 285 | - setuptools=59.8.0=py37h03978a9_1 286 | - shapely=1.7.0=py37h13a63d7_1 287 | - sip=6.6.2=py37hd77b12b_0 288 | - six=1.16.0=pyh6c4a22f_0 289 | - sniffio=1.3.1=pyhd8ed1ab_0 290 | - snowballstemmer=2.2.0=pyhd8ed1ab_0 291 | - snuggs=1.4.7=pyhd8ed1ab_1 292 | - sortedcontainers=2.4.0=pyhd8ed1ab_0 293 | - soupsieve=2.3.2.post1=pyhd8ed1ab_0 294 | - spectral=0.23.1=pyh1a96a4e_0 295 | - sphinx=5.3.0=pyhd8ed1ab_0 296 | - sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0 297 | - sphinxcontrib-devhelp=1.0.2=py_0 298 | - sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0 299 | - sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0 300 | - sphinxcontrib-qthelp=1.0.3=py_0 301 | - sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2 302 | - spyder=5.2.2=py37h03978a9_3 303 | - spyder-kernels=2.2.1=py37h03978a9_1 304 | - sqlite=3.46.0=h2466b09_0 305 | - tabulate=0.9.0=pyhd8ed1ab_1 306 | - tbb=2021.8.0=h59b6b97_0 307 | - tenacity=8.2.3=pyhd8ed1ab_0 308 | - terminado=0.17.0=pyh08f2357_0 309 | - text-unidecode=1.3=pyhd8ed1ab_1 310 | - textdistance=4.6.3=pyhd8ed1ab_0 311 | - three-merge=0.1.1=pyh9f0ad1d_0 312 | - tifffile=2020.6.3=py_0 313 | - tiledb=2.3.4=h78dabda_0 314 | - tinycss2=1.3.0=pyhd8ed1ab_0 315 | - tk=8.6.13=h5226925_1 316 | - toml=0.10.2=pyhd8ed1ab_0 317 | - tomli=2.0.1=pyhd8ed1ab_0 318 | - tomlkit=0.12.5=pyha770c72_0 319 | - toolz=0.12.1=pyhd8ed1ab_0 320 | - tornado=6.2=py37hcc03f2d_0 321 | - tqdm=4.66.5=pyhd8ed1ab_0 322 | - traitlets=5.9.0=pyhd8ed1ab_0 323 | - typed-ast=1.5.4=py37hcc03f2d_0 324 | - typing-extensions=4.7.1=hd8ed1ab_0 325 | - typing_extensions=4.7.1=pyha770c72_0 326 | - ucrt=10.0.22621.0=h57928b3_0 327 | - ujson=5.5.0=py37h7f67f24_0 328 | - unicodedata2=14.0.0=py37hcc03f2d_1 329 | - urllib3=2.2.1=pyhd8ed1ab_0 330 | - vc=14.3=h8a93ad2_20 331 | - vc14_runtime=14.40.33810=hcc2c482_20 332 | - vs2015_runtime=14.40.33810=h3bf8584_20 333 | - watchdog=2.1.9=py37h03978a9_0 334 | - wcwidth=0.2.10=pyhd8ed1ab_0 335 | - webencodings=0.5.1=pyhd8ed1ab_2 336 | - websocket-client=1.6.1=pyhd8ed1ab_0 337 | - wheel=0.42.0=pyhd8ed1ab_0 338 | - win_inet_pton=1.1.0=pyhd8ed1ab_6 339 | - winpty=0.4.3=4 340 | - wrapt=1.14.1=py37hcc03f2d_0 341 | - xerces-c=3.2.5=he0c23c2_1 342 | - xyzservices=2023.5.0=pyhd8ed1ab_0 343 | - xz=5.2.6=h8d14728_0 344 | - yaml=0.2.5=h8ffe710_2 345 | - yapf=0.33.0=pyhd8ed1ab_1 346 | - zeromq=4.3.4=h0e60522_1 347 | - zipp=3.15.0=pyhd8ed1ab_0 348 | - zlib=1.2.13=h2466b09_6 349 | - zstd=1.5.6=h0ea2cb4_0 350 | - pip: 351 | - gdal==3.0.2 352 | prefix: C:\Users\scotero\AppData\Local\anaconda3\envs\coastsat_ps 353 | -------------------------------------------------------------------------------- /alt_environments/environment_alt_AUG_24_general.yml: -------------------------------------------------------------------------------- 1 | name: coastsat_ps 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - affine=2.4.0 7 | - alabaster=0.7.13 8 | - anyio=3.7.1 9 | - appdirs=1.4.4 10 | - argon2-cffi=23.1.0 11 | - argon2-cffi-bindings=21.2.0 12 | - arosics=1.4.1 13 | - arrow=1.2.3 14 | - astroid=2.14.2 15 | - astropy=4.3.1 16 | - atomicwrites=1.4.1 17 | - attrs=24.2.0 18 | - autopep8=1.6.0 19 | - babel=2.14.0 20 
| - backcall=0.2.0 21 | - backports=1.0 22 | - backports.functools_lru_cache=2.0.0 23 | - bcrypt=3.2.2 24 | - beautifulsoup4=4.12.3 25 | - binaryornot=0.4.4 26 | - black=21.5b2 27 | - bleach=6.1.0 28 | - bokeh=2.4.3 29 | - branca=0.7.2 30 | - brotli=1.1.0 31 | - brotli-bin=1.1.0 32 | - brotli-python=1.0.9 33 | - bzip2=1.0.8 34 | - ca-certificates=2024.7.2 35 | - cartopy=0.17.0 36 | - certifi=2022.12.7 37 | - cffi=1.15.1 38 | - cfitsio=3.470 39 | - chardet=5.0.0 40 | - charset-normalizer=3.3.2 41 | - click=7.1.2 42 | - click-plugins=1.1.1 43 | - cligj=0.7.2 44 | - cloudpickle=2.2.1 45 | - cmocean=2.0 46 | - colorama=0.4.6 47 | - colorcet=3.1.0 48 | - colorspacious=1.1.2 49 | - cookiecutter=2.6.0 50 | - cryptography=38.0.2 51 | - curl=8.8.0 52 | - cycler=0.11.0 53 | - cytoolz=0.12.0 54 | - dask-core=2022.2.0 55 | - dataclasses=0.8 56 | - debugpy=1.6.3 57 | - decorator=5.1.1 58 | - defusedxml=0.7.1 59 | - diff-match-patch=20230430 60 | - dill=0.3.8 61 | - docutils=0.19 62 | - entrypoints=0.4 63 | - exceptiongroup=1.2.2 64 | - expat=2.6.2 65 | - fftw=3.3.10 66 | - fiona=1.8.13 67 | - flake8=4.0.1 68 | - folium=0.16.0 69 | - fonttools=4.38.0 70 | - freetype=2.12.1 71 | - freexl=1.0.6 72 | - fsspec=2023.1.0 73 | - geoarray=0.14.3 74 | - geojson=3.1.0 75 | - geopandas=0.10.2 76 | - geopandas-base=0.10.2 77 | - geos=3.8.0 78 | - geotiff=1.7.0 79 | - gettext=0.22.5 80 | - gettext-tools=0.22.5 81 | - glib=2.80.2 82 | - glib-tools=2.80.2 83 | - gst-plugins-base=1.18.5 84 | - gstreamer=1.18.5 85 | - hdf4=4.2.13 86 | - hdf5=1.10.6 87 | - holoviews=1.17.1 88 | - icu=58.2 89 | - idna=3.8 90 | - imagecodecs-lite=2019.12.3 91 | - imageio=2.35.1 92 | - imagesize=1.4.1 93 | - importlib-metadata=4.11.4 94 | - importlib_metadata=4.11.4 95 | - importlib_resources=6.0.0 96 | - inflection=0.5.1 97 | - intel-openmp=2024.2.1 98 | - intervaltree=3.1.0 99 | - ipykernel=6.16.2 100 | - ipython=7.33.0 101 | - ipython_genutils=0.2.0 102 | - isort=5.11.5 103 | - jaraco.classes=3.4.0 104 | - jedi=0.18.2 105 | - jellyfish=0.9.0 106 | - jinja2=3.1.4 107 | - jpeg=9e 108 | - jsonschema=4.17.3 109 | - jupyter_client=7.4.9 110 | - jupyter_core=4.11.1 111 | - jupyter_server=1.23.4 112 | - jupyterlab_pygments=0.3.0 113 | - kealib=1.4.14 114 | - keyring=23.9.3 115 | - kiwisolver=1.4.4 116 | - krb5=1.21.3 117 | - lazy-object-proxy=1.7.1 118 | - lerc=4.0.0 119 | - libasprintf=0.22.5 120 | - libasprintf-devel=0.22.5 121 | - libblas=3.9.0 122 | - libbrotlicommon=1.1.0 123 | - libbrotlidec=1.1.0 124 | - libbrotlienc=1.1.0 125 | - libcblas=3.9.0 126 | - libclang=12.0.1 127 | - libcurl=8.8.0 128 | - libdeflate=1.17 129 | - libexpat=2.6.2 130 | - libffi=3.4.2 131 | - libgdal=3.0.2 132 | - libgettextpo=0.22.5 133 | - libgettextpo-devel=0.22.5 134 | - libglib=2.80.2 135 | - libiconv=1.17 136 | - libintl=0.22.5 137 | - libintl-devel=0.22.5 138 | - liblapack=3.9.0 139 | - libnetcdf=4.8.1 140 | - libogg=1.3.5 141 | - libpng=1.6.43 142 | - libpq=12.15 143 | - libsodium=1.0.18 144 | - libspatialindex=1.9.3 145 | - libspatialite=4.3.0a 146 | - libsqlite=3.46.0 147 | - libssh2=1.11.0 148 | - libtiff=4.5.0 149 | - libvorbis=1.3.7 150 | - libwebp=1.2.4 151 | - libwebp-base=1.2.4 152 | - libxml2=2.10.4 153 | - libxslt=1.1.37 154 | - libzip=1.10.1 155 | - libzlib=1.2.13 156 | - locket=1.0.0 157 | - lxml=4.9.1 158 | - lz4-c=1.9.3 159 | - m2w64-expat=2.1.1 160 | - m2w64-gcc-libgfortran=5.3.0 161 | - m2w64-gcc-libs=5.3.0 162 | - m2w64-gcc-libs-core=5.3.0 163 | - m2w64-gettext=0.19.7 164 | - m2w64-gmp=6.1.0 165 | - m2w64-libiconv=1.14 166 | - 
m2w64-libwinpthread-git=5.0.0.4634.697f757 167 | - m2w64-xz=5.2.2 168 | - mapclassify=2.5.0 169 | - markdown=3.6 170 | - markdown-it-py=2.2.0 171 | - markupsafe=2.1.1 172 | - matplotlib=3.5.3 173 | - matplotlib-base=3.5.3 174 | - matplotlib-inline=0.1.7 175 | - mccabe=0.6.1 176 | - mdurl=0.1.2 177 | - mistune=3.0.2 178 | - mkl=2024.1.0 179 | - more-itertools=10.0.0 180 | - msys2-conda-epoch=20160418 181 | - munch=2.5.0 182 | - munkres=1.1.4 183 | - mypy_extensions=1.0.0 184 | - nbclassic=1.1.0 185 | - nbclient=0.7.0 186 | - nbconvert=7.6.0 187 | - nbconvert-core=7.6.0 188 | - nbconvert-pandoc=7.6.0 189 | - nbformat=5.8.0 190 | - nest-asyncio=1.6.0 191 | - networkx=2.6.3 192 | - notebook=6.5.7 193 | - notebook-shim=0.2.4 194 | - numpy=1.21.6 195 | - numpydoc=1.6.0 196 | - openjpeg=2.5.0 197 | - openssl=3.3.1 198 | - owslib=0.29.2 199 | - packaging=23.2 200 | - pandas=1.3.5 201 | - pandoc=3.3 202 | - pandocfilters=1.5.0 203 | - panel=0.14.4 204 | - param=1.13.0 205 | - paramiko=3.4.1 206 | - parso=0.8.4 207 | - partd=1.4.1 208 | - pathspec=0.12.1 209 | - pcre2=10.43 210 | - pexpect=4.9.0 211 | - pickleshare=0.7.5 212 | - pillow=9.4.0 213 | - pip=24.0 214 | - pkgutil-resolve-name=1.3.10 215 | - platformdirs=4.0.0 216 | - plotly=5.23.0 217 | - pluggy=1.0.0 218 | - ply=3.11 219 | - proj=6.2.1 220 | - prometheus_client=0.17.1 221 | - prompt-toolkit=3.0.47 222 | - psutil=5.9.3 223 | - pthreads-win32=2.9.1 224 | - ptyprocess=0.7.0 225 | - py-tools-ds=0.16.9 226 | - pycodestyle=2.8.0 227 | - pycparser=2.21 228 | - pyct=0.4.6 229 | - pyct-core=0.4.6 230 | - pydocstyle=6.2.0 231 | - pyepsg=0.4.0 232 | - pyerfa=2.0.0.1 233 | - pyfftw=0.13.0 234 | - pyflakes=2.4.0 235 | - pygments=2.17.2 236 | - pykdtree=1.3.5 237 | - pykrige=1.7.0 238 | - pylint=2.16.4 239 | - pyls-spyder=0.4.0 240 | - pynacl=1.5.0 241 | - pyparsing=3.1.4 242 | - pyproj=2.4.2.post1 243 | - pyqt=5.15.7 244 | - pyqt5-sip=12.11.0 245 | - pyqtwebengine=5.15.7 246 | - pyrsistent=0.18.1 247 | - pyshp=2.3.1 248 | - pysocks=1.7.1 249 | - python=3.7.12 250 | - python-dateutil=2.9.0 251 | - python-fastjsonschema=2.20.0 252 | - python-lsp-black=1.1.0 253 | - python-lsp-jsonrpc=1.0.0 254 | - python-lsp-server=1.3.3 255 | - python-slugify=8.0.4 256 | - python_abi=3.7 257 | - pytoolconfig=1.2.5 258 | - pytz=2024.1 259 | - pyviz_comms=3.0.3 260 | - pywavelets=1.3.0 261 | - pywin32=303 262 | - pywin32-ctypes=0.2.0 263 | - pywinpty=2.0.8 264 | - pyyaml=6.0 265 | - pyzmq=24.0.1 266 | - qdarkstyle=3.0.2 267 | - qstylizer=0.2.3 268 | - qt-main=5.15.2 269 | - qt-webengine=5.15.9 270 | - qtawesome=1.3.1 271 | - qtconsole=5.2.2 272 | - qtconsole-base=5.2.2 273 | - qtpy=2.4.1 274 | - qtwebkit=5.212 275 | - rasterio=1.1.2 276 | - regex=2022.9.13 277 | - requests=2.32.2 278 | - rich=13.7.1 279 | - rope=1.11.0 280 | - rtree=1.0.1 281 | - scikit-image=0.19.3 282 | - scikit-learn=0.20.3 283 | - scipy=1.7.3 284 | - send2trash=1.8.3 285 | - setuptools=59.8.0 286 | - shapely=1.7.0 287 | - sip=6.6.2 288 | - six=1.16.0 289 | - sniffio=1.3.1 290 | - snowballstemmer=2.2.0 291 | - snuggs=1.4.7 292 | - sortedcontainers=2.4.0 293 | - soupsieve=2.3.2.post1 294 | - spectral=0.23.1 295 | - sphinx=5.3.0 296 | - sphinxcontrib-applehelp=1.0.4 297 | - sphinxcontrib-devhelp=1.0.2 298 | - sphinxcontrib-htmlhelp=2.0.1 299 | - sphinxcontrib-jsmath=1.0.1 300 | - sphinxcontrib-qthelp=1.0.3 301 | - sphinxcontrib-serializinghtml=1.1.5 302 | - spyder=5.2.2 303 | - spyder-kernels=2.2.1 304 | - sqlite=3.46.0 305 | - tabulate=0.9.0 306 | - tbb=2021.8.0 307 | - tenacity=8.2.3 308 | - 
terminado=0.17.0 309 | - text-unidecode=1.3 310 | - textdistance=4.6.3 311 | - three-merge=0.1.1 312 | - tifffile=2020.6.3 313 | - tiledb=2.3.4 314 | - tinycss2=1.3.0 315 | - tk=8.6.13 316 | - toml=0.10.2 317 | - tomli=2.0.1 318 | - tomlkit=0.12.5 319 | - toolz=0.12.1 320 | - tornado=6.2 321 | - tqdm=4.66.5 322 | - traitlets=5.9.0 323 | - typed-ast=1.5.4 324 | - typing-extensions=4.7.1 325 | - typing_extensions=4.7.1 326 | - ucrt=10.0.22621.0 327 | - ujson=5.5.0 328 | - unicodedata2=14.0.0 329 | - urllib3=2.2.1 330 | - vc=14.3 331 | - vc14_runtime=14.40.33810 332 | - vs2015_runtime=14.40.33810 333 | - watchdog=2.1.9 334 | - wcwidth=0.2.10 335 | - webencodings=0.5.1 336 | - websocket-client=1.6.1 337 | - wheel=0.42.0 338 | - win_inet_pton=1.1.0 339 | - winpty=0.4.3 340 | - wrapt=1.14.1 341 | - xerces-c=3.2.5 342 | - xyzservices=2023.5.0 343 | - xz=5.2.6 344 | - yaml=0.2.5 345 | - yapf=0.33.0 346 | - zeromq=4.3.4 347 | - zipp=3.15.0 348 | - zlib=1.2.13 349 | - zstd=1.5.6 350 | - pip: 351 | - gdal==3.0.2 352 | prefix: C:\Users\scotero\AppData\Local\anaconda3\envs\coastsat_ps 353 | -------------------------------------------------------------------------------- /alt_environments/environment_alt_MAR_21_explicit.yml: -------------------------------------------------------------------------------- 1 | name: coastsat_ps 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - affine=2.3.0=py_0 7 | - alabaster=0.7.12=py_0 8 | - appdirs=1.4.4=pyh9f0ad1d_0 9 | - applaunchservices=0.2.1=py_0 10 | - appnope=0.1.2=py37hf985489_1 11 | - argh=0.26.2=pyh9f0ad1d_1002 12 | - argon2-cffi=20.1.0=py37hf967b71_2 13 | - arosics=1.3.0=py37hf985489_0 14 | - arrow=1.0.3=py37hf985489_0 15 | - astroid=2.5.3=py37hf985489_0 16 | - astropy=4.2.1=py37h271585c_0 17 | - async_generator=1.10=py_0 18 | - atomicwrites=1.4.0=pyh9f0ad1d_0 19 | - attrs=20.3.0=pyhd3deb0d_0 20 | - autopep8=1.5.6=pyhd8ed1ab_0 21 | - babel=2.9.0=pyhd3deb0d_0 22 | - backcall=0.2.0=pyh9f0ad1d_0 23 | - backports=1.0=py_2 24 | - backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0 25 | - binaryornot=0.4.4=py_1 26 | - black=20.8b1=py_1 27 | - bleach=3.3.0=pyh44b312d_0 28 | - blosc=1.21.0=he49afe7_0 29 | - bokeh=2.3.1=py37hf985489_0 30 | - boost-cpp=1.74.0=h43a636a_2 31 | - branca=0.4.2=pyhd8ed1ab_0 32 | - brotli=1.0.9=h046ec9c_4 33 | - brotlipy=0.7.0=py37hf967b71_1001 34 | - brunsli=0.1=h046ec9c_0 35 | - bzip2=1.0.8=h0d85af4_4 36 | - c-ares=1.17.1=h0d85af4_1 37 | - ca-certificates=2020.12.5=h033912b_0 38 | - cairo=1.16.0=he43a7df_1008 39 | - cartopy=0.18.0=py37h0dfa92a_13 40 | - certifi=2020.12.5=py37hf985489_1 41 | - cffi=1.14.5=py37h4d371b4_0 42 | - cfitsio=3.470=h01dc385_7 43 | - cftime=1.4.1=py37h183f225_0 44 | - chardet=4.0.0=py37hf985489_1 45 | - charls=2.2.0=h046ec9c_0 46 | - click=7.1.2=pyh9f0ad1d_0 47 | - click-plugins=1.1.1=py_0 48 | - cligj=0.7.1=pyhd8ed1ab_0 49 | - cloudpickle=1.6.0=py_0 50 | - cmocean=2.0=py_3 51 | - colorama=0.4.4=pyh9f0ad1d_0 52 | - colorcet=2.0.6=pyhd8ed1ab_0 53 | - colorspacious=1.1.2=pyh24bf2e0_0 54 | - cookiecutter=1.7.2=pyh9f0ad1d_0 55 | - cryptography=3.4.7=py37hce4a858_0 56 | - curl=7.76.1=h06286d4_1 57 | - cycler=0.10.0=py_2 58 | - cytoolz=0.11.0=py37hf967b71_3 59 | - dask-core=2021.4.0=pyhd8ed1ab_0 60 | - dataclasses=0.8=pyhc8e2a94_1 61 | - dbus=1.13.6=ha13b53f_2 62 | - decorator=5.0.7=pyhd8ed1ab_0 63 | - defusedxml=0.7.1=pyhd8ed1ab_0 64 | - descartes=1.1.0=py_4 65 | - diff-match-patch=20200713=pyh9f0ad1d_0 66 | - dill=0.3.3=pyhd8ed1ab_0 67 | - docutils=0.17.1=py37hf985489_0 68 | - 
entrypoints=0.3=pyhd8ed1ab_1003 69 | - expat=2.3.0=he49afe7_0 70 | - fftw=3.3.9=nompi_h02cd531_101 71 | - fiona=1.8.18=py37h4963cc5_1 72 | - flake8=3.8.4=py_0 73 | - folium=0.11.0=py_0 74 | - fontconfig=2.13.1=h10f422b_1005 75 | - freetype=2.10.4=h4cff582_1 76 | - freexl=1.0.6=h0d85af4_0 77 | - fsspec=2021.4.0=pyhd8ed1ab_0 78 | - future=0.18.2=py37hf985489_3 79 | - gdal=3.2.1=py37h3b7b407_7 80 | - geoarray=0.11.0=py37hf985489_0 81 | - geojson=2.5.0=py_0 82 | - geopandas=0.9.0=pyhd8ed1ab_0 83 | - geos=3.9.1=he49afe7_2 84 | - geotiff=1.6.0=h4fce3c3_4 85 | - gettext=0.19.8.1=h7937167_1005 86 | - giflib=5.2.1=hbcb3906_2 87 | - glib=2.68.1=he49afe7_0 88 | - glib-tools=2.68.1=he49afe7_0 89 | - hdf4=4.2.13=hefd3b78_1005 90 | - hdf5=1.10.6=nompi_hc5d9132_1114 91 | - holoviews=1.14.3=pyhd8ed1ab_0 92 | - icu=68.1=h74dc148_0 93 | - idna=2.10=pyh9f0ad1d_0 94 | - imagecodecs=2021.3.31=py37h8c75038_0 95 | - imageio=2.9.0=py_0 96 | - imagesize=1.2.0=py_0 97 | - importlib-metadata=4.0.1=py37hf985489_0 98 | - importlib_metadata=4.0.1=hd8ed1ab_0 99 | - inflection=0.5.1=pyh9f0ad1d_0 100 | - intervaltree=3.0.2=py_0 101 | - ipykernel=5.5.3=py37h85f7c60_0 102 | - ipython=7.22.0=py37h85f7c60_0 103 | - ipython_genutils=0.2.0=py_1 104 | - isort=5.8.0=pyhd8ed1ab_0 105 | - jedi=0.17.2=py37hf985489_1 106 | - jinja2=2.11.3=pyh44b312d_0 107 | - jinja2-time=0.2.0=py_2 108 | - joblib=1.0.1=pyhd8ed1ab_0 109 | - jpeg=9d=hbcb3906_0 110 | - json-c=0.15=hcb556a6_0 111 | - jsonschema=3.2.0=pyhd8ed1ab_3 112 | - jupyter_client=6.1.12=pyhd8ed1ab_0 113 | - jupyter_core=4.7.1=py37hf985489_0 114 | - jupyterlab_pygments=0.1.2=pyh9f0ad1d_0 115 | - jxrlib=1.1=h35c211d_2 116 | - kealib=1.4.14=h31dd65d_2 117 | - keyring=23.0.1=py37hf985489_0 118 | - kiwisolver=1.3.1=py37h70f7d40_1 119 | - krb5=1.17.2=h60d9502_0 120 | - lazy-object-proxy=1.6.0=py37h271585c_0 121 | - lcms2=2.12=h577c468_0 122 | - lerc=2.2.1=h046ec9c_0 123 | - libaec=1.0.4=h046ec9c_1 124 | - libblas=3.9.0=8_openblas 125 | - libcblas=3.9.0=8_openblas 126 | - libclang=11.1.0=default_he082bbe_0 127 | - libcurl=7.76.1=h8ef9fac_1 128 | - libcxx=11.1.0=habf9029_0 129 | - libdap4=3.20.6=h3e144a0_2 130 | - libdeflate=1.7=h35c211d_5 131 | - libedit=3.1.20191231=h0678c8f_2 132 | - libev=4.33=haf1e3a3_1 133 | - libffi=3.3=h046ec9c_2 134 | - libgdal=3.2.1=h62a8350_7 135 | - libgfortran=5.0.0=9_3_0_h6c81a4c_22 136 | - libgfortran5=9.3.0=h6c81a4c_22 137 | - libglib=2.68.1=hd556434_0 138 | - libiconv=1.16=haf1e3a3_0 139 | - libkml=1.3.0=h8fd9edb_1013 140 | - liblapack=3.9.0=8_openblas 141 | - libllvm11=11.1.0=hd011deb_2 142 | - libnetcdf=4.7.4=nompi_h9d8a93f_107 143 | - libnghttp2=1.43.0=h07e645a_0 144 | - libopenblas=0.3.12=openmp_h54245bb_1 145 | - libpng=1.6.37=h7cec526_2 146 | - libpq=13.2=h052a64a_2 147 | - librttopo=1.1.0=h5413771_6 148 | - libsodium=1.0.18=hbcb3906_1 149 | - libspatialindex=1.9.3=h1c7c35f_3 150 | - libspatialite=5.0.1=h395434c_3 151 | - libssh2=1.9.0=h52ee1ee_6 152 | - libtiff=4.2.0=h7c11950_1 153 | - libwebp-base=1.2.0=h0d85af4_2 154 | - libxml2=2.9.10=h93ec3fd_4 155 | - libzopfli=1.0.3=h046ec9c_0 156 | - llvm-openmp=11.1.0=hda6cdc1_1 157 | - locket=0.2.0=py_2 158 | - lz4-c=1.9.3=h046ec9c_0 159 | - markdown=3.3.4=pyhd8ed1ab_0 160 | - markupsafe=1.1.1=py37hf967b71_3 161 | - matplotlib=3.4.1=py37hf985489_0 162 | - matplotlib-base=3.4.1=py37hb018525_0 163 | - mccabe=0.6.1=py_1 164 | - mistune=0.8.4=py37hf967b71_1003 165 | - munch=2.5.0=py_0 166 | - mypy_extensions=0.4.3=py37hf985489_3 167 | - mysql-common=8.0.23=h694c41f_1 168 | - mysql-libs=8.0.23=hbeb7981_1 169 | - 
nbclient=0.5.3=pyhd8ed1ab_0 170 | - nbconvert=6.0.7=py37hf985489_3 171 | - nbformat=5.1.3=pyhd8ed1ab_0 172 | - ncurses=6.2=h2e338ed_4 173 | - nest-asyncio=1.5.1=pyhd8ed1ab_0 174 | - netcdf4=1.5.6=nompi_py37hd2a0c98_102 175 | - networkx=2.5=py_0 176 | - notebook=6.3.0=pyha770c72_1 177 | - nspr=4.30=hcd9eead_0 178 | - nss=3.64=h31e2bf1_0 179 | - numpy=1.20.2=py37h84c02c4_0 180 | - numpydoc=1.1.0=py_1 181 | - olefile=0.46=pyh9f0ad1d_1 182 | - openjpeg=2.4.0=h6cbf5cd_0 183 | - openssl=1.1.1k=h0d85af4_0 184 | - packaging=20.9=pyh44b312d_0 185 | - pandas=1.2.4=py37hb23ed4d_0 186 | - pandoc=2.13=h0d85af4_0 187 | - pandocfilters=1.4.2=py_1 188 | - panel=0.11.3=pyhd8ed1ab_0 189 | - param=1.10.1=pyhd3deb0d_0 190 | - parso=0.7.0=pyh9f0ad1d_0 191 | - partd=1.2.0=pyhd8ed1ab_0 192 | - pathspec=0.8.1=pyhd3deb0d_0 193 | - pcre=8.44=hb1e8313_0 194 | - pexpect=4.8.0=pyh9f0ad1d_2 195 | - pickleshare=0.7.5=py_1003 196 | - pillow=8.1.2=py37hd4e48bc_1 197 | - pip=21.0.1=pyhd8ed1ab_0 198 | - pixman=0.40.0=hbcb3906_0 199 | - plotly=4.14.3=pyh44b312d_0 200 | - pluggy=0.13.1=py37hf985489_4 201 | - pooch=1.3.0=pyhd8ed1ab_0 202 | - poppler=0.89.0=hd735947_5 203 | - poppler-data=0.4.10=0 204 | - postgresql=13.2=ha63e576_2 205 | - poyo=0.5.0=py_0 206 | - proj=7.2.0=h78d1473_2 207 | - prometheus_client=0.10.1=pyhd8ed1ab_0 208 | - prompt-toolkit=3.0.18=pyha770c72_0 209 | - psutil=5.8.0=py37h271585c_1 210 | - ptyprocess=0.7.0=pyhd3deb0d_0 211 | - py-tools-ds=0.16.8=py37hf985489_0 212 | - pycodestyle=2.6.0=pyh9f0ad1d_0 213 | - pycparser=2.20=pyh9f0ad1d_2 214 | - pyct=0.4.6=py_0 215 | - pyct-core=0.4.6=py_0 216 | - pydocstyle=6.0.0=pyhd8ed1ab_0 217 | - pyepsg=0.4.0=py_0 218 | - pyerfa=1.7.2=py37h7585375_0 219 | - pyfftw=0.12.0=py37h2f54c68_2 220 | - pyflakes=2.2.0=pyh9f0ad1d_0 221 | - pygments=2.8.1=pyhd8ed1ab_0 222 | - pykrige=1.6.0=py37h271585c_0 223 | - pylint=2.7.2=py37hf985489_0 224 | - pyls-black=0.4.6=pyh9f0ad1d_0 225 | - pyls-spyder=0.3.2=pyhd8ed1ab_0 226 | - pyopenssl=20.0.1=pyhd8ed1ab_0 227 | - pyparsing=2.4.7=pyh9f0ad1d_0 228 | - pyproj=3.0.1=py37hb0b2c16_0 229 | - pyqt=5.12.3=py37hf985489_7 230 | - pyqt-impl=5.12.3=py37h5ae2165_7 231 | - pyqt5-sip=4.19.18=py37h5ab9d7d_7 232 | - pyqtchart=5.12=py37h5ae2165_7 233 | - pyqtwebengine=5.12.1=py37h5ae2165_7 234 | - pyrsistent=0.17.3=py37hf967b71_2 235 | - pyshp=2.1.3=pyh44b312d_0 236 | - pysocks=1.7.1=py37hf985489_3 237 | - python=3.7.10=h7728216_100_cpython 238 | - python-dateutil=2.8.1=py_0 239 | - python-jsonrpc-server=0.4.0=pyh9f0ad1d_0 240 | - python-language-server=0.36.2=pyhd8ed1ab_0 241 | - python-slugify=4.0.1=pyh9f0ad1d_0 242 | - python.app=1.3=py37hf967b71_4 243 | - python_abi=3.7=1_cp37m 244 | - pytz=2021.1=pyhd8ed1ab_0 245 | - pyviz_comms=2.0.1=pyhd3deb0d_0 246 | - pywavelets=1.1.1=py37h37391d0_3 247 | - pyyaml=5.4.1=py37hf967b71_0 248 | - pyzmq=22.0.3=py37h8f778e5_1 249 | - qdarkstyle=3.0.2=pyhd8ed1ab_0 250 | - qstylizer=0.1.10=pyhd8ed1ab_0 251 | - qt=5.12.9=h126340a_4 252 | - qtawesome=1.0.2=pyhd8ed1ab_0 253 | - qtconsole=5.0.3=pyhd8ed1ab_0 254 | - qtpy=1.9.0=py_0 255 | - rasterio=1.2.2=py37h0b4729b_0 256 | - readline=8.1=h05e3726_0 257 | - regex=2021.4.4=py37h271585c_0 258 | - requests=2.25.1=pyhd3deb0d_0 259 | - retrying=1.3.3=py_2 260 | - rope=0.18.0=pyhd3deb0d_0 261 | - rtree=0.9.7=py37hf13911c_1 262 | - scikit-image=0.18.1=py37h010c265_0 263 | - scikit-learn=0.22.2.post1=py37h3dc85bc_0 264 | - scipy=1.6.2=py37h866764c_0 265 | - send2trash=1.5.0=py_0 266 | - setuptools=49.6.0=py37hf985489_3 267 | - shapely=1.7.1=py37h2c4361b_4 268 | - 
six=1.15.0=pyh9f0ad1d_0 269 | - snappy=1.1.8=hb1e8313_3 270 | - snowballstemmer=2.1.0=pyhd8ed1ab_0 271 | - snuggs=1.4.7=py_0 272 | - sortedcontainers=2.3.0=pyhd8ed1ab_0 273 | - spectral=0.22.2=pyh44b312d_0 274 | - sphinx=3.5.3=pyhd8ed1ab_0 275 | - sphinxcontrib-applehelp=1.0.2=py_0 276 | - sphinxcontrib-devhelp=1.0.2=py_0 277 | - sphinxcontrib-htmlhelp=1.0.3=py_0 278 | - sphinxcontrib-jsmath=1.0.1=py_0 279 | - sphinxcontrib-qthelp=1.0.3=py_0 280 | - sphinxcontrib-serializinghtml=1.1.4=py_0 281 | - spyder=5.0.1=py37hf985489_0 282 | - spyder-kernels=2.0.1=py37hf985489_0 283 | - sqlite=3.35.5=h44b9ce1_0 284 | - terminado=0.9.4=py37hf985489_0 285 | - testpath=0.4.4=py_0 286 | - text-unidecode=1.3=py_0 287 | - textdistance=4.2.1=pyhd8ed1ab_0 288 | - three-merge=0.1.1=pyh9f0ad1d_0 289 | - tifffile=2021.4.8=pyhd8ed1ab_0 290 | - tiledb=2.2.7=he9a4fb4_0 291 | - tinycss=0.4=pyhd8ed1ab_1002 292 | - tk=8.6.10=h0419947_1 293 | - toml=0.10.2=pyhd8ed1ab_0 294 | - toolz=0.11.1=py_0 295 | - tornado=6.1=py37hf967b71_1 296 | - tqdm=4.60.0=pyhd8ed1ab_0 297 | - traitlets=5.0.5=py_0 298 | - typed-ast=1.4.3=py37h271585c_0 299 | - typing_extensions=3.7.4.3=py_0 300 | - tzcode=2021a=h0d85af4_1 301 | - tzdata=2021a=he74cb21_0 302 | - ujson=4.0.2=py37hd8d24ac_0 303 | - unidecode=1.2.0=pyhd8ed1ab_0 304 | - urllib3=1.26.4=pyhd8ed1ab_0 305 | - watchdog=1.0.2=py37hf967b71_1 306 | - wcwidth=0.2.5=pyh9f0ad1d_2 307 | - webencodings=0.5.1=py_1 308 | - wheel=0.36.2=pyhd3deb0d_0 309 | - whichcraft=0.6.1=py_0 310 | - wrapt=1.12.1=py37hf967b71_3 311 | - wurlitzer=2.1.0=py37hf985489_0 312 | - xarray=0.17.0=pyhd8ed1ab_0 313 | - xerces-c=3.2.3=h379762d_2 314 | - xz=5.2.5=haf1e3a3_1 315 | - yaml=0.2.5=haf1e3a3_0 316 | - yapf=0.31.0=pyhd8ed1ab_0 317 | - zeromq=4.3.4=h1c7c35f_0 318 | - zfp=0.5.5=he49afe7_5 319 | - zipp=3.4.1=pyhd8ed1ab_0 320 | - zlib=1.2.11=h7795811_1010 321 | - zstd=1.4.9=h582d3a0_0 322 | -------------------------------------------------------------------------------- /alt_environments/environment_alt_MAR_21_general.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - conda-forge 3 | - defaults 4 | dependencies: 5 | - python=3.7 6 | - numpy=1.20.1 7 | - matplotlib=3.3.4 8 | - pandas=1.2.2 9 | - geopandas=0.9.0 10 | - pytz=2020.1 11 | - scikit-image[version='>0.16.0'] 12 | - scikit-learn=0.20.3 13 | - astropy=4.2 14 | - gdal=3.0.4 15 | - shapely=1.7.1 16 | - scipy=1.2.1 17 | - pyproj[version='>2.2.0'] 18 | - pykrige=1.4.0 19 | - cartopy=0.17.0 20 | - pyfftw=0.12.0 21 | - rasterio=1.1.5 22 | - arosics=1.2.6 23 | - notebook 24 | - spyder 25 | -------------------------------------------------------------------------------- /coastsat_ps/classifier/classifier_functions.py: -------------------------------------------------------------------------------- 1 | # Classification functions 2 | 3 | import os 4 | import pytz 5 | import pickle 6 | import sys 7 | import matplotlib.pyplot as plt 8 | import matplotlib.cm as cm 9 | import numpy as np 10 | 11 | from datetime import datetime 12 | from pylab import ginput 13 | from matplotlib.widgets import LassoSelector 14 | from matplotlib import path 15 | from sklearn.metrics import confusion_matrix 16 | from skimage.segmentation import flood 17 | import skimage.morphology as morphology 18 | 19 | # coastsat modules 20 | sys.path.insert(0, os.pardir) 21 | 22 | from interactive import rescale_image_intensity 23 | 24 | 25 | # coastsat modules 26 | sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '..', '..'))) 27 | from 
shoreline_tools import (calculate_features, classify_image_NN, 28 | nd_index, get_ps_data) 29 | 30 | #%% PS data bridging functions 31 | 32 | def get_ps_metadata(filepath_images, epsg): 33 | 34 | ''' Function to match PS data with CoastSat metadata input ''' 35 | 36 | metadata = {'PS':{ 37 | 'filenames':[], 38 | 'acc_georef':[], 39 | 'epsg':[], 40 | 'dates':[]} 41 | } 42 | 43 | # Extract filenames 44 | for file in os.listdir(filepath_images): 45 | if file[-7:] == 'TOA.tif': 46 | metadata['PS']['filenames'] += [file] 47 | metadata['PS']['filenames'].sort() 48 | 49 | # Add other file data 50 | for file in metadata['PS']['filenames']: 51 | metadata['PS']['acc_georef'] += [5] 52 | metadata['PS']['epsg'] += [epsg] 53 | metadata['PS']['dates'] += [ 54 | pytz.utc.localize(datetime(int(file[:4]), 55 | int(file[5:7]), 56 | int(file[8:10]), 57 | int(file[11:13]), 58 | int(file[14:16]), 59 | int(file[17:19]))) 60 | ] 61 | 62 | return metadata 63 | 64 | 65 | #%% Modified CoastSat Functions - Labelling - SDS Classify 66 | 67 | class SelectFromImage(object): 68 | """ 69 | Class used to draw the lassos on the images with two methods: 70 | - onselect: save the pixels inside the selection 71 | - disconnect: stop drawing lassos on the image 72 | 73 | Copied with permission from CoastSat (KV, 2020) 74 | https://github.com/kvos/CoastSat 75 | 76 | """ 77 | # initialize lasso selection class 78 | def __init__(self, ax, implot, color=[1,1,1]): 79 | self.canvas = ax.figure.canvas 80 | self.implot = implot 81 | self.array = implot.get_array() 82 | xv, yv = np.meshgrid(np.arange(self.array.shape[1]),np.arange(self.array.shape[0])) 83 | self.pix = np.vstack( (xv.flatten(), yv.flatten()) ).T 84 | self.ind = [] 85 | self.im_bool = np.zeros((self.array.shape[0], self.array.shape[1])) 86 | self.color = color 87 | self.lasso = LassoSelector(ax, onselect=self.onselect) 88 | 89 | def onselect(self, verts): 90 | # find pixels contained in the lasso 91 | p = path.Path(verts) 92 | self.ind = p.contains_points(self.pix, radius=1) 93 | # color selected pixels 94 | array_list = [] 95 | for k in range(self.array.shape[2]): 96 | array2d = self.array[:,:,k] 97 | lin = np.arange(array2d.size) 98 | new_array2d = array2d.flatten() 99 | new_array2d[lin[self.ind]] = self.color[k] 100 | array_list.append(new_array2d.reshape(array2d.shape)) 101 | self.array = np.stack(array_list,axis=2) 102 | self.implot.set_data(self.array) 103 | self.canvas.draw_idle() 104 | # update boolean image with selected pixels 105 | vec_bool = self.im_bool.flatten() 106 | vec_bool[lin[self.ind]] = 1 107 | self.im_bool = vec_bool.reshape(self.im_bool.shape) 108 | 109 | def disconnect(self): 110 | self.lasso.disconnect_events() 111 | 112 | 113 | def label_images(metadata,settings): 114 | """ 115 | Load satellite images and interactively label different classes (hard-coded) 116 | 117 | KV WRL 2019 118 | 119 | Modified for PS data YD WRL 2020 120 | 121 | Arguments: 122 | ----------- 123 | metadata: dict 124 | contains all the information about the satellite images that were downloaded 125 | settings: dict with the following keys 126 | 'cloud_thresh': float 127 | value between 0 and 1 indicating the maximum cloud fraction in 128 | the cropped image that is accepted 129 | 'cloud_mask_issue': boolean 130 | True if there is an issue with the cloud mask and sand pixels 131 | are erroneously being masked on the images 132 | 'labels': dict 133 | list of label names (key) and label numbers (value) for each class 134 | 'flood_fill': boolean 135 | True to use the flood_fill 
functionality when labelling sand pixels
136 |         'tolerance': float
137 |             tolerance value for flood fill when labelling the sand pixels
138 |         'filepath_train': str
139 |             directory in which to save the labelled data
140 |         'inputs': dict
141 |             input parameters (sitename, filepath, polygon, dates, sat_list)
142 | 
143 |     Returns:
144 |     -----------
145 |     Stores the labelled data in the specified directory
146 | 
147 |     """
148 | 
149 |     filepath_train = settings['filepath_train']
150 |     # initialize figure
151 |     fig,ax = plt.subplots(1,1,figsize=[17,10], tight_layout=True,sharex=True,
152 |                           sharey=True)
153 |     mng = plt.get_current_fig_manager()
154 |     mng.window.showMaximized()
155 | 
156 |     # loop through satellites
157 |     for satname in metadata.keys():
158 |         filepath = settings['inputs']['filepath']
159 |         filenames = metadata[satname]['filenames']
160 |         # loop through images
161 |         for i in range(len(filenames)):
162 |             # image filename
163 |             fn = os.path.join(filepath, filenames[i])
164 |             # read and preprocess image
165 |             im_ms, cloud_mask = get_ps_data(fn)
166 |             # calculate cloud cover
167 |             cloud_cover = np.divide(sum(sum(cloud_mask.astype(int))),
168 |                                     (cloud_mask.shape[0]*cloud_mask.shape[1]))
169 |             # skip image if cloud cover is above threshold
170 |             if cloud_cover > settings['cloud_thresh'] or cloud_cover == 1:
171 |                 continue
172 |             # get individual RGB image
173 |             im_RGB = rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
174 |             im_NDVI = nd_index(im_ms[:,:,3], im_ms[:,:,2], cloud_mask) # Nir - Red
175 |             #im_NDWI = nd_index(im_ms[:,:,3], im_ms[:,:,1], cloud_mask) # Nir - Green
176 |             im_NDWI = nd_index(im_ms[:,:,3], im_ms[:,:,0], cloud_mask) # Nir - Blue
177 | 
178 |             # initialise labels
179 |             im_viz = im_RGB.copy()
180 |             im_labels = np.zeros([im_RGB.shape[0],im_RGB.shape[1]])
181 |             # show RGB image
182 |             ax.axis('off')
183 |             ax.imshow(im_RGB)
184 |             implot = ax.imshow(im_viz, alpha=0.6)
185 |             filename = filenames[i][:filenames[i].find('.')][:-4]
186 |             ax.set_title(filename)
187 | 
188 |             ##############################################################
189 |             # select image to label
190 |             ##############################################################
191 |             # set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
192 |             # the key press is stored in a mutable dict so that it can be accessed after the keypress event
193 |             key_event = {}
194 |             def press(event):
195 |                 # store what key was pressed in the dictionary
196 |                 key_event['pressed'] = event.key
197 |             # let the user press a key, right arrow to keep the image, left arrow to skip it
198 |             # to break the loop the user can press 'escape'
199 |             while True:
200 |                 btn_keep = ax.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
201 |                                    transform=ax.transAxes,
202 |                                    bbox=dict(boxstyle="square", ec='k',fc='w'))
203 |                 btn_skip = ax.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
204 |                                    transform=ax.transAxes,
205 |                                    bbox=dict(boxstyle="square", ec='k',fc='w'))
206 |                 btn_esc = ax.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
207 |                                   transform=ax.transAxes,
208 |                                   bbox=dict(boxstyle="square", ec='k',fc='w'))
209 |                 fig.canvas.draw_idle()
210 |                 fig.canvas.mpl_connect('key_press_event', press)
211 |                 plt.waitforbuttonpress()
212 |                 # after button is pressed, remove the buttons
213 |                 btn_skip.remove()
214 |                 btn_keep.remove()
215 |                 btn_esc.remove()
216 | 
217 |                 # keep/skip image according to the pressed key, 'escape' to break the loop
218 |                 if key_event.get('pressed') == 'right':
219 |                     skip_image = False
220 |                     break
221 |                 elif key_event.get('pressed') == 'left':
222 |                     skip_image = True
223 |                     break
224 |                 elif key_event.get('pressed') == 'escape':
225 |                     plt.close()
226 |                     raise StopIteration('User cancelled labelling images')
227 |                 else:
228 |                     plt.waitforbuttonpress()
229 | 
230 |             # if user decided to skip show the next image
231 |             if skip_image:
232 |                 ax.clear()
233 |                 continue
234 |             # otherwise label this image
235 |             else:
236 |                 ##############################################################
237 |                 # digitize sandy pixels
238 |                 ##############################################################
239 |                 ax.set_title('Click on SAND pixels (flood fill activated, tolerance = %.2f)\nwhen finished press <enter>'%settings['tolerance'])
240 |                 # create erase button, if you click there it deletes the last selection
241 |                 btn_erase = ax.text(im_ms.shape[1], 0, 'Erase', size=20, ha='right', va='top',
242 |                                     bbox=dict(boxstyle="square", ec='k',fc='w'))
243 |                 fig.canvas.draw_idle()
244 |                 color_sand = settings['colors']['sand']
245 |                 sand_pixels = []
246 |                 while 1:
247 |                     seed = ginput(n=1, timeout=0, show_clicks=True)
248 |                     # if empty break the loop and go to next label
249 |                     if len(seed) == 0:
250 |                         break
251 |                     else:
252 |                         # round to pixel location
253 |                         seed = np.round(seed[0]).astype(int)
254 |                         # if user clicks on erase, delete the last selection
255 |                         if seed[0] > 0.95*im_ms.shape[1] and seed[1] < 0.05*im_ms.shape[0]:
256 |                             if len(sand_pixels) > 0:
257 |                                 im_labels[sand_pixels[-1]] = 0
258 |                                 for k in range(im_viz.shape[2]):
259 |                                     im_viz[sand_pixels[-1],k] = im_RGB[sand_pixels[-1],k]
260 |                                 implot.set_data(im_viz)
261 |                                 fig.canvas.draw_idle()
262 |                                 del sand_pixels[-1]
263 | 
264 |                         # otherwise label the selected sand pixels
265 |                         else:
266 |                             # flood fill the NDVI and the NDWI
267 |                             fill_NDVI = flood(im_NDVI, (seed[1],seed[0]), tolerance=settings['tolerance'])
268 |                             fill_NDWI = flood(im_NDWI, (seed[1],seed[0]), tolerance=settings['tolerance'])
269 |                             # compute the intersection of the two masks
270 |                             fill_sand = np.logical_and(fill_NDVI, fill_NDWI)
271 |                             im_labels[fill_sand] = settings['labels']['sand']
272 |                             sand_pixels.append(fill_sand)
273 |                             # show the labelled pixels
274 |                             for k in range(im_viz.shape[2]):
275 |                                 im_viz[im_labels==settings['labels']['sand'],k] = color_sand[k]
276 |                             implot.set_data(im_viz)
277 |                             fig.canvas.draw_idle()
278 | 
279 |                 ##############################################################
280 |                 # digitize white-water pixels
281 |                 ##############################################################
282 |                 color_ww = settings['colors']['white-water']
283 |                 ax.set_title('Click on individual WHITE-WATER pixels (no flood fill)\nwhen finished press <enter>')
284 |                 fig.canvas.draw_idle()
285 |                 ww_pixels = []
286 |                 while 1:
287 |                     seed = ginput(n=1, timeout=0, show_clicks=True)
288 |                     # if empty break the loop and go to next label
289 |                     if len(seed) == 0:
290 |                         break
291 |                     else:
292 |                         # round to pixel location
293 |                         seed = np.round(seed[0]).astype(int)
294 |                         # if user clicks on erase, delete the last labelled pixels
295 |                         if seed[0] > 0.95*im_ms.shape[1] and seed[1] < 0.05*im_ms.shape[0]:
296 |                             if len(ww_pixels) > 0:
297 |                                 im_labels[ww_pixels[-1][1],ww_pixels[-1][0]] = 0
298 |                                 for k in range(im_viz.shape[2]):
299 |                                     im_viz[ww_pixels[-1][1],ww_pixels[-1][0],k] = im_RGB[ww_pixels[-1][1],ww_pixels[-1][0],k]
300 |                                 implot.set_data(im_viz)
301 |                                 fig.canvas.draw_idle()
302 |                                 del ww_pixels[-1]
303 |                         else:
304 |                             im_labels[seed[1],seed[0]] = settings['labels']['white-water']
305 |                             for k in range(im_viz.shape[2]):
306 |                                 im_viz[seed[1],seed[0],k] = color_ww[k]
307 |                             implot.set_data(im_viz)
308 |                             fig.canvas.draw_idle()
309 |                             ww_pixels.append(seed)
310 | 
311 |                 im_sand_ww = im_viz.copy()
312 |                 btn_erase.set(text='<esc> to Erase', fontsize=12)
313 | 
314 |                 ##############################################################
315 |                 # digitize water pixels (with lassos)
316 |                 ##############################################################
317 |                 color_water = settings['colors']['water']
318 |                 ax.set_title('Click and hold to draw lassos and select WATER pixels\nwhen finished press <enter>')
319 |                 fig.canvas.draw_idle()
320 |                 selector_water = SelectFromImage(ax, implot, color_water)
321 |                 key_event = {}
322 |                 while True:
323 |                     fig.canvas.draw_idle()
324 |                     fig.canvas.mpl_connect('key_press_event', press)
325 |                     plt.waitforbuttonpress()
326 |                     if key_event.get('pressed') == 'enter':
327 |                         selector_water.disconnect()
328 |                         break
329 |                     elif key_event.get('pressed') == 'escape':
330 |                         selector_water.array = im_sand_ww
331 |                         implot.set_data(selector_water.array)
332 |                         fig.canvas.draw_idle()
333 |                         selector_water.implot = implot
334 |                         selector_water.im_bool = np.zeros((selector_water.array.shape[0], selector_water.array.shape[1]))
335 |                         selector_water.ind=[]
336 |                 # update im_viz and im_labels
337 |                 im_viz = selector_water.array
338 |                 selector_water.im_bool = selector_water.im_bool.astype(bool)
339 |                 im_labels[selector_water.im_bool] = settings['labels']['water']
340 | 
341 |                 im_sand_ww_water = im_viz.copy()
342 | 
343 |                 ##############################################################
344 |                 # digitize land pixels (with lassos)
345 |                 ##############################################################
346 |                 color_land = settings['colors']['other land features']
347 |                 ax.set_title('Click and hold to draw lassos and select OTHER LAND pixels\nwhen finished press <enter>')
348 |                 fig.canvas.draw_idle()
349 |                 selector_land = SelectFromImage(ax, implot, color_land)
350 |                 key_event = {}
351 |                 while True:
352 |                     fig.canvas.draw_idle()
353 |                     fig.canvas.mpl_connect('key_press_event', press)
354 |                     plt.waitforbuttonpress()
355 |                     if key_event.get('pressed') == 'enter':
356 |                         selector_land.disconnect()
357 |                         break
358 |                     elif key_event.get('pressed') == 'escape':
359 |                         selector_land.array = im_sand_ww_water
360 |                         implot.set_data(selector_land.array)
361 |                         fig.canvas.draw_idle()
362 |                         selector_land.implot = implot
363 |                         selector_land.im_bool = np.zeros((selector_land.array.shape[0], selector_land.array.shape[1]))
364 |                         selector_land.ind=[]
365 |                 # update im_viz and im_labels
366 |                 im_viz = selector_land.array
367 |                 selector_land.im_bool = selector_land.im_bool.astype(bool)
368 |                 im_labels[selector_land.im_bool] = settings['labels']['other land features']
369 | 
370 |             # save labelled image
371 |             ax.set_title(filename)
372 |             fig.canvas.draw_idle()
373 |             fp = os.path.join(filepath_train,settings['inputs']['sitename'])
374 |             if not os.path.exists(fp):
375 |                 os.makedirs(fp)
376 |             fig.savefig(os.path.join(fp,filename+'.jpg'), dpi=150)
377 |             ax.clear()
378 |             # save labels and features
379 |             features = dict([])
380 |             for key in settings['labels'].keys():
381 |                 im_bool = im_labels == settings['labels'][key]
382 |                 features[key] = calculate_features(im_ms, cloud_mask, im_bool)
383 |             training_data = {'labels':im_labels, 'features':features, 'label_ids':settings['labels']}
384 |             with open(os.path.join(fp, filename + '.pkl'), 'wb') as f:
385 |                 pickle.dump(training_data,f)
386 | 
387 |     # close figure when finished
388 |     plt.close(fig)
389 | 
390 | 
391 | def load_labels(train_sites, settings):
392 |     """
393 |     Load the labelled data from 
the different training sites
394 | 
395 |     KV WRL 2019
396 | 
397 |     Arguments:
398 |     -----------
399 |     train_sites: list of str
400 |         sites to be loaded
401 |     settings: dict with the following keys
402 |         'labels': dict
403 |             list of label names (key) and label numbers (value) for each class
404 |         'filepath_train': str
405 |             directory in which to save the labelled data
406 | 
407 |     Returns:
408 |     -----------
409 |     features: dict
410 |         contains the features for each labelled pixel
411 | 
412 |     """
413 | 
414 |     filepath_train = settings['filepath_train']
415 |     # initialize the features dict
416 |     features = dict([])
417 |     n_features = 16 # (no. bands + no. indices tested)*2 to account for std versions
418 |     first_row = np.nan*np.ones((1,n_features))
419 |     for key in settings['labels'].keys():
420 |         features[key] = first_row
421 |     # loop through each site
422 |     for sitename in train_sites:
423 |         #sitename = site[:site.find('.')]
424 |         filepath = os.path.join(filepath_train,sitename)
425 |         if os.path.exists(filepath):
426 |             list_files = os.listdir(filepath)
427 |         else:
428 |             continue
429 |         # make a new list with only the .pkl files (no .jpg)
430 |         list_files_pkl = []
431 |         for file in list_files:
432 |             if '.pkl' in file:
433 |                 list_files_pkl.append(file)
434 |         # load and append the training data to the features dict
435 |         for file in list_files_pkl:
436 |             # read file
437 |             with open(os.path.join(filepath, file), 'rb') as f:
438 |                 labelled_data = pickle.load(f)
439 |             for key in labelled_data['features'].keys():
440 |                 if len(labelled_data['features'][key])>0: # check that it is not empty
441 |                     # append rows
442 |                     features[key] = np.append(features[key],
443 |                                               labelled_data['features'][key], axis=0)
444 |     # remove the first row (initialized with nans) and print how many pixels
445 |     print('Number of pixels per class in training data:')
446 |     for key in features.keys():
447 |         features[key] = features[key][1:,:]
448 |         print('%s : %d pixels'%(key,len(features[key])))
449 | 
450 |     return features
451 | 
452 | 
453 | def format_training_data(features, classes, labels):
454 |     """
455 |     Format the labelled data in an X features matrix and a y labels vector, so
456 |     that it can be used for training an ML model.
457 | 
458 |     KV WRL 2019
459 | 
460 |     Arguments:
461 |     -----------
462 |     features: dict
463 |         contains the features for each labelled pixel
464 |     classes: list of str
465 |         names of the classes
466 |     labels: list of int
467 |         int value associated with each class (in the same order as classes)
468 | 
469 |     Returns:
470 |     -----------
471 |     X: np.array
472 |         matrix with features along the columns and pixels along the rows
473 |     y: np.array
474 |         vector with the labels corresponding to each row of X
475 | 
476 |     """
477 | 
478 |     # initialize X and y
479 |     X = np.nan*np.ones((1,features[classes[0]].shape[1]))
480 |     y = np.nan*np.ones((1,1))
481 |     # append row of features to X and corresponding label to y
482 |     for i,key in enumerate(classes):
483 |         y = np.append(y, labels[i]*np.ones((features[key].shape[0],1)), axis=0)
484 |         X = np.append(X, features[key], axis=0)
485 |     # remove first row
486 |     X = X[1:,:]; y = y[1:]
487 |     # replace nans with something close to 0
488 |     # training algorithms cannot handle nans
489 |     X[np.isnan(X)] = 1e-9
490 | 
491 |     return X, y
492 | 
493 | 
494 | def plot_confusion_matrix(y_true,y_pred,classes,normalize=False,cmap=plt.cm.Blues):
495 |     """
496 |     Function copied from the scikit-learn examples (https://scikit-learn.org/stable/)
497 |     This function plots a confusion matrix. 
498 | Normalization can be applied by setting `normalize=True`. 499 | 500 | """ 501 | # compute confusion matrix 502 | cm = confusion_matrix(y_true, y_pred) 503 | if normalize: 504 | cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] 505 | print("Normalized confusion matrix") 506 | else: 507 | print('Confusion matrix, without normalization') 508 | 509 | # plot confusion matrix 510 | fig, ax = plt.subplots(figsize=(6,6), tight_layout=True) 511 | im = ax.imshow(cm, interpolation='nearest', cmap=cmap) 512 | # ax.figure.colorbar(im, ax=ax) 513 | ax.set(xticks=np.arange(cm.shape[1]), 514 | yticks=np.arange(cm.shape[0]), ylim=[3.5,-0.5], 515 | xticklabels=classes, yticklabels=classes, 516 | ylabel='True label', 517 | xlabel='Predicted label') 518 | 519 | # rotate the tick labels and set their alignment. 520 | plt.setp(ax.get_xticklabels(), rotation=45, ha="right", 521 | rotation_mode="anchor") 522 | 523 | # loop over data dimensions and create text annotations. 524 | fmt = '.2f' if normalize else 'd' 525 | thresh = cm.max() / 2. 526 | for i in range(cm.shape[0]): 527 | for j in range(cm.shape[1]): 528 | ax.text(j, i, format(cm[i, j], fmt), 529 | ha="center", va="center", 530 | color="white" if cm[i, j] > thresh else "black", 531 | fontsize=12) 532 | fig.tight_layout() 533 | return ax 534 | 535 | 536 | def evaluate_classifier(classifier, classifier_save_name, metadata, settings): 537 | """ 538 | Apply the image classifier to all the images and save the classified images. 539 | 540 | KV WRL 2019 541 | 542 | Modified for PS data YD 2020 543 | 544 | Arguments: 545 | ----------- 546 | classifier: joblib object 547 | classifier model to be used for image classification 548 | metadata: dict 549 | contains all the information about the satellite images that were downloaded 550 | settings: dict with the following keys 551 | 'inputs': dict 552 | input parameters (sitename, filepath, polygon, dates, sat_list) 553 | 'cloud_thresh': float 554 | value between 0 and 1 indicating the maximum cloud fraction in 555 | the cropped image that is accepted 556 | 'cloud_mask_issue': boolean 557 | True if there is an issue with the cloud mask and sand pixels 558 | are erroneously being masked on the images 559 | 'buffer_size': int 560 | size of the buffer (m) around the sandy pixels over which the pixels 561 | are considered in the thresholding algorithm 562 | 'min_beach_area': int 563 | minimum allowable object area (in metres^2) for the class 'sand', 564 | the area is converted to number of connected pixels 565 | 'min_length_sl': int 566 | minimum length (in metres) of shoreline contour to be valid 567 | 568 | Returns: 569 | ----------- 570 | Saves .jpg images with the output of the classification in the folder ./detection 571 | 572 | """ 573 | 574 | # create folder called evaluation 575 | fp = os.path.join(os.getcwd(), 'evaluation') 576 | if not os.path.exists(fp): 577 | os.makedirs(fp) 578 | 579 | # create sub folder of classifier 580 | fp = os.path.join(fp, classifier_save_name) 581 | if not os.path.exists(fp): 582 | os.makedirs(fp) 583 | 584 | # initialize figure (not interactive) 585 | plt.ioff() 586 | fig,ax = plt.subplots(1,2,figsize=[17,10],sharex=True, sharey=True, 587 | constrained_layout=True) 588 | 589 | # create colormap for labels 590 | cmap = cm.get_cmap('tab20c') 591 | colorpalette = cmap(np.arange(0,13,1)) 592 | colours = np.zeros((3,4)) 593 | colours[0,:] = colorpalette[5] 594 | colours[1,:] = np.array([204/255,1,0,1]) 595 | colours[2,:] = np.array([0,91/255,1,1]) 596 | # loop through satellites 
597 | for satname in metadata.keys(): 598 | filepath = settings['inputs']['filepath'] 599 | filenames = metadata[satname]['filenames'] 600 | 601 | # load classifiers and loop through images 602 | for i in range(len(filenames)): 603 | # image filename 604 | fn = os.path.join(filepath, filenames[i]) 605 | # read and preprocess image 606 | im_ms, cloud_mask = get_ps_data(fn) 607 | # calculate cloud cover 608 | cloud_cover = np.divide(sum(sum(cloud_mask.astype(int))), 609 | (cloud_mask.shape[0]*cloud_mask.shape[1])) 610 | # skip image if cloud cover is above threshold 611 | if cloud_cover > settings['cloud_thresh']: 612 | continue 613 | 614 | # convert settings['min_beach_area'] from metres to pixels 615 | pixel_size = 3 616 | min_beach_area_pixels = np.ceil(settings['min_beach_area']/pixel_size**2) 617 | 618 | # classify image in 4 classes (sand, whitewater, water, other) with NN classifier 619 | im_classif = classify_image_NN(im_ms, cloud_mask, classifier) 620 | 621 | # create a stack of boolean images for each label 622 | im_sand = im_classif == 1 623 | im_swash = im_classif == 2 624 | im_water = im_classif == 3 625 | # remove small patches of sand or water that could be around the image (usually noise) 626 | im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_area_pixels, connectivity=2) 627 | im_water = morphology.remove_small_objects(im_water, min_size=min_beach_area_pixels, connectivity=2) 628 | 629 | im_labels = np.stack((im_sand,im_swash,im_water), axis=-1) 630 | 631 | # make a plot 632 | im_RGB = rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9) 633 | # create classified image 634 | im_class = np.copy(im_RGB) 635 | for k in range(0,im_labels.shape[2]): 636 | im_class[im_labels[:,:,k],0] = colours[k,0] 637 | im_class[im_labels[:,:,k],1] = colours[k,1] 638 | im_class[im_labels[:,:,k],2] = colours[k,2] 639 | 640 | # show images 641 | ax[0].imshow(im_RGB) 642 | ax[1].imshow(im_RGB) 643 | ax[1].imshow(im_class, alpha=0.5) 644 | ax[0].axis('off') 645 | ax[1].axis('off') 646 | filename = filenames[i][:filenames[i].find('.')][:-4] 647 | ax[0].set_title(filename) 648 | # save figure 649 | fig.savefig(os.path.join(fp,settings['inputs']['sitename'] + '_' + filename[:19] +'.jpg'), dpi=150) 650 | # clear axes 651 | for cax in fig.axes: 652 | cax.clear() 653 | 654 | # close the figure at the end 655 | plt.close() -------------------------------------------------------------------------------- /coastsat_ps/classifier/models/NN_4classes_PS_NARRA.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydoherty/CoastSat.PlanetScope/07e60499f024fc10037b5532a4c74fd0cae09e62/coastsat_ps/classifier/models/NN_4classes_PS_NARRA.pkl -------------------------------------------------------------------------------- /coastsat_ps/classifier/train_new_classifier.py: -------------------------------------------------------------------------------- 1 | # Train/update a new classifier for CoastSat.PlanetScope. This can improve the accuracy 2 | # of the shoreline detection if the users are experiencing issues with the 3 | # default classifier. 4 | 5 | # Run this script with working directory as: 6 | # "... 
> CoastSat.PlanetScope > coastsat_ps > classifier"
7 | 
8 | #%% Initial settings
9 | 
10 | # load modules
11 | import os
12 | import numpy as np
13 | import warnings
14 | warnings.filterwarnings("ignore")
15 | import matplotlib.pyplot as plt
16 | import pickle
17 | import pathlib
18 | 
19 | # sklearn modules
20 | from sklearn.model_selection import train_test_split
21 | from sklearn.neural_network import MLPClassifier
22 | from sklearn.model_selection import cross_val_score
23 | from sklearn.externals import joblib
24 | 
25 | # coastsat modules
26 | from classifier_functions import (get_ps_metadata, label_images, load_labels,
27 |                                   format_training_data, plot_confusion_matrix,
28 |                                   evaluate_classifier)
29 | 
30 | 
31 | #%%
32 | def create_folder(filepath):
33 |     ''' Creates a filepath if it doesn't already exist
34 |         Will not overwrite files that exist
35 |         Assign filepath string to a variable
36 |     '''
37 |     pathlib.Path(filepath).mkdir(exist_ok=True)
38 |     return filepath
39 | 
40 | # filepaths
41 | filepath_train = create_folder(os.path.join(os.getcwd(), 'training_data'))
42 | filepath_models = create_folder(os.path.join(os.getcwd(), 'models'))
43 | 
44 | 
45 | #%% Instructions
46 | 
47 | # 0) Classifier can only be updated for one site at a time, repeat below steps for
48 | #    each site that requires training
49 | 
50 | # 1) Classifier requires merged TOA images. Run CoastSat.PlanetScope for a site up to/including
51 | #    step 1.3 (Pre-Processing - image coregistration and scene merging)
52 | 
53 | # 2) To manually train classifier for ALL output scenes from step 1, use
54 | #    '...CoastSat.PlanetScope/outputs/SITE/toa_image_data/merged_data/local_coreg_merged'
55 | #    or equivalent folder (local/global/off) as the variable 'filepath_images'
56 | 
57 | # 2.1) To run on a SUBSET of images, copy TOA/nan/cloud files from the above folder
58 | #    to a new folder and set this folder as filepath_images variable (below)
59 | #    Note: You only need a few images (~10) to train the classifier.
60 | 
61 | sitename = 'NARRA'
62 | 
63 | filepath_images = ('...CoastSat.PlanetScope/outputs/NARRA/toa_image_data/merged_data/local_coreg_merged')
64 | 
65 | epsg = 28356
66 | 
67 | classifier_save_name = 'NN_4classes_PS'
68 | 
69 | 
70 | #%% Update settings
71 | 
72 | settings ={'filepath_train':filepath_train, # folder where the labelled images will be stored
73 |            'cloud_thresh':0.9, # fraction (0-1) of cloudy pixels accepted on the image
74 |            'inputs':{'filepath':filepath_images, 'sitename': sitename}, # folder where the images are stored
75 |            'labels':{'sand':1,'white-water':2,'water':3,'other land features':4}, # labels for the classifier
76 |            'colors':{'sand':[1, 0.65, 0],'white-water':[1,0,1],'water':[0.1,0.1,0.7],'other land features':[0.8,0.8,0.1]},
77 |            'tolerance':0.02, # this is the pixel intensity tolerance, when using flood fill for sandy pixels
78 |                              # set to 0 to select one pixel at a time
79 |            }
80 | 
81 | 
82 | #%% Label images [skip if only merging previously manually classified data sets]
83 | # Label the images into 4 classes: sand, white-water, water and other land features.
84 | # The labelled images are saved in the *filepath_train* and can be visualised
85 | # afterwards for quality control. If you make a mistake, don't worry, this can
86 | # be fixed later by deleting the labelled image. 
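# A quick way to quality control a labelled scene before training is to re-load
# its saved .pkl file and count the pixels per class. Minimal sketch (commented
# out; 'EXAMPLE_scene.pkl' is a hypothetical placeholder for any file that
# label_images saves under filepath_train/sitename):
# with open(os.path.join(filepath_train, sitename, 'EXAMPLE_scene.pkl'), 'rb') as f:
#     qc_data = pickle.load(f)
# for class_name, class_id in qc_data['label_ids'].items():
#     print(class_name, ':', int(np.sum(qc_data['labels'] == class_id)), 'pixels')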
87 | 88 | # label the images with an interactive annotator 89 | 90 | # create compatible metadata dict 91 | metadata = get_ps_metadata(filepath_images, epsg) 92 | 93 | # label images 94 | label_images(metadata, settings) 95 | 96 | 97 | #%% Train Classifier [uses all sites] 98 | 99 | # A Multilayer Perceptron is trained with *scikit-learn*. To train the classifier, the training data needs to be loaded. 100 | # You can use the data that was labelled here and/or the original CoastSat training data. 101 | 102 | # load labelled images 103 | train_sites = next(os.walk(os.path.join(os.getcwd(), 'training_data')))[1] 104 | #train_sites = ['NARRA'] 105 | print('Loading data for sites:\n',train_sites, '\n') 106 | features = load_labels(train_sites, settings) 107 | 108 | 109 | #%% [OPTIONAL] - import previously trained features 110 | 111 | save_pkl = 'CoastSat_PS_training_set_NARRA_50000.pkl' 112 | 113 | # Load the original CoastSat.PlanetScope training data (and optionally merge it with your labelled data) 114 | with open(os.path.join(settings['filepath_train'], save_pkl), 'rb') as f: 115 | features_original = pickle.load(f) 116 | print('Loaded classifier features:') 117 | for key in features_original.keys(): 118 | print('%s : %d pixels'%(key,len(features_original[key]))) 119 | 120 | # # Option 1) add the white-water data from the original training data 121 | # features['white-water'] = np.append(features['white-water'], features_original['white-water'], axis=0) 122 | 123 | # # Option 2) Merge all the classes 124 | for key in features.keys(): 125 | features[key] = np.append(features[key], features_original[key], axis=0) 126 | 127 | # Option 3) Use original data 128 | # features = features_original 129 | 130 | print('\nUpdated classifier features:') 131 | for key in features.keys(): 132 | print('%s : %d pixels'%(key,len(features[key]))) 133 | 134 | 135 | #%% As the classes do not have the same number of pixels, it is good practice to 136 | #subsample the very large classes (in this case 'water' and 'other land features'): 137 | 138 | # Subsample randomly the land and water classes 139 | # as the most important class is 'sand', the number of samples in each class 140 | #should be close to the number of sand pixels 141 | 142 | n_samples = 25000 143 | #for key in ['water', 'other land features']: 144 | for key in ['sand', 'water', 'other land features']: 145 | features[key] = features[key][np.random.choice(features[key].shape[0], n_samples, replace=False),:] 146 | # print classes again 147 | print('Re-sampled classifier features:') 148 | for key in features.keys(): 149 | print('%s : %d pixels'%(key,len(features[key]))) 150 | 151 | 152 | #%% [OPTIONAL] - save features 153 | 154 | # Save name 155 | save_pkl = 'CoastSat_PS_training_set_new.pkl' 156 | save_loc = os.path.join(settings['filepath_train'], save_pkl) 157 | 158 | # Save training data features as .pkl 159 | with open(save_loc, 'wb') as f: 160 | pickle.dump(features, f) 161 | print('New classifier training feature set saved to:\n', save_loc) 162 | 163 | 164 | #%% When the labelled data is ready, format it into X, a matrix of features, and y, a vector of labels: 165 | 166 | # format into X (features) and y (labels) 167 | classes = ['sand','white-water','water','other land features'] 168 | labels = [1,2,3,0] 169 | X,y = format_training_data(features, classes, labels) 170 | 171 | 172 | #%% Divide the dataset into train and test: train on 70% of the data and evaluate on the other 30%: 173 | 174 | # divide in train and test and evaluate the classifier 
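# The split below fixes random_state for reproducibility. If the re-sampled
# classes end up unbalanced, scikit-learn's stratify argument can additionally
# be passed to keep class proportions equal in both subsets (optional
# commented-out variant):
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
#                                                     shuffle=True, random_state=0,
#                                                     stratify=y)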
175 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=0) 176 | classifier = MLPClassifier(hidden_layer_sizes=(100,50), solver='adam') 177 | classifier.fit(X_train,y_train) 178 | print('Accuracy: %0.4f' % classifier.score(X_test,y_test)) 179 | 180 | 181 | #%% [OPTIONAL] A more robust evaluation is 10-fold cross-validation (may take a few minutes to run): 182 | 183 | # cross-validation 184 | scores = cross_val_score(classifier, X, y, cv=10) 185 | print('Accuracy: %0.4f (+/- %0.4f)' % (scores.mean(), scores.std() * 2)) 186 | 187 | 188 | #%% Plot a confusion matrix: 189 | 190 | # plot confusion matrix 191 | y_pred = classifier.predict(X_test) 192 | plot_confusion_matrix(y_test, y_pred, 193 | classes=['other land features','sand','white-water','water'], 194 | normalize=False); 195 | plt.show() 196 | 197 | 198 | #%% [OPTIONAL] - Update classifier save name with number of samples and evaluation results 199 | 200 | # Update save name with settings 201 | classifier_save_name += '_' + str(n_samples) 202 | for site in train_sites: 203 | classifier_save_name += '_' + site 204 | classifier_save_name += '_' + str(int(classifier.score(X_test,y_test)*10000)) 205 | print('New save name is:\n',classifier_save_name) 206 | 207 | 208 | #%% When satisfied with the accuracy and confusion matrix, train the model using 209 | # ALL (no 70/30 split) training data and save 210 | 211 | # train with all the data and save the final classifier 212 | classifier = MLPClassifier(hidden_layer_sizes=(100,50), solver='adam') 213 | classifier.fit(X,y) 214 | save_loc = os.path.join(filepath_models, classifier_save_name + '.pkl') 215 | joblib.dump(classifier, save_loc) 216 | print('New classifier saved to:\n', save_loc) 217 | 218 | 219 | #%% 4. Evaluate the classifier 220 | # Load a classifier that you have trained (specify the classifiers filename) and evaluate it on the satellite images. 221 | # This section will save the output of the classification for each site in a directory named \evaluation. 
222 | # Only evaluates images in the filepath_images folder
223 | 
224 | classifier_eval = classifier_save_name
225 | 
226 | # load and evaluate a classifier
227 | classifier = joblib.load(os.path.join(filepath_models, classifier_eval + '.pkl'))
228 | settings['min_beach_area'] = 1000
229 | settings['cloud_thresh'] = 0.9
230 | 
231 | # visualise the classified images
232 | for site in train_sites:
233 |     settings['inputs']['sitename'] = site
234 |     # load metadata
235 |     metadata = get_ps_metadata(filepath_images, epsg)
236 | 
237 |     # plot the classified images
238 |     evaluate_classifier(classifier, classifier_eval, metadata, settings)
239 | 
240 | 
--------------------------------------------------------------------------------
/coastsat_ps/classifier/training_data/CoastSat_PS_training_set_NARRA_50000.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ydoherty/CoastSat.PlanetScope/07e60499f024fc10037b5532a4c74fd0cae09e62/coastsat_ps/classifier/training_data/CoastSat_PS_training_set_NARRA_50000.pkl
--------------------------------------------------------------------------------
/coastsat_ps/data_import.py:
--------------------------------------------------------------------------------
1 | # Initialisation and Data import functions
2 | # YD, Sep 2020
3 | 
4 | import os
5 | import datetime
6 | import numpy as np
7 | from osgeo import gdal
8 | from sklearn.externals import joblib
9 | import json
10 | from shapely.geometry import shape
11 | from shapely.ops import transform
12 | from pyproj import Transformer
13 | 
14 | from coastsat_ps.preprocess_tools import create_folder
15 | from coastsat_ps.interactive import transects_from_geojson
16 | 
17 | 
18 | #%% Pre-Processing Functions
19 | 
20 | def initialise_settings(settings):
21 |     ''' Initialise user inputs and output folders '''
22 | 
23 | 
24 |     ############# Additional advanced settings ###############################
25 | 
26 | 
27 |     ### Coregistration
28 |     # Georectified TOA reference image path (in CoastSat.PlanetScope/user_inputs folder)
29 |     settings['georef_im'] = False #'Narrabeen_im_ref.tif', # Possible to provide a reference image filepath for coregistration instead of manually selecting from a popup window
30 |     # Tie point averaged x/y local shift for coregistration
31 |     settings['local_avg'] = True
32 |     # Tie-point grid spacing (pixels)
33 |     settings['grid_size'] = 50
34 |     # Tie-point comparison window size (pixels)
35 |     settings['window_size'] = (100,100)
36 |     # Error filtering level (0-3: lower number less likely to fail but less accurate)
37 |     settings['filter_level'] = 2 #3 often fails, bug?
38 |     # Workaround for arosics' inability to coregister images with different CRS. Reprojects all TOA/mask files to output epsg first.
39 |     settings['arosics_reproject'] = False
40 |     # GDAL warp CRS re-sampling method.
41 |     # 'near' is the fastest/default but images may be jagged as no smoothing is applied. 'cubic' & 'cubicspline' look the best but are slowest. 'bilinear' is a good middle ground.
42 |     # Note that re-sampling using cubic, cubicspline and bilinear options may cause issues with arosics. 
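    # For example, to trade some speed for smoother output, the middle-ground
    # option described above could be swapped in (commented-out sketch; note the
    # arosics caveat in the line above):
    # settings['gdal_method'] = 'bilinear'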
43 |     settings['gdal_method'] = 'near'
44 |     # Land mask cleaning smoothing parameters - choose lower values if land mask does not cover thin land regions (ie small barrier islands)
45 |     settings['land_mask_smoothing_1'] = 15 # pixels (so x3 for metres)
46 |     settings['land_mask_smoothing_2'] = 10 # pixels (so x3 for metres)
47 | 
48 |     ### Shoreline extraction method
49 |     # Water index [NDWI, NmB, RmB, NmG, NmB_Norm, RmB_Norm]
50 |     settings['water_index'] = 'NmB'
51 |     # Shoreline thresholding method ['Otsu', 'Peak Fraction']
52 |     settings['thresholding'] = 'Peak Fraction'
53 |     # Fraction used in custom peak fraction thresholding method
54 |     settings['peak_fraction'] = 0.7
55 |     # Number of bins for threshold histogram plot
56 |     settings['otsu_hist_bins'] = 150
57 |     # Include PS determined faulty pixels in cloud mask
58 |     settings['faulty_pixels'] = True
59 | 
60 | 
61 |     ### Thin beach width fixes
62 |     # Generic detection region - if False individual masks are extracted (slower but more accurate sl extraction)
63 |     settings['generic_sl_region'] = False # Use True for beach w no sand when having issues with classifier
64 |     # When true, shoreline is based on a generic crop and otsu and does not use a classified image
65 | 
66 | 
67 |     ### Transect intersection settings
68 |     # Search width adjacent to transect for SL intersections [in metres]
69 |     settings['along_dist'] = 25
70 |     # Group statistics for filtering intersection points in transect search area [in metres]
71 |     settings['max_std'] = 10
72 |     settings['max_range'] = 25
73 |     settings['min_val'] = 0
74 |     settings['min no. intercepts'] = 5
75 | 
76 | 
77 |     ##########################################################################
78 | 
79 |     # Check filepath is acceptable for GDAL
80 |     if ' ' in os.getcwd():
81 |         raise Exception('Ensure no whitespace in filepath to CoastSat.PlanetScope folder as this causes a gdal error. Edit filepath or move run folder to a new location.')
82 | 
83 |     # Ensure GDAL directory exists
84 |     if os.path.isdir(settings['GDAL_location']) == False:
85 |         raise Exception('Ensure GDAL location entered is correct')
86 | 
87 |     # Ensure working directory is coastsat
88 |     if not (os.getcwd()[-20:] == 'CoastSat.PlanetScope' or
89 |             os.getcwd()[-25:] == 'CoastSat.PlanetScope-main'):
90 |         raise Exception('Change working directory to CoastSat.PlanetScope or CoastSat.PlanetScope-main. ' +
91 |                         'This line can be commented out if a different folder name is wanted. This check ensures the working directory is deliberate so data is not saved in a random location.')
92 | 
93 |     # Create output_folders
94 |     settings['outputs_base_folder'] = create_folder(os.path.join(os.getcwd(),'outputs'))
95 |     settings['output_folder'] = create_folder(os.path.join(settings['outputs_base_folder'],settings['site_name'])) # Run directory
96 | 
97 |     settings['toa_out'] = create_folder(os.path.join(settings['output_folder'],'toa_image_data'))
98 |     settings['raw_data'] = create_folder(os.path.join(settings['toa_out'],'raw_data'))
99 |     settings['merge_out_base'] = create_folder(os.path.join(settings['toa_out'],'merged_data'))
100 |     if settings['im_coreg'] == 'Local Coreg':
101 |         settings['merge_out'] = create_folder(os.path.join(settings['merge_out_base'],'local_coreg_merged'))
102 |     elif settings['im_coreg'] == 'Global Coreg':
103 |         settings['merge_out'] = create_folder(os.path.join(settings['merge_out_base'],'global_coreg_merged'))
104 |     elif settings['im_coreg'] == 'Coreg Off':
105 |         settings['merge_out'] = create_folder(os.path.join(settings['merge_out_base'], settings['im_coreg']))
106 |     else:
107 |         raise Exception('Check co-registration setting selection spelling')
108 | 
109 |     settings['tif_out'] = create_folder(os.path.join(settings['output_folder'],'index tif outputs'))
110 |     settings['index_tif_coreg'] = create_folder(os.path.join(settings['tif_out'], settings['im_coreg']))
111 |     settings['index_tif_out'] = create_folder(os.path.join(settings['index_tif_coreg'], settings['water_index']))
112 | 
113 |     settings['sl_png'] = create_folder(os.path.join(settings['output_folder'],'shoreline outputs'))
114 |     settings['sl_png_coreg'] = create_folder(os.path.join(settings['sl_png'], settings['im_coreg']))
115 |     settings['sl_threshold'] = create_folder(os.path.join(settings['sl_png_coreg'], settings['water_index']))
116 |     settings['sl_thresh_ind'] = create_folder(os.path.join(settings['sl_threshold'], settings['thresholding'])) # shoreline data out folder
117 |     settings['index_png_out'] = create_folder(os.path.join(settings['sl_thresh_ind'], 'Shoreline plots'))
118 | 
119 | 
120 |     # Create filepaths
121 |     settings['user_input_folder'] = os.path.join(os.getcwd(), 'user_inputs')
122 |     settings['run_input_folder'] = create_folder(os.path.join(settings['output_folder'], 'input_data'))
123 | 
124 |     settings['sl_pkl_file'] = os.path.join(settings['sl_thresh_ind'], settings['site_name'] + '_' + settings['water_index'] + '_' + settings['thresholding'] + '_shorelines.pkl') # Results out
125 |     settings['sl_geojson_file'] = os.path.join(settings['sl_thresh_ind'], settings['site_name'] + '_' + settings['water_index'] + '_' + settings['thresholding'] + '_shorelines.geojson')
126 |     settings['sl_transect_csv'] = os.path.join(settings['sl_thresh_ind'], settings['site_name'] + '_' + settings['water_index'] + '_' + settings['thresholding'] + '_transect_SL_data.csv')
127 | 
128 | 
129 |     # Initialise settings
130 |     settings['output_epsg'] = 'EPSG:' + settings['output_epsg']
131 |     settings['pixel_size'] = 3
132 |     settings['min_beach_area_pixels'] = np.ceil(settings['min_beach_area']/settings['pixel_size']**2)
133 |     settings['ref_merge_im_txt'] = os.path.join(settings['run_input_folder'], settings['site_name'] + '_TOA_path.txt')
134 | 
135 | 
136 |     # Initialise classifiers [Could use separate classifiers for coreg and sl extraction?] 
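    # If that split were wanted, the two loads below could simply point at
    # different model files. A minimal sketch (both filenames are hypothetical
    # placeholders; any new model must first be trained with
    # coastsat_ps/classifier/train_new_classifier.py):
    # class_path = os.path.join(os.getcwd(),'coastsat_ps', 'classifier', 'models', 'NN_4classes_PS_shoreline.pkl')
    # land_class_path = os.path.join(os.getcwd(),'coastsat_ps', 'classifier', 'models', 'NN_4classes_PS_coreg.pkl')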
137 | class_path = os.path.join(os.getcwd(),'coastsat_ps', 'classifier', 'models', settings['classifier']) 138 | land_class_path = os.path.join(os.getcwd(),'coastsat_ps', 'classifier', 'models', settings['classifier']) 139 | settings['classifier_load'] = joblib.load(class_path) 140 | settings['land_classifier_load'] = joblib.load(land_class_path) 141 | 142 | 143 | # Import transects 144 | if settings['transects'] != False: 145 | settings['geojson_file'] = os.path.join(settings['user_input_folder'], settings['transects']) 146 | settings['transects_load'] = transects_from_geojson(settings['geojson_file']) 147 | 148 | 149 | # Update coreg settings 150 | if settings['im_coreg'] != 'Coreg Off': 151 | settings['land_mask'] = os.path.join(settings['run_input_folder'], settings['site_name'] + '_land_mask.tif') 152 | if settings['im_coreg'] == 'Local Coreg': 153 | settings['coreg_out'] = create_folder(os.path.join(settings['toa_out'],'local_coreg_data')) 154 | if settings['im_coreg'] == 'Global Coreg': 155 | settings['coreg_out'] = create_folder(os.path.join(settings['toa_out'],'global_coreg_data')) 156 | 157 | if settings['georef_im'] != False: 158 | settings['georef_im_path'] = os.path.join(settings['user_input_folder'], settings['georef_im']) 159 | 160 | 161 | # Update water index settings [band 1, band 2, normalised bool] 162 | # Band list [1 B, 2 G, 3 R, 4 Nir] 163 | if settings['water_index'] == 'NDWI': 164 | settings['water_index_list'] = [4, 2, True, '_NDWI_norm.tif'] 165 | elif settings['water_index'] == 'NmG': 166 | settings['water_index_list'] = [4, 2, False, '_NmG.tif'] 167 | elif settings['water_index'] == 'NmB_Norm': 168 | settings['water_index_list'] = [4, 1, True, '_NmB_norm.tif'] 169 | elif settings['water_index'] == 'NmB': 170 | settings['water_index_list'] = [4, 1, False, '_NmB.tif'] 171 | elif settings['water_index'] == 'RmB_Norm': 172 | settings['water_index_list'] = [4, 3, True, '_RmB_norm.tif'] 173 | elif settings['water_index'] == 'RmB': 174 | settings['water_index_list'] = [4, 3, False, '_RmB.tif'] 175 | 176 | 177 | # Create AOI polygon from KML file 178 | settings['aoi_geojson'] = os.path.join(settings['run_input_folder'], settings['site_name'] + '_aoi.geojson') 179 | gdal.VectorTranslate(settings['aoi_geojson'], gdal.OpenEx(os.path.join(settings['user_input_folder'], settings['aoi_kml'])), format='GeoJSON') 180 | 181 | 182 | # Scan downloads folder and map relevent contents 183 | outputs = {} 184 | outputs['downloads_map'] = map_downloads(settings) 185 | 186 | 187 | # output dictionary as log file 188 | out_file = os.path.join(settings['sl_thresh_ind'], 'input_settings_log_file.csv') 189 | with open(out_file, 'w') as f: 190 | for key in settings.keys(): 191 | f.write("%s, %s\n" % (key, settings[key])) 192 | 193 | # print AOI area 194 | calculate_aoi_area(settings) 195 | 196 | return outputs 197 | 198 | 199 | def calculate_aoi_area(settings): 200 | # Load the GeoJSON file 201 | with open(settings['aoi_geojson'], 'r') as f: 202 | geojson_data = json.load(f) 203 | # Extract the first feature's geometry 204 | polygon = shape(geojson_data['features'][0]['geometry']) 205 | # Create a transformer from WGS84 (EPSG:4326) to projected EPSG 206 | transformer = Transformer.from_crs("EPSG:4326", settings['output_epsg'], always_xy=True) 207 | # Transform the polygon coordinates 208 | projected_polygon = transform(transformer.transform, polygon) 209 | # Calculate the area 210 | area = projected_polygon.area 211 | print("AOI is", round(area/(1000*1000),1), 'km^2') 212 | 213 | 214 | def 
map_downloads(settings): 215 | ''' Scan download folder and create dictionary of data ''' 216 | 217 | # unpack input dictionary 218 | folder = settings['downloads_folder'] 219 | 220 | # Remove .aux.xml filed that can be generated by QGIS 221 | for file in os.listdir(folder): 222 | if file.endswith('.aux.xml'): 223 | os.remove(os.path.join(folder,file)) 224 | 225 | # initialise output 226 | map_dict = {} 227 | count = 0 228 | 229 | # Initialise dictionary structure using file dates 230 | for file in os.listdir(folder): 231 | if file.endswith('_AnalyticMS_clip.tif'): 232 | # get date of file 233 | year = file[0:4] 234 | month = file[4:6] 235 | day = file[6:8] 236 | date = year + '-' + month + '-' + day 237 | count = count + 1 238 | 239 | # create dictionary for each date 240 | if date not in map_dict.keys(): 241 | map_dict[date] = {} 242 | for end in ['_AnalyticMS_clip.tif','_udm2_clip.tif','_udm_clip.tif','_metadata.json','_metadata_clip.xml']: 243 | map_dict[date][end] = { 244 | 'filenames': [], 245 | 'filepaths':[]} 246 | if end == '_AnalyticMS_clip.tif': 247 | map_dict[date][end]['timestamp'] = [] 248 | 249 | # Fill in dictionary with filepath details 250 | length = len(os.listdir(folder)) 251 | count_file = 0 252 | for file in os.listdir(folder): 253 | count_file = count_file + 1 254 | print('\rUpdating Dictionary ' + str(round((count_file/length)*100,2)) + '% ', end = '') 255 | for end in ['_AnalyticMS_clip.tif','_udm2_clip.tif','_udm_clip.tif','_metadata.json','_metadata_clip.xml']: 256 | if file.endswith(end): 257 | # get date of file 258 | year = file[0:4] 259 | month = file[4:6] 260 | day = file[6:8] 261 | date = year + '-' + month + '-' + day 262 | 263 | # update dictionary 264 | map_dict[date][end]['filenames'] = map_dict[date][end]['filenames'] + [file] 265 | map_dict[date][end]['filepaths'] = map_dict[date][end]['filepaths'] + [os.path.join(folder, file)] 266 | 267 | if end == '_AnalyticMS_clip.tif': 268 | # add timestamp 269 | hour = file[9:11] 270 | minutes = file[11:13] 271 | seconds = file[13:15] 272 | timestamp = date + ' '+str(hour) +':'+ str(minutes) +':'+str(seconds) 273 | timestamp = datetime.datetime.strptime(timestamp,'%Y-%m-%d %H:%M:%S') 274 | map_dict[date][end]['timestamp'] = map_dict[date][end]['timestamp'] + [timestamp] 275 | 276 | print('\n ',count,'images found over',len(map_dict),'dates') 277 | 278 | settings['input_image_count'] = count 279 | 280 | return map_dict 281 | 282 | -------------------------------------------------------------------------------- /coastsat_ps/extract_shoreline.py: -------------------------------------------------------------------------------- 1 | # Shoreline extraction functions 2 | 3 | import time 4 | import pathlib 5 | import rasterio 6 | import numpy as np 7 | from osgeo import gdal 8 | import os 9 | import pickle 10 | import skimage.morphology as morphology 11 | import matplotlib.pyplot as plt 12 | import pandas as pd 13 | import matplotlib.cm as cm 14 | import shutil 15 | 16 | from coastsat_ps.shoreline_tools import (calc_water_index, classify_single, 17 | create_shoreline_buffer, 18 | process_shoreline, sl_extract, 19 | sl_extract_generic, output_to_gdf_PL) 20 | 21 | from coastsat_ps.interactive import get_ps_data, convert_world2pix, rescale_image_intensity 22 | 23 | from coastsat_ps.plotting import (initialise_plot, initialise_plot_gen, rgb_plot, 24 | index_plot, class_plot, histogram_plot, 25 | histogram_plot_split) 26 | 27 | 28 | #%% Overall shoreline extraction function 29 | 30 | def extract_shorelines(outputs, settings, 
                       del_index = False, reclassify = False, rerun_shorelines = False):
31 | 
32 |     if os.path.isfile(settings['sl_pkl_file']) and rerun_shorelines == False:
33 |         # load the previously saved output structure from the shorelines .pkl file
34 |         with open(settings['sl_pkl_file'], 'rb') as f:
35 |             shorelines = pickle.load(f)
36 |         print('Previously run shorelines loaded')
37 |     else:
38 |         # Extract shoreline crop region and calculate water index
39 |         batch_index_and_classify(outputs, settings, reclassify = reclassify)
40 |         index_dict_update(outputs, settings)
41 | 
42 |         # Threshold water index images, extract shorelines and plot
43 |         shorelines = batch_threshold_sl(outputs, settings)
44 | 
45 |     if del_index:
46 |         shutil.rmtree(settings['index_tif_out'])
47 |         # Create blank folder for later runs
48 |         pathlib.Path(settings['index_tif_out']).mkdir(exist_ok=True)
49 | 
50 |     return shorelines
51 | 
52 | 
53 | #%% Functions
54 | 
55 | def create_crop_mask(im_classif, im_ref_buffer, settings):
56 | 
57 |     # Extract class masks
58 |     im_sand = im_classif == 1
59 |     im_ww = im_classif == 2
60 |     im_water = im_classif == 3
61 | 
62 |     # Create mask of non sand or water pixels
63 |     class_mask = ~(im_sand + im_water + im_ww)>0
64 | 
65 |     # Combine all masks
66 |     crop_mask = (class_mask + im_ref_buffer) >0
67 | 
68 |     # Smooth classified edges (thin other removed)
69 |     out_mask = morphology.binary_opening(crop_mask,morphology.square(6)) # perform image opening
70 | 
71 |     # Add buffer to sl edge
72 |     if settings['thin_beach_fix'] == True:
73 |         crop_mask = crop_mask == 0
74 |         # Adds buffer to sl edge (buffer around sand) to allow contouring of back beach/where no sand present
75 |         out_mask = morphology.binary_dilation(crop_mask, morphology.square(5))
76 |         out_mask = morphology.remove_small_objects(out_mask,
77 |                                                    min_size=3*9,
78 |                                                    connectivity=1)
79 |         out_mask = out_mask == 0
80 |     else:
81 |         # buffer 3 pixels inland to prevent contouring on landward side of sand class
82 |         out_mask = morphology.binary_dilation(out_mask, morphology.square(3))
83 | 
84 |     # Clean up image (small other removed)
85 |     out_mask = morphology.remove_small_objects(out_mask,
86 |                                                min_size=settings['min_beach_area_pixels'],
87 |                                                connectivity=1)
88 | 
89 |     return out_mask, ~im_sand, ~im_water
90 | 
91 | 
92 | def batch_index_and_classify(outputs, settings, reclassify = False):
93 |     # counters
94 |     number_dates = len(outputs['merged_data'])
95 |     start_time = time.time()
96 | 
97 |     # Create ref mask for generic mask setting
98 |     if settings['generic_sl_region'] == True:
99 |         im_classif = classify_single(settings['classifier_load'],
100 |                                      settings,
101 |                                      settings['ref_merge_im'],
102 |                                      no_mask = False, raw_mask = False)
103 | 
104 |     # Extract shorelines
105 |     for n, dates in enumerate(outputs['merged_data']):
106 | 
107 |         # Print progress
108 |         curr_time = int(time.time() - start_time)
109 |         time_elap = str(int(curr_time/60)) + 'm ' + str(curr_time%60) + 's'
110 |         print('\rClassifying images for date', n+1, 'of',number_dates,'(' + time_elap,' elapsed)', end = '')
111 | 
112 |         for sat in outputs['merged_data'][dates]:
113 |             # extract data/structure
114 |             image_dict = outputs['merged_data'][dates][sat]
115 |             toa_path = image_dict['toa_filepath'][0]
116 |             image_name = image_dict['toa_filename'][0][0:28]
117 |             class_path = toa_path.replace('TOA.tif', 'class.tif')
118 | 
119 |             # Save im_classif if it doesn't exist already
120 |             if (not os.path.isfile(class_path)) or (reclassify == True):
121 |                 # Create ref mask for individual mask setting
122 |                 if settings['generic_sl_region'] == False:
123 |                     # Classify image
124 | 
im_classif = classify_single(settings['classifier_load'], 125 | settings, 126 | toa_path, 127 | no_mask = False, raw_mask = False) 128 | 129 | # Copy geo info from TOA file 130 | with rasterio.open(toa_path, 'r') as src: 131 | kwargs = src.meta 132 | # Update band info/type 133 | kwargs.update( 134 | dtype=rasterio.uint8, 135 | count = 1) 136 | # Save im_classif in TOA folder 137 | with rasterio.open(class_path, 'w', **kwargs) as dst: 138 | dst.write_band(1, im_classif.astype(rasterio.uint8)) 139 | 140 | # Calculate water index 141 | calc_water_index(toa_path, settings, image_name) 142 | 143 | 144 | def index_dict_update(outputs, settings): 145 | print('\nUpdating output dictionary...') 146 | # Extract shorelines 147 | for n, dates in enumerate(outputs['merged_data']): 148 | for sat in outputs['merged_data'][dates]: 149 | # extract data/structure 150 | image_dict = outputs['merged_data'][dates][sat] 151 | 152 | # Construct save name 153 | image_name = image_dict['toa_filename'][0][0:28] 154 | file_end = settings['water_index_list'][3] 155 | index_file = os.path.join(settings['index_tif_out'], image_name + file_end) 156 | 157 | # Save to dict 158 | image_dict['index_file'] = index_file 159 | 160 | 161 | def batch_threshold_sl(outputs, settings): 162 | 163 | # Initialise count 164 | number_dates = len(outputs['merged_data']) 165 | start_time = time.time() 166 | plt.close("all") 167 | 168 | # Create save structure 169 | shorelines_out = {'shorelines':[], 170 | 'date':[], 171 | 'timestamp utc':[], 172 | 'time':[], 173 | 'name':[], 174 | 'cloud_cover':[], 175 | 'aoi_cover':[], 176 | 'ps_sat_name':[], 177 | 'sensor': [], 178 | 'threshold':[] 179 | } 180 | 181 | # Extract general data 182 | image_epsg = int(settings['output_epsg'].replace('EPSG:','')) 183 | 184 | #calculate a buffer around the reference shoreline 185 | with rasterio.open(settings['ref_merge_im'], 'r') as src: 186 | im_shape = src.read(1).shape 187 | image_epsg = int(str(src.crs).replace('EPSG:','')) 188 | data = gdal.Open(settings['ref_merge_im'], gdal.GA_ReadOnly) 189 | georef = np.array(data.GetGeoTransform()) 190 | im_ref_buffer = create_shoreline_buffer(im_shape, georef, image_epsg, 191 | settings['pixel_size'], settings) 192 | 193 | # Transects to pic coords 194 | transects = {} 195 | for ts in settings['transects_load']: 196 | transects[ts] = convert_world2pix(settings['transects_load'][ts], georef) 197 | 198 | # Find generic im_classif from ref_merge_im 199 | with rasterio.open(settings['ref_merge_im'].replace('TOA.tif', 'class.tif')) as src: 200 | im_classif = src.read(1) 201 | 202 | # Create generic masks 203 | mask_gen, sand_mask, water_mask = create_crop_mask(im_classif, im_ref_buffer, settings) 204 | 205 | # Initialise plot colours 206 | cmap = cm.get_cmap('tab20c') 207 | colorpalette = cmap(np.arange(0,13,1)) 208 | colours = np.zeros((3,4)) 209 | colours[0,:] = colorpalette[5] # sand 210 | colours[1,:] = np.array([150/255,1,1,1]) # ww 211 | colours[2,:] = np.array([0,91/255,1,1]) # water 212 | 213 | # Loop through images 214 | for n, dates in enumerate(outputs['merged_data']): 215 | for sat in outputs['merged_data'][dates]: 216 | # Print progress 217 | curr_time = int(time.time() - start_time) 218 | time_elap = str(int(curr_time/60)) + 'm ' + str(curr_time%60) + 's' 219 | print('\rExtracting shoreline for date', n+1, 'of',number_dates,'(' + time_elap,' elapsed)', end = '') 220 | 221 | # extract data/structure and settings 222 | image_dict = outputs['merged_data'][dates][sat] 223 | index_im = image_dict['index_file'] 
224 |                 georef = image_dict['georef']
225 |                 toa_filepath = image_dict['toa_filepath'][0]
226 | 
227 |                 im_name = image_dict['toa_filename'][0][0:28]
228 |                 class_path = toa_filepath.replace('TOA.tif', 'class.tif')
229 | 
230 |                 # Import water index and mask
231 |                 with rasterio.open(index_im) as src:
232 |                     index = src.read(1)
233 | 
234 |                 # Extract crop mask
235 |                 if settings['generic_sl_region'] == False:
236 | 
237 |                     with rasterio.open(class_path) as src:
238 |                         im_classif = src.read(1)
239 | 
240 |                     # Check if PS image size matches the generic SL buffer:
241 |                     mask_gen_flag = False
242 |                     if im_classif.shape != im_ref_buffer.shape:
243 |                         print('\n\n ', im_name, 'image dimensions do not match selected reference image. Workaround applied for this image.\n')
244 |                         # create temp ref_buffer and mask with same dimensions
245 |                         im_ref_buffer_temp = create_shoreline_buffer(im_classif.shape, georef, image_epsg, settings['pixel_size'], settings)
246 |                         mask, sand_mask, water_mask = create_crop_mask(im_classif, im_ref_buffer_temp, settings)
247 |                         mask_gen_flag = True
248 | 
249 |                     else:
250 |                         # Create crop mask (standard setting)
251 |                         mask, sand_mask, water_mask = create_crop_mask(im_classif, im_ref_buffer, settings)
252 | 
253 |                 else:
254 |                     mask = mask_gen
255 | 
256 |                 # Get nan/cloud mask and im_ms for plotting
257 |                 im_ms, comb_mask = get_ps_data(toa_filepath)
258 | 
259 |                 # Apply classified mask and cloud/nan
260 |                 masked_im = np.copy(index)
261 |                 if (index.shape != mask.shape) and (settings['generic_sl_region']):
262 |                     print('\n\n ', im_name, 'image dimensions do not match selected reference image. Extraction skipped for this image.\n')
263 |                     continue
264 |                 else:
265 |                     masked_im[mask == 1] = np.nan
266 |                     masked_im[comb_mask] = np.nan
267 | 
268 |                 # Extract sl contours (check # sand pixels)
269 |                 if settings['generic_sl_region'] or (np.sum(~sand_mask) < 100):
270 |                     contours, vec, t_otsu = sl_extract_generic(masked_im, settings)
271 |                 else:
272 |                     # Generic crop region for contouring only (avoids 'other' misclassification error)
273 |                     masked_im_gen = np.copy(index)
274 |                     if mask_gen_flag:
275 |                         masked_im_gen[mask == 1] = np.nan
276 |                     else:
277 |                         masked_im_gen[mask_gen == 1] = np.nan
278 |                     masked_im_gen[comb_mask] = np.nan
279 |                     # Extract SL
280 |                     contours, vec, t_otsu = sl_extract(masked_im, sand_mask, water_mask, masked_im_gen, settings)
281 | 
282 |                 # Process shorelines
283 |                 shoreline_single = process_shoreline(contours, comb_mask,
284 |                                                      georef, image_epsg, settings)
285 | 
286 |                 # Process shoreline for plotting
287 |                 sl_pix = convert_world2pix(shoreline_single, georef)
288 |                 im_RGB = rescale_image_intensity(im_ms[:,:,[2,1,0]], comb_mask, 99.9)
289 | 
290 |                 # Plot shorelines and histogram
291 |                 if settings['generic_sl_region']:
292 |                     fig, ax1, ax2, ax3 = initialise_plot_gen(settings, im_name, index)
293 |                     rgb_plot(ax1, im_RGB, sl_pix, transects)
294 |                     index_plot(ax2, index, t_otsu, comb_mask, sl_pix,
295 |                                transects, fig, settings)
296 |                     histogram_plot(ax3, vec, t_otsu, settings)
297 |                 else:
298 |                     fig, ax1, ax2, ax3, ax4 = initialise_plot(settings, im_name, index)
299 |                     rgb_plot(ax1, im_RGB, sl_pix, transects)
300 |                     #ax1.imshow(mask)
301 |                     class_plot(ax2, im_RGB, im_classif, sl_pix, transects, settings, colours)
302 |                     index_plot(ax3, index, t_otsu, comb_mask, sl_pix,
303 |                                transects, fig, settings)
304 |                     if mask_gen_flag:
305 |                         histogram_plot_split(ax4, index, im_classif, im_ref_buffer_temp, t_otsu, settings, colours)
306 |                     else:
307 |                         histogram_plot_split(ax4, index, im_classif,
                                             im_ref_buffer, t_otsu, settings, colours)
308 | 
309 |                 # Save plot
310 |                 plt.close('all')
311 |                 save_path = settings['index_png_out']
312 |                 save_file = os.path.join(save_path, im_name + ' shoreline plot.png')
313 |                 fig.savefig(save_file, dpi=250)#, bbox_inches='tight', pad_inches=0.7)
314 | 
315 |                 # update dictionary
316 |                 shorelines_out['shorelines'] += [shoreline_single]
317 |                 shorelines_out['date'] += [str(image_dict['timestamp'][0].date())]
318 |                 shorelines_out['time'] += [str(image_dict['timestamp'][0].time())]
319 |                 shorelines_out['timestamp utc'] += [image_dict['timestamp'][0]]
320 |                 shorelines_out['cloud_cover'] += [image_dict['cloud_cover'][0]]
321 |                 shorelines_out['aoi_cover'] += [image_dict['aoi_coverage'][0]]
322 |                 shorelines_out['ps_sat_name'] += [sat]
323 |                 shorelines_out['name'] += [im_name]
324 |                 shorelines_out['sensor'] += [image_dict['Sensor'][0]]
325 |                 shorelines_out['threshold'] += [t_otsu]
326 | 
327 |     # Sort dictionary chronologically
328 |     idx_sorted = sorted(range(len(shorelines_out['timestamp utc'])), key=shorelines_out['timestamp utc'].__getitem__)
329 |     for key in shorelines_out.keys():
330 |         shorelines_out[key] = [shorelines_out[key][i] for i in idx_sorted]
331 | 
332 |     # saving outputs
333 |     print('\n Saving shorelines to pkl and geojson')
334 | 
335 |     # save output structure as output.pkl
336 |     with open(settings['sl_pkl_file'], 'wb') as f:
337 |         pickle.dump(shorelines_out, f)
338 | 
339 |     # convert to geopandas
340 |     gdf = output_to_gdf_PL(shorelines_out)
341 | 
342 |     # set projection
343 |     # gdf.crs = {'init':str(settings['output_epsg'])}
344 |     gdf.crs = settings['output_epsg']
345 | 
346 |     # save as geojson
347 |     gdf.to_file(settings['sl_geojson_file'], driver='GeoJSON', encoding='utf-8')
348 | 
349 |     return shorelines_out
350 | 
351 | 
352 | 
353 | #%%
354 | 
355 | def compute_intersection(shoreline_data, settings):
356 |     """
357 |     Computes the intersection between the 2D shorelines and the shore-normal
358 |     transects. It returns time-series of cross-shore distance along each transect.
359 | 
360 |     KV WRL 2018
361 | 
362 |     Modified YD 2020
363 | 
364 |     Arguments:
365 |     -----------
366 |     shoreline_data: dict
367 |         contains the extracted shorelines and corresponding metadata
368 |     transects: dict (loaded into settings['transects_load'])
369 |         contains the X and Y coordinates of each transect
370 |     settings: dict with the following keys
371 |         'along_dist': int
372 |             alongshore distance considered to calculate the intersection
373 | 
374 |     Returns:
375 |     -----------
376 |     cross_dist: dict
377 |         time-series of cross-shore distance along each of the transects.
378 |         Not tidally corrected.
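The `output.pkl` and geojson files written by `batch_threshold_sl` above can be reloaded for custom analysis. A short usage sketch, with hypothetical output paths standing in for `settings['sl_pkl_file']` and `settings['sl_geojson_file']`:

```python
import pickle
import geopandas as gpd

# hypothetical paths - the real ones are generated from the settings dictionary
with open('outputs/NARRA_output.pkl', 'rb') as f:
    shorelines_out = pickle.load(f)
print(shorelines_out['date'][:3], shorelines_out['threshold'][:3])

gdf = gpd.read_file('outputs/NARRA_shorelines.geojson')
print(gdf.crs, len(gdf))  # shoreline geometries in the chosen output_epsg
```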
379 | 380 | """ 381 | print('Calculating shoreline intersections...') 382 | 383 | # unpack data 384 | shorelines = shoreline_data['shorelines'] 385 | transects = settings['transects_load'] 386 | 387 | # loop through shorelines and compute the median intersection 388 | intersections = np.zeros((len(shorelines),len(transects))) 389 | for i in range(len(shorelines)): 390 | 391 | sl = shorelines[i] 392 | 393 | for j,key in enumerate(list(transects.keys())): 394 | 395 | # compute rotation matrix 396 | X0 = transects[key][0,0] 397 | Y0 = transects[key][0,1] 398 | temp = np.array(transects[key][-1,:]) - np.array(transects[key][0,:]) 399 | phi = np.arctan2(temp[1], temp[0]) 400 | Mrot = np.array([[np.cos(phi), np.sin(phi)],[-np.sin(phi), np.cos(phi)]]) 401 | 402 | # calculate point to line distance between shoreline points and the transect 403 | p1 = np.array([X0,Y0]) 404 | p2 = transects[key][-1,:] 405 | d_line = np.abs(np.cross(p2-p1,sl-p1)/np.linalg.norm(p2-p1)) 406 | # calculate the distance between shoreline points and the origin of the transect 407 | d_origin = np.array([np.linalg.norm(sl[k,:] - p1) for k in range(len(sl))]) 408 | # find the shoreline points that are close to the transects and to the origin 409 | # the distance to the origin is hard-coded here to 1 km 410 | idx_dist = np.logical_and(d_line <= settings['along_dist'], d_origin <= 1000) 411 | # find the shoreline points that are in the direction of the transect (within 90 degrees) 412 | temp_sl = sl - np.array(transects[key][0,:]) 413 | phi_sl = np.array([np.arctan2(temp_sl[k,1], temp_sl[k,0]) for k in range(len(temp_sl))]) 414 | diff_angle = (phi - phi_sl) 415 | idx_angle = np.abs(diff_angle) < np.pi/2 416 | # combine the transects that are close in distance and close in orientation 417 | idx_close = np.where(np.logical_and(idx_dist,idx_angle))[0] 418 | 419 | # in case there are no shoreline points close to the transect 420 | if len(idx_close) == 0: 421 | intersections[i,j] = np.nan 422 | else: 423 | # change of base to shore-normal coordinate system 424 | xy_close = np.array([sl[idx_close,0],sl[idx_close,1]]) - np.tile(np.array([[X0], 425 | [Y0]]), (1,len(sl[idx_close]))) 426 | xy_rot = np.matmul(Mrot, xy_close) 427 | 428 | # remove points that are too far landwards relative to the transect origin (i.e., negative chainage) 429 | xy_rot[0, xy_rot[0,:] < settings['min_val']] = np.nan 430 | 431 | # compute std, median, max, min of the intersections 432 | std_intersect = np.nanstd(xy_rot[0,:]) 433 | max_intersect = np.nanmax(xy_rot[0,:]) 434 | min_intersect = np.nanmin(xy_rot[0,:]) 435 | n_intersect = len(xy_rot[0,:]) 436 | 437 | # quality control the intersections using dispersion metrics (std and range) 438 | condition1 = std_intersect <= settings['max_std'] 439 | condition2 = (max_intersect - min_intersect) <= settings['max_range'] 440 | condition3 = n_intersect > settings['min no. 
intercepts'] 441 | if np.logical_and(np.logical_and(condition1, condition2), condition3): 442 | # compute the median of the intersections along the transect 443 | intersections[i,j] = np.nanmedian(xy_rot[0,:]) 444 | else: 445 | intersections[i,j] = np.nan 446 | 447 | # fill the results into a dictionnary 448 | cross_dist = dict([]) 449 | for j,key in enumerate(list(transects.keys())): 450 | cross_dist[key] = intersections[:,j] 451 | 452 | shoreline_data['cross_distance'] = cross_dist 453 | 454 | # save a .csv file for Excel users 455 | # Initialise data columns 456 | col_list = ['timestamp utc', 'filter', 'ps_sat_name', 'sensor', 'cloud_cover', 'aoi_cover', 'threshold'] 457 | col_names = ['Date', 'Filter', 'PS Satellite Key', 'Sensor', 'Cloud Cover %', 'AOI Coverage %', 'Index Threshold'] 458 | 459 | # Create dataframe 460 | csv_out = pd.DataFrame() 461 | for i in range(len(col_list)): 462 | csv_out[col_names[i]] = shoreline_data[col_list[i]] 463 | 464 | # Add intersection data 465 | for ts in cross_dist: 466 | col = pd.DataFrame(cross_dist[ts]) 467 | csv_out[ts] = col 468 | 469 | # Save file 470 | csv_out = csv_out.round(2) 471 | csv_out.to_csv(settings['sl_transect_csv']) 472 | 473 | return csv_out 474 | 475 | 476 | -------------------------------------------------------------------------------- /coastsat_ps/plotting.py: -------------------------------------------------------------------------------- 1 | # Plotting 2 | 3 | import numpy as np 4 | import rasterio 5 | 6 | import matplotlib.pyplot as plt 7 | import matplotlib.cm as cm 8 | from matplotlib import gridspec 9 | import matplotlib.colors as mcolors 10 | import matplotlib.patches as mpatches 11 | import matplotlib.lines as mlines 12 | 13 | import skimage.morphology as morphology 14 | 15 | import json 16 | from shapely.geometry import shape, LineString 17 | from shapely.ops import transform 18 | from pyproj import Transformer 19 | 20 | 21 | #%% 22 | 23 | class MidpointNormalize(mcolors.Normalize): 24 | """ 25 | Normalise the colorbar so that diverging bars work there way either side from a prescribed midpoint value) 26 | e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100)) 27 | Credit: Joe Kington, http://chris35wills.github.io/matplotlib_diverging_colorbar/ 28 | """ 29 | def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False): 30 | self.midpoint = midpoint 31 | mcolors.Normalize.__init__(self, vmin, vmax, clip) 32 | 33 | def __call__(self, value, clip=None): 34 | # I'm ignoring masked values and all kinds of edge cases to make a 35 | # simple example... 
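The change of base performed in `compute_intersection` above can be checked by hand. A worked numeric example for a single transect and shoreline point (hypothetical coordinates), reproducing the rotation-matrix step:

```python
import numpy as np

# Transect running due east from its origin (phi = 0 for these endpoints)
transect = np.array([[500.0, 200.0], [600.0, 200.0]])
X0, Y0 = transect[0]
dx, dy = transect[-1] - transect[0]
phi = np.arctan2(dy, dx)
Mrot = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])

# One shoreline point 42 m seawards of the transect origin
sl_point = np.array([[542.0, 200.0]])
xy_rot = Mrot @ (sl_point - np.array([X0, Y0])).T
print(xy_rot[0, 0])  # 42.0 -> the cross-shore chainage recorded in the time-series
```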
36 | x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1] 37 | return np.ma.masked_array(np.interp(value, x, y), np.isnan(value)) 38 | 39 | 40 | #%% 41 | 42 | def initialise_plot(settings, im_name, index): 43 | 44 | plt.ioff() 45 | 46 | # create figure 47 | fig = plt.figure() 48 | fig.tight_layout() 49 | fig.set_size_inches([8, 8]) 50 | #mng = plt.get_current_fig_manager() 51 | #mng.window.showMaximized() 52 | 53 | # according to the image shape, decide whether it is better to have the images 54 | # in vertical subplots or horizontal subplots 55 | if index.shape[0] > 0.75*index.shape[1]: 56 | # vertical subplots 57 | gs = gridspec.GridSpec(nrows = 10, ncols = 30, 58 | wspace = 0, hspace = 0.15, 59 | bottom=0.07, top=0.89, 60 | left=0.1, right=0.9) 61 | 62 | ax1 = fig.add_subplot(gs[0:7,0:10]) 63 | ax2 = fig.add_subplot(gs[0:7,10:20], sharex=ax1, sharey=ax1) 64 | ax3 = fig.add_subplot(gs[0:7,20:30], sharex=ax1, sharey=ax1) 65 | ax4 = fig.add_subplot(gs[8:,1:29]) 66 | 67 | else: 68 | # horizontal subplots 69 | print(('\n Code to format non-vertical images for plots is not properly developed - this will not impact shoreline extraction.') + 70 | ('Manually edit initialise_plot function in coastsat_ps > plotting.py file as required.\n')) 71 | gs = gridspec.GridSpec(nrows = 8, ncols = 30, 72 | wspace = 0, hspace = 0.15, 73 | bottom=0.07, top=0.89, 74 | left=0.1, right=0.9) 75 | 76 | ax1 = fig.add_subplot(gs[0:2,:]) 77 | ax2 = fig.add_subplot(gs[2:4,:], sharex=ax1, sharey=ax1) 78 | ax3 = fig.add_subplot(gs[4:6,:], sharex=ax1, sharey=ax1) 79 | ax4 = fig.add_subplot(gs[6:,1:29]) # histogram 80 | 81 | # Set title from im_name 82 | fig.suptitle(settings['water_index'] + ' Water Index with ' + 83 | settings['thresholding'] + ' Thresholding\n' + 84 | im_name, 85 | fontsize = 12) 86 | 87 | return fig, ax1, ax2, ax3, ax4 88 | 89 | 90 | def initialise_plot_gen(settings, im_name, index): 91 | 92 | plt.ioff() 93 | 94 | # create figure 95 | fig = plt.figure() 96 | fig.tight_layout() 97 | fig.set_size_inches([8, 8]) 98 | #mng = plt.get_current_fig_manager() 99 | #mng.window.showMaximized() 100 | 101 | # according to the image shape, decide whether it is better to have the images 102 | # in vertical subplots or horizontal subplots 103 | if index.shape[0] > 0.75*index.shape[1]: 104 | # vertical subplots 105 | gs = gridspec.GridSpec(nrows = 10, ncols = 30, 106 | wspace = 0, hspace = 0.15, 107 | bottom=0.07, top=0.89, 108 | left=0.1, right=0.9) 109 | 110 | ax1 = fig.add_subplot(gs[0:7,0:15]) 111 | ax2 = fig.add_subplot(gs[0:7,15:30], sharex=ax1, sharey=ax1) 112 | ax3 = fig.add_subplot(gs[8:,1:29]) 113 | 114 | else: 115 | # horizontal subplots 116 | print(('\n Code to format non-vertical images for plots is not properly developed - this will not impact shoreline extraction.') + 117 | ('Manually edit initialise_plot_gen function in coastsat_ps > plotting.py file as required.\n')) 118 | gs = gridspec.GridSpec(nrows = 8, ncols = 30, 119 | wspace = 0, hspace = 0.15, 120 | bottom=0.07, top=0.89, 121 | left=0.1, right=0.9) 122 | 123 | ax1 = fig.add_subplot(gs[0:3,:]) 124 | ax2 = fig.add_subplot(gs[3:6,:], sharex=ax1, sharey=ax1) 125 | ax3 = fig.add_subplot(gs[6:,1:29]) # histogram 126 | 127 | # Set title from im_name 128 | fig.suptitle(settings['water_index'] + ' Water Index with ' + 129 | settings['thresholding'] + ' Thresholding\n' + 130 | im_name, 131 | fontsize = 12) 132 | 133 | return fig, ax1, ax2, ax3 134 | 135 | #%% 136 | 137 | 138 | def rgb_plot(ax, im_RGB, sl_pix, transects): 139 | 140 | # Set nan colour 
141 | im_RGB = np.where(np.isnan(im_RGB), 0.3, im_RGB) 142 | 143 | # Plot background RGB im 144 | ax.imshow(im_RGB) 145 | 146 | # Overlay shoreline 147 | ax.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize = 0.3) 148 | 149 | # Plot transects 150 | for pf in transects.keys(): 151 | points = transects[pf] 152 | ax.plot(points[:,0], points[:,1], color = 'k', linestyle = ':') 153 | 154 | # Decide text layout 155 | if points[0,0] > points[1,0]: 156 | ha = 'right' 157 | text = pf + ' ' 158 | else: 159 | ha = 'left' 160 | text = ' ' + pf 161 | 162 | ax.text(points[1,0], points[1,1], text, fontsize = 8, color = 'white', 163 | ha = ha, va = 'center') 164 | 165 | # Figure settings 166 | ax.axis('off') 167 | ax.set_title('RGB', fontsize=10) 168 | 169 | 170 | #%% 171 | 172 | def class_plot(ax, im_RGB, im_classif, sl_pix, transects, settings, colours, include_lines = True): 173 | 174 | # compute classified image 175 | im_class = np.copy(im_RGB) 176 | 177 | # Create coastsat class format 178 | im_sand = im_classif == 1 179 | im_swash = im_classif == 2 180 | im_water = im_classif == 3 181 | 182 | # remove small patches of sand or water that could be around the image (usually noise) 183 | im_sand = morphology.remove_small_objects(im_sand, min_size=settings['min_beach_area_pixels'], connectivity=2) 184 | im_water = morphology.remove_small_objects(im_water, min_size=settings['min_beach_area_pixels'], connectivity=2) 185 | im_labels = np.stack((im_sand,im_swash,im_water), axis=-1) 186 | 187 | # Apply colours 188 | for k in range(0,im_labels.shape[2]): 189 | im_class[im_labels[:,:,k],0] = colours[k,0] 190 | im_class[im_labels[:,:,k],1] = colours[k,1] 191 | im_class[im_labels[:,:,k],2] = colours[k,2] 192 | 193 | # Set nan colour 194 | im_class = np.where(np.isnan(im_class), 0.3, im_class) 195 | 196 | # Plot classes over RGB 197 | ax.imshow(im_class) 198 | 199 | if include_lines: 200 | # Plot shoreline 201 | ax.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize = 0.3) 202 | 203 | # Plot transects 204 | for pf in transects.values(): 205 | ax.plot(pf[:,0], pf[:,1], color = 'k', linestyle = ':') 206 | 207 | # Plot colours 208 | orange_patch = mpatches.Patch(color=colours[0,:], label='sand') 209 | white_patch = mpatches.Patch(color=colours[1,:], label='whitewater') 210 | blue_patch = mpatches.Patch(color=colours[2,:], label='water') 211 | black_patch = mpatches.Patch(color='0.3', label='nan/cloud') 212 | black_line = mlines.Line2D([],[],color='k',linestyle='-', label='shoreline') 213 | red_line = mlines.Line2D([],[],color='k',linestyle=':', label='transects') 214 | 215 | # Add legend 216 | ax.legend(handles=[orange_patch, white_patch, blue_patch, black_patch, 217 | black_line, red_line], 218 | bbox_to_anchor=(0.5, 0), loc='upper center', fontsize=9, 219 | ncol = 6) 220 | 221 | # General settings 222 | ax.axis('off') 223 | ax.set_title('Classified Image', fontsize=10) 224 | 225 | 226 | 227 | #%% 228 | 229 | 230 | def index_plot(ax, index_in, t_otsu, comb_mask, sl_pix, transects, fig, settings): 231 | 232 | # Mask index 233 | index = np.copy(index_in) 234 | index[comb_mask] = np.nan 235 | 236 | # Find index limits 237 | min = np.nanmin(index) 238 | max = np.nanmax(index) 239 | 240 | # Plot colourised index 241 | cmap = plt.cm.coolwarm # red to blue 242 | cmap.set_bad(color='0.3') 243 | cax = ax.imshow(index, 244 | cmap=cmap, 245 | clim=(min, max), 246 | norm=MidpointNormalize(midpoint = t_otsu, 247 | vmin=min, vmax=max)) 248 | 249 | # Overlay shoreline 250 | ax.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize = 0.3) 251 | 
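`class_plot` below relies on `skimage.morphology.remove_small_objects` to suppress isolated misclassified pixels before colouring the classes. A toy illustration of the `min_size` behaviour (note the package expresses `min_beach_area_pixels` in 3 m PlanetScope pixels, i.e. area in m² divided by 9):

```python
import numpy as np
from skimage import morphology

# Toy 'sand' mask: one 3x3 blob (9 px) and one isolated pixel
sand = np.zeros((8, 8), dtype=bool)
sand[1:4, 1:4] = True
sand[6, 6] = True

cleaned = morphology.remove_small_objects(sand, min_size=5, connectivity=2)
print(sand.sum(), cleaned.sum())  # 10 -> 9: the isolated pixel is dropped
```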
252 | # Plot transects 253 | for pf in transects.values(): 254 | ax.plot(pf[:,0], pf[:,1], color = 'k', linestyle = ':') 255 | 256 | # Add colourbar 257 | cbar = fig.colorbar(cax, ax = ax, orientation='vertical', shrink=0.65) 258 | cbar.set_label(settings['water_index'] + ' Pixel Value', rotation=270, labelpad=10) 259 | 260 | # Figure settings 261 | ax.axis('off') 262 | ax.set_title(settings['water_index'], fontsize=10) 263 | 264 | 265 | #%% 266 | 267 | def histogram_plot(ax, vec, t_otsu, settings): 268 | 269 | # Set labels 270 | ax.set_title(settings['water_index'] + ' Pixel Value Histogram Thresholding', 271 | fontsize = 10) 272 | ax.set_xlabel(settings['water_index'] + ' Pixel Value', fontsize = 10) 273 | #ax.set_ylabel("Pixel Count", fontsize= 10) 274 | ax.set_ylabel("Pixel Class PDF", fontsize= 10) 275 | ax.axes.yaxis.set_ticks([]) 276 | 277 | # Plot threshold value(s) 278 | ax.axvline(x = t_otsu, color = 'k', linestyle = '--', label = 'Threshold Value') 279 | 280 | # Add legend 281 | ax.legend(bbox_to_anchor = (1,1), loc='lower right', framealpha = 1, 282 | fontsize = 8) #, fontsize = 'xx-small') 283 | 284 | # Plot histogram 285 | ax.hist(vec, settings['otsu_hist_bins'], color='blue', alpha=0.8, density=True) 286 | 287 | 288 | 289 | def histogram_plot_split(ax, index, im_classif, im_ref_buffer, t_otsu, settings, colours): 290 | 291 | # Set labels 292 | ax.set_title(settings['water_index'] + ' Pixel Value Histogram Thresholding', 293 | fontsize = 10) 294 | ax.set_xlabel(settings['water_index'] + ' Pixel Value', fontsize = 10) 295 | ax.set_ylabel("Pixel Class PDF", fontsize= 10) 296 | ax.axes.yaxis.set_ticks([]) 297 | # Plot threshold value(s) 298 | l1 = ax.axvline(x = t_otsu, color = 'k', linestyle = '--', label = 'threshold') 299 | 300 | # Add legend 301 | grey_patch = mpatches.Patch(color='0.5', label='other') 302 | ax.legend(handles=[grey_patch, l1], bbox_to_anchor = (1,1), loc='upper right', framealpha = 1, 303 | fontsize = 9) #, fontsize = 'xx-small') 304 | 305 | # Organise colours 306 | col_list = ['0.5', # Other 307 | colours[0,:], # Sand 308 | colours[1,:], # WW 309 | colours[2,:]] # Water 310 | 311 | # Plot histograms 312 | for i in [0,1,2,3]: 313 | # Extract mask 314 | class_im = im_classif == i 315 | mask_class = ~class_im 316 | mask_all = (mask_class + im_ref_buffer) >0 317 | 318 | # Copy index 319 | idx_copy = np.copy(index) 320 | 321 | # Remove nan 322 | if i == 0: 323 | idx_copy[mask_class] = np.nan 324 | else: 325 | idx_copy[mask_all] = np.nan 326 | 327 | # Create vec 328 | vec = idx_copy.reshape(idx_copy.shape[0] * idx_copy.shape[1]) 329 | vec = vec[~np.isnan(vec)] 330 | 331 | # Plot histogram 332 | ax.hist(vec, settings['otsu_hist_bins'], color=col_list[i], alpha=0.8, 333 | density=True) 334 | 335 | 336 | #%% 337 | 338 | def check_land_mask(settings): 339 | 340 | # Load the RGB image 341 | rgb_path = settings['georef_im_path'] 342 | with rasterio.open(rgb_path) as src_rgb: 343 | rgb_image = src_rgb.read([3, 2, 1]) # Read the RGB bands 344 | 345 | # Check the maximum value of the image 346 | max_value = rgb_image.max() 347 | 348 | # Normalize the RGB image if necessary (for visualization) 349 | if max_value > 1.0: 350 | rgb_image = rgb_image.astype(float) / max_value 351 | 352 | # Load the single-band mask image 353 | mask_path = settings['land_mask'] 354 | with rasterio.open(mask_path) as src_mask: 355 | mask_image = src_mask.read(1) # Read the single band 356 | 357 | # Normalize the mask (if necessary) to have values between 0 and 1 358 | mask_image = 
mask_image.astype(float) 359 | mask_image = mask_image / mask_image.max() 360 | 361 | # Invert the mask 362 | mask_image = 1 - mask_image 363 | 364 | # Apply the mask to the RGB image 365 | masked_rgb_image = np.copy(rgb_image) 366 | for i in range(3): # Apply mask on each channel (R, G, B) 367 | masked_rgb_image[i] = rgb_image[i] * mask_image 368 | 369 | # Plot the images 370 | fig, ax = plt.subplots(1, 3, figsize=(15, 5)) 371 | 372 | # Plot the original RGB image 373 | ax[0].imshow(np.moveaxis(rgb_image, 0, -1)) # Move the channels to the last dimension 374 | ax[0].set_title('Co-registration reference image') 375 | ax[0].axis('off') 376 | 377 | # Initialise classifier colours 378 | cmap = cm.get_cmap('tab20c') 379 | colorpalette = cmap(np.arange(0,13,1)) 380 | colours = np.zeros((3,4)) 381 | colours[0,:] = colorpalette[5] # sand 382 | colours[1,:] = np.array([150/255,1,1,1]) # ww 383 | colours[2,:] = np.array([0,91/255,1,1]) # water 384 | 385 | # classify image 386 | class_path = rgb_path.replace('_im_ref.tif', '_class.tif') 387 | with rasterio.open(class_path) as src: 388 | im_classif = src.read(1) 389 | 390 | # plot classified image 391 | rgb_image_reshaped = np.transpose(rgb_image, (1, 2, 0)) 392 | class_plot(ax[1], rgb_image_reshaped, im_classif, None, None, settings, colours, include_lines = False) 393 | ax[1].axis('off') 394 | ax[1].set_title('Classified image') 395 | 396 | # Plot the masked RGB image 397 | ax[2].imshow(np.moveaxis(masked_rgb_image, 0, -1)) # Move the channels to the last dimension 398 | ax[2].set_title('Land mask region') 399 | ax[2].axis('off') 400 | 401 | plt.show(block=False) 402 | 403 | # save image 404 | save_loc = settings['georef_im_path'].replace('.tif', '_and_land_mask_figure.png') 405 | plt.savefig(save_loc, bbox_inches='tight', dpi=200) 406 | 407 | 408 | def plot_inputs(settings): 409 | 410 | # Initialise figure 411 | fig, ax = plt.subplots(1, 2, figsize=(15, 5)) 412 | 413 | ############################## 414 | # Load the RGB image 415 | with rasterio.open(settings['ref_merge_im']) as src_rgb: 416 | rgb_image = src_rgb.read([3, 2, 1]) # Read the RGB bands 417 | rgb_transform = src_rgb.transform 418 | # Normalize the RGB image if necessary 419 | if rgb_image.max() > 1.0: 420 | rgb_image = rgb_image.astype(float) / rgb_image.max() 421 | # Plot the RGB image 422 | left, bottom, right, top = rasterio.transform.array_bounds(rgb_image.shape[1], rgb_image.shape[2], rgb_transform) 423 | 424 | ax[0].set_title('Georectified reference image') 425 | ax[0].imshow(np.moveaxis(rgb_image, 0, -1), extent=[left, right, bottom, top], zorder=0) 426 | 427 | ax[1].set_title('Run inputs') 428 | ax[1].imshow(np.moveaxis(rgb_image, 0, -1), extent=[left, right, bottom, top], zorder=0) 429 | 430 | ############################## 431 | # plot transects 432 | transects = settings['transects_load'] 433 | for i, ts in enumerate(transects): 434 | transect_coords = np.array(transects[ts]) 435 | if i==0: 436 | # plot transects 437 | ax[1].plot(transect_coords[:, 0], transect_coords[:, 1], color='darkred', lw=1.5, label='Transects', zorder=20) 438 | else: 439 | # plot transects 440 | ax[1].plot(transect_coords[:, 0], transect_coords[:, 1], color='darkred', lw=1.5, zorder=20) 441 | # plot transect origin 442 | ax[1].scatter(transect_coords[0, 0], transect_coords[0, 1], facecolor='black', edgecolor='white', linewidth=0.5, marker='o', zorder=30) 443 | 444 | ############################## 445 | # import ref sl 446 | ref_shoreline = np.array(settings['reference_shoreline']) 447 | # plot ref sl 
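The masking step in `check_land_mask` above is plain array broadcasting: each RGB channel is multiplied by the inverted land mask so land pixels are blacked out. A tiny sketch with hypothetical values:

```python
import numpy as np

# Hypothetical 2x2 RGB image (bands first, as read by rasterio) and land mask
rgb = np.full((3, 2, 2), 0.8)
land = np.array([[1, 0], [0, 1]], dtype=float)  # 1 = land, 0 = water

masked = rgb * (1 - land)  # broadcasting zeroes out the land pixels per channel
print(masked[0])  # [[0.  0.8], [0.8 0. ]]
```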
448 | ax[1].plot(ref_shoreline[:, 0], ref_shoreline[:, 1], color='black', lw=2, linestyle=':', label='Ref SL') 449 | # add buffer around ref sl 450 | shoreline_buffer = LineString(ref_shoreline).buffer(settings['max_dist_ref'] ) # Create the buffer 451 | # Plot the buffered area around the shoreline 452 | buffered_patch = mpatches.Polygon( 453 | np.array(shoreline_buffer.exterior.coords), 454 | closed=True, 455 | edgecolor='black', 456 | facecolor=mcolors.to_rgba('white', alpha=0.1), 457 | lw=1, 458 | label='Ref SL buffer', 459 | zorder=15) 460 | ax[1].add_patch(buffered_patch) 461 | 462 | ############################## 463 | # import aoi 464 | with open(settings['aoi_geojson'], 'r') as f: 465 | geojson_data = json.load(f) 466 | polygon = shape(geojson_data['features'][0]['geometry']) 467 | transformer = Transformer.from_crs("EPSG:4326", settings['output_epsg'], always_xy=True) 468 | projected_polygon = transform(transformer.transform, polygon) 469 | # plot AOI 470 | polygon_patch = mpatches.Polygon( 471 | np.array(projected_polygon.exterior.coords), 472 | closed=True, 473 | edgecolor='red', 474 | linestyle='-', 475 | facecolor='none', 476 | lw=1.5, 477 | label='AOI', 478 | zorder=10) 479 | ax[1].add_patch(polygon_patch) 480 | 481 | ############################## 482 | # Get input bounds 483 | shoreline_bounds = shoreline_buffer.bounds 484 | aoi_bounds = projected_polygon.bounds 485 | # Get transects bounds 486 | transect_bounds = [np.array(transects[ts]).T for ts in transects] # Extract all coordinates 487 | transect_minx = min([coords[0].min() for coords in transect_bounds]) 488 | transect_maxx = max([coords[0].max() for coords in transect_bounds]) 489 | transect_miny = min([coords[1].min() for coords in transect_bounds]) 490 | transect_maxy = max([coords[1].max() for coords in transect_bounds]) 491 | 492 | # Combine all bounds (find the overall min and max for both x and y) 493 | minx = min(aoi_bounds[0], shoreline_bounds[0], transect_minx, left) 494 | miny = min(aoi_bounds[1], shoreline_bounds[1], transect_miny, bottom) 495 | maxx = max(aoi_bounds[2], shoreline_bounds[2], transect_maxx, right) 496 | maxy = max(aoi_bounds[3], shoreline_bounds[3], transect_maxy, top) 497 | 498 | # Set the new limits for the axis 499 | padding_x = (maxx - minx) * 0.01 500 | padding_y = (maxy - miny) * 0.01 501 | for plot in [0, 1]: 502 | ax[plot].set_xlim(minx - padding_x, maxx + padding_x) 503 | ax[plot].set_ylim(miny - padding_y, maxy + padding_y) 504 | ax[plot].axis('off') 505 | 506 | # Plot params 507 | # ax[1].legend() 508 | ax[1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=4) 509 | plt.show(block=False) 510 | 511 | ############################## 512 | # save image 513 | save_loc = settings['georef_im_path'].replace('im_ref.tif', 'inputs_figure.png') 514 | plt.savefig(save_loc, bbox_inches='tight', dpi=200) 515 | 516 | -------------------------------------------------------------------------------- /coastsat_ps/postprocess.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import pandas as pd 4 | from scipy import signal 5 | import copy 6 | import matplotlib.ticker as ticker 7 | import matplotlib.dates as mdates 8 | import os 9 | 10 | from coastsat_ps.preprocess_tools import create_folder 11 | 12 | 13 | #%% 14 | 15 | def get_closest_datapoint(dates, dates_ts, values_ts): 16 | """ 17 | Extremely efficient script to get closest data point to a set of dates from a very 18 | long time-series 
(e.g., 15-minute tide data, or hourly wave data)
19 | 
20 |     Make sure that dates and dates_ts are in the same timezone (both timezone-aware or both naive)
21 | 
22 |     KV WRL 2020
23 | 
24 |     Arguments:
25 |     -----------
26 |     dates: list of datetimes
27 |         dates at which the closest point from the time-series should be extracted
28 |     dates_ts: list of datetimes
29 |         dates of the long time-series
30 |     values_ts: np.array
31 |         array with the values of the long time-series (tides, waves, etc...)
32 | 
33 |     Returns:
34 |     -----------
35 |     values: np.array
36 |         values corresponding to the input dates
37 | 
38 |     """
39 | 
40 |     # check if the time-series cover the dates
41 |     if dates[0] < dates_ts[0] or dates[-1] > dates_ts[-1]:
42 |         raise Exception('Time-series do not cover the range of your input dates')
43 | 
44 |     # get the first time-series value after each date (no interpolation)
45 |     temp = []
46 |     def find(item, lst):
47 |         start = 0
48 |         start = lst.index(item, start)
49 |         return start
50 |     for i,date in enumerate(dates):
51 |         print('\rExtracting closest tide to PS timestamps: %d%%' % int((i+1)*100/len(dates)), end='')
52 |         temp.append(values_ts[find(min(item for item in dates_ts if item > date), dates_ts)])
53 |     values = np.array(temp)
54 | 
55 |     return values
56 | 
57 | 
58 | #%% Tidal correction
59 | 
60 | def tidal_correction(settings, tide_settings, sl_csv):
61 | 
62 |     # Initialise
63 |     if type(tide_settings['beach_slope']) is list:
64 |         if len(tide_settings['beach_slope']) != len(settings['transects_load'].keys()):
65 |             raise Exception('Beach slope list length does not match number of transects')
66 | 
67 |     # unpack settings
68 |     weight = tide_settings['weighting']
69 |     contour = tide_settings['contour']
70 |     offset = tide_settings['offset']
71 |     mindate = tide_settings['date_min']
72 |     maxdate = tide_settings['date_max']
73 | 
74 |     # import sl data
75 |     sl_csv_tide = copy.deepcopy(sl_csv)
76 |     sl_csv_tide.loc[:,'Date'] = pd.to_datetime(sl_csv_tide.loc[:,'Date'], utc = True)
77 | 
78 |     # Filter by date
79 |     sl_csv_tide = sl_csv_tide[sl_csv_tide['Date'] > pd.to_datetime(mindate, utc = True)]
80 |     sl_csv_tide = sl_csv_tide[sl_csv_tide['Date'] < pd.to_datetime(maxdate, utc = True)]
81 | 
82 |     # Keep only images that passed the manual filter step
83 |     sl_csv_tide = sl_csv_tide[sl_csv_tide['Filter'] == 1]
84 | 
85 |     # Import tide data
86 |     tide_data = pd.read_csv(os.path.join(settings['user_input_folder'], settings['tide_data']), parse_dates=['dates'])
87 |     dates_ts = [_.to_pydatetime() for _ in tide_data['dates']]
88 |     tides_ts = np.array(tide_data['tide'])
89 | 
90 |     # get tide levels corresponding to the time of image acquisition
91 |     dates_sat = sl_csv_tide['Date'].to_list()
92 |     sl_csv_tide['Tide'] = get_closest_datapoint(dates_sat, dates_ts, tides_ts)
93 | 
94 |     # Perform correction for each transect
95 |     for i, ts in enumerate(settings['transects_load'].keys()):
96 |         # Select beach slope
97 |         if type(tide_settings['beach_slope']) is not list:
98 |             beach_slope = tide_settings['beach_slope']
99 |         else:
100 |             beach_slope = tide_settings['beach_slope'][i]
101 | 
102 |         # Select ts data
103 |         ps_data = copy.deepcopy(sl_csv_tide[['Date',ts, 'Tide']])
104 |         ps_data = ps_data.set_index('Date')
105 | 
106 |         # apply correction (horizontal shift = weight*(tide - contour)/slope + offset)
107 |         correction = weight*(ps_data['Tide']-contour)/beach_slope + offset
108 |         sl_csv_tide.loc[:, ts] += correction.values
109 | 
110 |     # Plot tide matching
111 |     fig, ax = plt.subplots(1,1,figsize=(15,4), tight_layout=True)
112 |     ax.grid(which='major', linestyle=':', color='0.5')
113 |     ax.plot(dates_ts, tides_ts, '-', color='0.6', label='all time-series')
114 | 
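The tide-matching loop in `get_closest_datapoint` above picks the first tide record after each image timestamp. An equivalent lookup (and a faster one for long records) can be written with `pandas.merge_asof`; this is a sketch with hypothetical data, not the package's implementation:

```python
import pandas as pd

# Hypothetical tide record and image timestamps (both UTC)
tides = pd.DataFrame({'dates': pd.date_range('2020-01-01', periods=96,
                                              freq='15min', tz='UTC'),
                      'tide': 0.5})
images = pd.DataFrame({'Date': pd.to_datetime(['2020-01-01 03:07'], utc=True)})

# direction='forward' mirrors the loop above (first record after each image);
# direction='nearest' would give the truly closest record instead
matched = pd.merge_asof(images.sort_values('Date'), tides.sort_values('dates'),
                        left_on='Date', right_on='dates', direction='forward')
print(matched[['Date', 'tide']])
```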
ax.plot(dates_sat, sl_csv_tide['Tide'], '-o', color='k', ms=6, mfc='w',lw=1, label='image acquisition') 115 | ax.set(ylabel='tide level [m]',xlim=[dates_sat[0],dates_sat[-1]], title='Water levels at the time of image acquisition'); 116 | ax.legend(); 117 | plt.show(block=False) 118 | plt.savefig(settings['sl_transect_csv'].replace('.csv', '_tide_time_plot.png'), bbox_inches='tight', dpi=300) 119 | 120 | # save csv 121 | sl_csv_tide = sl_csv_tide.round(2) 122 | sl_csv_tide.to_csv(settings['sl_transect_csv'].replace('.csv', '_tide_corr.csv')) 123 | 124 | return sl_csv_tide 125 | 126 | 127 | #%% Single transect plot 128 | 129 | def ts_plot_single(settings, sl_csv, transect, savgol, x_scale): 130 | 131 | # import PS data and remove nan 132 | ps_data = copy.deepcopy(sl_csv[['Date',transect]]) 133 | ps_data.loc[:,'Date'] = pd.to_datetime(sl_csv.loc[:,'Date'], utc = True) 134 | ps_data = ps_data.set_index('Date') 135 | ps_data = ps_data[np.isfinite(ps_data[transect])] 136 | mean_ps = np.nanmean(ps_data[transect]) 137 | 138 | # Initialise figure 139 | fig = plt.figure(figsize=(8,3)) 140 | ax = fig.add_subplot(111) 141 | ax.set_title(settings['site_name'] + ' Transect ' + transect + ' Timeseries Plot') 142 | ax.set(ylabel='Chainage [m]') 143 | ax.set(xlabel='Date [UTC]') 144 | 145 | # Mean Position line 146 | l2 = ax.axhline(y = mean_ps, color='k', linewidth=0.75, label='Mean PS Position', zorder = 2) 147 | 148 | # Number of days 149 | no_days = (max(ps_data.index)-min(ps_data.index)).days 150 | 151 | #savgol = False 152 | if savgol == True: 153 | if no_days < 16: 154 | raise Exception('SavGol filter requires >15 days in timeseries') 155 | 156 | # PS plot 157 | l1 = ax.fill_between(ps_data.index, ps_data[transect], y2 = mean_ps, alpha = 0.35, color = 'grey', label='PS Data', zorder = 3) 158 | #l1 = ax.scatter(ps_data.index, ps_data[transect], color = 'k', label='PS Data', marker = 'x', s = 10, linewidth = 0.5, zorder = 1)#, alpha = 0.75) 159 | l1, = ax.plot(ps_data.index, ps_data[transect], linewidth = 0.75, alpha = 0.4, color = 'k', label='PS Data', zorder = 4) 160 | 161 | # savgol plot rolling mean 162 | roll_days = 15 163 | interp_PL = pd.DataFrame(ps_data.resample('D').mean().interpolate(method = 'linear')) 164 | interp_PL_sav = signal.savgol_filter(interp_PL[np.isfinite(interp_PL)][transect], roll_days, 2) 165 | l3, = ax.plot(interp_PL.index, interp_PL_sav, linewidth = 0.75, alpha = 0.7, color = 'r', label=str(roll_days) + ' Day SavGol Filter', zorder = 5) 166 | #l3 = ax.fill_between(interp_PL.index, interp_PL[ts], y2 = mean_GT, alpha = 0.35, color = 'grey', label=str(roll_days) + ' Day SavGol Filter', zorder = 0) 167 | 168 | # Set legend 169 | ax.legend(handles = [l1, l2, l3], ncol = 3, bbox_to_anchor = (0,1), loc='upper left', framealpha = 1, fontsize = 'xx-small') 170 | else: 171 | # PS plot 172 | l1 = ax.fill_between(ps_data.index, ps_data[transect], y2 = mean_ps, alpha = 0.25, color = 'grey', label='PS Data', zorder = 3) 173 | l1, = ax.plot(ps_data.index, ps_data[transect], linewidth = 0.75, alpha = 0.6, color = 'k', label='PS Data', zorder = 4) 174 | 175 | # Set legend 176 | ax.legend(handles = [l1, l2], ncol = 3, bbox_to_anchor = (0,1), loc='upper left', framealpha = 1, fontsize = 'xx-small') 177 | 178 | 179 | # Find axis bounds 180 | if abs(max(ps_data[transect]))-mean_ps > mean_ps-abs(min(ps_data[transect])): 181 | bound = abs(max(ps_data[transect]))-mean_ps+5 182 | else: 183 | bound = mean_ps-abs(min(ps_data[transect]))+5 184 | 185 | # Set axis limits 186 | ax.set_ylim(top = mean_ps 
+ bound) 187 | ax.set_ylim(bottom = mean_ps - bound) 188 | 189 | ax.set_xlim([min(ps_data.index)-(max(ps_data.index)-min(ps_data.index))/40, 190 | max(ps_data.index)+(max(ps_data.index)-min(ps_data.index))/40]) 191 | 192 | # Set grid and axis label ticks 193 | ax.grid(b=True, which='major', linestyle='-') 194 | ax.yaxis.set_major_locator(ticker.MultipleLocator(5)) 195 | ax.tick_params(labelbottom=False, bottom = False) 196 | ax.tick_params(axis = 'y', which = 'major', labelsize = 6) 197 | 198 | if x_scale == 'years': 199 | ax.xaxis.set_major_locator(mdates.YearLocator()) 200 | ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y')) 201 | #ax.set_yticklabels([]) 202 | ax.tick_params(labelbottom=True, bottom = True) 203 | ax.xaxis.set_minor_locator(mdates.MonthLocator()) 204 | elif x_scale == 'months': 205 | if no_days > 200: 206 | raise Exception('Too many dates to render months properly, try x_ticks = years') 207 | else: 208 | ax.xaxis.set_major_locator(mdates.MonthLocator()) 209 | ax.xaxis.set_major_formatter(mdates.DateFormatter('%b')) 210 | ax.tick_params(labelbottom=True, bottom = True) 211 | ax.xaxis.set_minor_locator(mdates.DayLocator()) 212 | elif x_scale == 'days': 213 | if no_days > 100: 214 | raise Exception('Too many dates to render days properly, try x_ticks = years') 215 | elif no_days > 60: 216 | raise Exception('Too many dates to render days properly, try x_ticks = months') 217 | else: 218 | ax.xaxis.set_major_locator(mdates.MonthLocator()) 219 | ax.xaxis.set_major_formatter(mdates.DateFormatter('%b')) 220 | ax.tick_params(labelbottom=True, bottom = True) 221 | ax.xaxis.set_minor_locator(mdates.DayLocator()) 222 | ax.xaxis.set_minor_formatter(mdates.DateFormatter('%d')) 223 | else: 224 | raise Exception('Select either years, months or days for x_scale input') 225 | 226 | 227 | # save plot 228 | save_folder = os.path.join(settings['sl_thresh_ind'], 'Timeseries Plots') 229 | create_folder(save_folder) 230 | fig.tight_layout() 231 | save_file = os.path.join(save_folder, 'transect_' + transect + '_timeseries.png') 232 | fig.savefig(save_file, dpi=200) 233 | 234 | plt.show(block=False) 235 | 236 | 237 | 238 | -------------------------------------------------------------------------------- /coastsat_ps/preprocess_tools.py: -------------------------------------------------------------------------------- 1 | # Tools for pre-processing functions 2 | # YD, Sep 2020 3 | 4 | import pathlib 5 | import rasterio 6 | import os 7 | import subprocess 8 | import numpy as np 9 | import skimage.morphology as morphology 10 | from shutil import copyfile 11 | 12 | from coastsat_ps.shoreline_tools import classify_single 13 | 14 | #from shapely.geometry import box 15 | from arosics import COREG_LOCAL, DESHIFTER, COREG 16 | from xml.dom import minidom 17 | 18 | #%% General Functions 19 | 20 | def create_folder(filepath): 21 | ''' Creates a filepath if it doesn't already exist 22 | Will not overwrite files that exist 23 | Assign filepath string to a variable 24 | ''' 25 | pathlib.Path(filepath).mkdir(exist_ok=True) 26 | return filepath 27 | 28 | 29 | #%% UDM load functions 30 | 31 | def load_udm(udm_filename): 32 | '''Load single-band bit-encoded UDM as a 2D array 33 | 34 | Source: 35 | https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/udm/udm.ipynb 36 | 37 | ''' 38 | with rasterio.open(udm_filename, 'r') as src: 39 | udm = src.read()[0,...] 
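`load_udm` here and `udm_to_mask` just below decode Planet's bit-encoded Unusable Data Mask with a bitwise AND. A toy demonstration of the decoding (the real bit meanings are defined by Planet's UDM specification; the encoding below is invented purely for illustration):

```python
import numpy as np

# Hypothetical 2x2 UDM: 0 = clear, bit 0 set = blackfill, bit 1 set = cloud
udm = np.array([[0, 1], [2, 3]], dtype=np.uint8)

bit_string = '00000010'          # test bit 1 only (cloud in this toy encoding)
test_bits = int(bit_string, 2)   # -> 2
mask = (udm & test_bits) != 0
print(mask)  # [[False False], [ True  True]]
```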
40 | return udm 41 | 42 | 43 | def udm_to_mask(udm_array, bit_string): 44 | '''Create a mask from the udm, masking only pixels with RGB quality concerns 45 | 46 | Source: 47 | https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/udm/udm.ipynb 48 | 49 | ''' 50 | test_bits = int(bit_string,2) 51 | bit_matches = udm_array & test_bits # bit-wise logical AND operator (&) 52 | return bit_matches != 0 # mask any pixels that match test bits 53 | 54 | 55 | #%% Mask manipulation functions 56 | 57 | def save_mask(settings, udm_filepath, save_path, bit_string, cloud_issue = False, nan_issue = False): 58 | 59 | ''' Save extracted mask ''' 60 | 61 | # open udm file 62 | with rasterio.open(udm_filepath, 'r') as src: 63 | udm = src.read()[0,...] 64 | kwargs = src.meta 65 | 66 | # Create mask 67 | mask = udm_to_mask(udm, bit_string) 68 | 69 | # Remove small elements from cloud mask (misdetections) 70 | if cloud_issue: 71 | # Remove cloud pixels that form thin features. These are beach, building or swash pixels that are 72 | # erroneously identified as clouds by UDM cloud detection algorithm 73 | if sum(sum(mask)) > 0 and sum(sum(~mask)) > 0: 74 | # Remove long/thin mask elements (often WW) 75 | elem = morphology.square(25) # use a square of width 25 pixels (75m) 76 | mask = morphology.binary_opening(mask,elem) # perform image opening 77 | 78 | # remove objects with less than 25*75 connected pixels (~75m*225m = 16,875m^2) 79 | morphology.remove_small_objects(mask, min_size=25*75, connectivity=1, in_place=True) 80 | 81 | if nan_issue: 82 | # Remove nan pixels that form thin features 83 | if sum(sum(mask)) > 0 and sum(sum(~mask)) > 0: 84 | # Remove long/thin mask elements 85 | elem = morphology.square(5) # use a square of width 5 pixels (15m) 86 | mask = morphology.binary_opening(mask,elem) # perform image opening 87 | 88 | # remove small objects 89 | morphology.remove_small_objects(mask, min_size=500, connectivity=2, in_place=True) 90 | # remove 91 | 92 | # save mask 93 | with rasterio.open(save_path, 'w', **kwargs) as dst: 94 | dst.write_band(1, mask.astype(rasterio.uint8)) 95 | 96 | # arosics workaround 97 | if settings['arosics_reproject'] == True: 98 | crs_in = str(src.crs).replace('epsg', 'EPSG') 99 | if crs_in != settings['output_epsg']: 100 | if 'NaN_mask' in save_path: 101 | nan_val = '1' 102 | else: 103 | nan_val = '0' 104 | raster_change_epsg(settings, save_path, nan_val, crs_in) 105 | 106 | 107 | def get_cloud_percentage_nan_cloud(nan_path, cloud_mask_path): 108 | # import nan and cloud masks 109 | nan_binary = load_udm(nan_path) 110 | cloud_binary = load_udm(cloud_mask_path) 111 | # calculate cover percentage 112 | cloud_perc = round( 113 | np.sum(cloud_binary) / 114 | ( cloud_binary.shape[1]*cloud_binary.shape[0] - np.sum(nan_binary) ) 115 | *100,2) 116 | 117 | return cloud_perc 118 | 119 | 120 | def get_file_extent(nan_path): 121 | nan_mask = load_udm(nan_path) 122 | aoi_area = nan_mask.shape[1]*nan_mask.shape[0] 123 | nan_area = np.sum(nan_mask) 124 | image_extent = 100-(nan_area/aoi_area)*100 125 | 126 | return image_extent 127 | 128 | 129 | def zero_to_nan(im_path, nan_path, faulty_pixels = True, write = True): 130 | 131 | ''' Add zero values in image bands to nan mask ''' 132 | 133 | # Open mask 134 | with rasterio.open(nan_path, 'r') as src: 135 | nan_mask = src.read()[0,...] 
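The `binary_opening` cleanup used in `save_mask` above removes mask elements thinner than the structuring element while leaving solid regions intact. A scaled-down sketch (the code above uses `morphology.square(25)`, i.e. 75 m at 3 m pixels; a 3 px square keeps the toy arrays small):

```python
import numpy as np
from skimage import morphology

# Toy cloud mask: a 1-pixel-wide strip (e.g. misdetected swash) plus a 6x6 blob
mask = np.zeros((12, 12), dtype=bool)
mask[2, 1:11] = True       # thin strip
mask[5:11, 5:11] = True    # genuine cloud blob

opened = morphology.binary_opening(mask, morphology.square(3))
print(mask.sum(), opened.sum())  # 46 -> 36: the strip is erased, the blob survives
```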
136 |         kwargs = src.meta
137 | 
138 |     # Initialise update mask
139 |     mask_out = np.zeros((nan_mask.shape[0], nan_mask.shape[1]), dtype = int)
140 | 
141 |     # for each band, find zero values and add to nan_mask
142 |     for i in [1,2,3,4]:
143 |         # open band
144 |         with rasterio.open(im_path, 'r') as src1:
145 |             im_band = src1.read(i)
146 | 
147 |         # boolean of band pixels with zero value
148 |         im_zero = im_band == 0
149 | 
150 |         # update mask_out
151 |         mask_out += im_zero
152 | 
153 |     # Update with faulty pixels if desired
154 |     if faulty_pixels:
155 |         mask_out += nan_mask
156 | 
157 |     # Convert mask_out to boolean
158 |     mask_out = mask_out>0
159 | 
160 |     # Overwrite original nan_mask with new nan_mask
161 |     if write:
162 |         with rasterio.open(nan_path, 'w', **kwargs) as dst:
163 |             dst.write_band(1, mask_out.astype(rasterio.uint8))
164 |     else:
165 |         return mask_out
166 | 
167 | 
168 | #%% GDAL subprocesses
169 | 
170 | def gdal_subprocess(settings, gdal_command_in, command_list):
171 |     ''' Python access to all GDAL commands
172 | 
173 |     Inputs:
174 | 
175 |     settings - CoastSat.PlanetScope settings dictionary
176 | 
177 |     gdal_command_in - name of command (may be an executable or .py file)
178 |         .py extension must be provided if present
179 | 
180 |     command_list - string list of commands (after GDAL function call)
181 | 
182 |     Any GDAL console output captured by subprocess is printed if the
183 |         initial call returns a non-zero status
184 | 
185 | 
186 |     Unlike in terminal, full filepath of gdal command must be provided for subcommand
187 |         ie. 'gdalsrsinfo' needs filepath.../gdalsrsinfo
188 |         The filepath is taken from settings['GDAL_location'] and may need to be changed by the user
189 | 
190 |     Subprocess commands need to be given as a list of strings
191 | 
192 |     Function example for " gdalsrsinfo -o xml in_DEM.tif "
193 |         gdal_subprocess(settings, 'gdalsrsinfo', ["-o", "xml", "in_dem.tif"])
194 | 
195 |     '''
196 | 
197 |     #gdal_loc = [os.path.join('/anaconda2/envs/coastsat/bin/', gdal_command_in)] #removed 30/5/2021
198 |     gdal_loc = [os.path.join(settings['GDAL_location'], gdal_command_in)]
199 | 
200 |     gdal_command = gdal_loc + command_list
201 |     gdal_output = subprocess.check_call(gdal_command)
202 |     if gdal_output != 0:
203 |         gdal_output = subprocess.check_output(gdal_command)
204 |         print(gdal_output)
205 | 
206 | 
207 | def merge_crop(settings, files_list, file_out_name, epsg_in = False, nan_mask = False):
208 |     ''' Merge and crop list of images with gdalwarp
209 |     Note - second file is output in areas of overlap'''
210 | 
211 |     if epsg_in == False:
212 |         epsg_in = settings['output_epsg']
213 | 
214 |     if nan_mask == True:
215 |         no_data_out_val = "1"
216 |         no_data_in_val = "1"
217 |     else:
218 |         no_data_out_val = "0"
219 |         no_data_in_val = "0"
220 | 
221 |     filepath_out = [os.path.join(settings['merge_out'], file_out_name)]
222 | 
223 |     command_list = ["-s_srs", epsg_in,
224 |                     "-t_srs", settings['output_epsg'],
225 |                     "-r", settings['gdal_method'],
226 |                     "-of", "GTiff",
227 |                     "-cutline",settings['aoi_geojson'],
228 |                     "-srcnodata", no_data_in_val,
229 |                     "-dstnodata", no_data_out_val,
230 |                     "-crop_to_cutline",
231 |                     "-overwrite"]
232 | 
233 |     command_list = command_list + files_list + filepath_out
234 | 
235 |     # run process (takes seconds)
236 |     gdal_subprocess(settings, 'gdalwarp', command_list)
237 | 
238 | 
239 | #%% Georectification functions using AROSICS
240 | 
241 | def get_raster_bounds(file):
242 | 
243 |     ''' Find raster bounding parameters '''
244 | 
245 |     dataset = rasterio.open(file)
246 | 
247 |     bounds = [
248 |         dataset.bounds.left,
249 |         dataset.bounds.top,
250 |         dataset.bounds.right,
251 |         dataset.bounds.bottom
252 |     ]
253 | 
254 |     return bounds
255 | 
256 | # def get_raster_corners(file):
257 | 
258 | #     bound_im = rasterio.open(file)
259 | #     bounds = bound_im.bounds
260 | #     geom = box(*bounds)
261 | #     corners = str(geom.wkt)
262 | 
263 | #     return corners
264 | 
265 | 
266 | def global_coreg(im_reference, im_target, im_out_global,
267 |                  land_mask_ref = None, land_mask_tgt = None,
268 |                  ws = (400,400),
269 |                  q = True, progress = False, ignore_errors = True):
270 | 
271 |     # Global coregistration
272 |     CR = COREG(im_reference, im_target,
273 |                path_out = im_out_global, fmt_out = 'GTiff',
274 |                ws = ws,
275 | 
276 |                # Data mask settings
277 |                mask_baddata_ref = land_mask_ref,
278 |                mask_baddata_tgt = land_mask_tgt,
279 | 
280 |                # Console output settings
281 |                q = q, progress = progress, ignore_errors = ignore_errors,
282 | 
283 |                # Hard coded settings
284 |                nodata = (0,0),
285 |                #align_grids = True,
286 |                )
287 | 
288 |     #CR.calculate_spatial_shifts()
289 |     #CR.show_matchWin()
290 | 
291 |     CR.correct_shifts()
292 | 
293 |     # Determine success
294 |     coreg_success = CR.coreg_info['success']
295 | 
296 |     if (coreg_success == False): # or (CR.ssim_improved == False):
297 |         copyfile(im_target, im_out_global)
298 |         print('Coregistration failed, raw image copied instead')
299 |         #coreg_success = False
300 | 
301 |     elif coreg_success:
302 |         # Calculate image shift
303 |         shift_m = np.sqrt(CR.coreg_info['corrected_shifts_map']['y']**2 +
304 |                           CR.coreg_info['corrected_shifts_map']['x']**2)
305 |         print('Shift of ' + str(round(shift_m,2)) + 'm')
306 | 
307 |     return CR.coreg_info, coreg_success
308 | 
309 | 
310 | def local_coreg(im_reference, im_target, im_out_local,
311 |                 land_mask_ref = None, land_mask_tgt = None,
312 |                 grid_res = 100, window_size = (256,256),
313 |                 min_points = 5,
314 |                 #footprint_poly_ref=None, footprint_poly_tgt=None,
315 |                 q = True, progress = False, ignore_errors = True,
316 |                 filter_level = 2):
317 | 
318 |     # Local coregistration
319 |     CRL = COREG_LOCAL(im_reference,im_target,
320 |                       path_out = im_out_local, fmt_out = 'GTiff',
321 |                       grid_res = grid_res, window_size = window_size,
322 |                       tieP_filter_level = filter_level,
323 | 
324 |                       # Data mask settings
325 |                       mask_baddata_ref = land_mask_ref,
326 |                       mask_baddata_tgt = land_mask_tgt,
327 | 
328 |                       # Console output settings
329 |                       q = q, progress = progress, ignore_errors = ignore_errors,
330 | 
331 |                       # Hard coded settings
332 |                       nodata = (0,0),
333 |                       min_reliability = 50,
334 |                       #rs_max_outlier = 10,
335 |                       #r_b4match = 1,
336 |                       #s_b4match = 1,
337 |                       #align_grids = True,
338 |                       #footprint_poly_ref = footprint_poly_ref,
339 |                       #footprint_poly_tgt = footprint_poly_tgt,
340 |                       )
341 | 
342 |     #CRL.view_CoRegPoints()
343 |     #CRL.tiepoint_grid.to_PointShapefile(path_out=im_out_local.replace('.tif','.shp'))
344 |     #CRL.view_CoRegPoints_folium().save(im_out_local.replace('.tif','.html'))
345 | 
346 |     # Correct image
347 |     # High min # points creates shift based on average of x/y shifts only
348 |     CRL.correct_shifts(min_points_local_corr = min_points)
349 | 
350 |     # Determine success
351 |     coreg_success = CRL.coreg_info['success']
352 | 
353 |     if coreg_success == False:
354 |         copyfile(im_target, im_out_local)
355 |         print('Coregistration failed, raw image copied instead')
356 | 
357 |     coreg_info_out = CRL.coreg_info
358 | 
359 |     return coreg_info_out, coreg_success
360 | 
361 | 
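For reference, a pared-down sketch of the AROSICS global call wrapped by `global_coreg` above, with hypothetical file paths (see https://pypi.org/project/arosics/ for the full parameter list):

```python
from arosics import COREG

# hypothetical paths standing in for the reference and target TOA scenes
CR = COREG('ref_TOA.tif', 'target_TOA.tif',
           path_out='target_TOA_coreg.tif', fmt_out='GTiff',
           ws=(400, 400),      # matching window size in pixels
           nodata=(0, 0))      # nodata values of (reference, target)
CR.calculate_spatial_shifts()  # estimate the x/y offset at the matching window
CR.correct_shifts()            # write the shifted target to path_out
print(CR.coreg_info['success'])
```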
362 | def mask_coreg(settings, im_target_mask, cr_param, mask_out_path,
363 |                min_points = 5, coreg_success = False, q = True, progress = False):
364 | 
365 |     if coreg_success == False:
366 |         copyfile(im_target_mask, mask_out_path)
367 | 
368 |     else:
369 |         if settings['im_coreg'] == 'Global Coreg':
370 |             # Apply cr shift to mask
371 |             DESHIFTER(im_target_mask, cr_param,
372 |                       path_out = mask_out_path, fmt_out = 'GTiff',
373 |                       nodata = 255, # if 0 or 1 doesn't shift properly
374 |                       q = q, progress = progress
375 |                       #align_grids = True,
376 |                       ).correct_shifts()
377 | 
378 |         elif settings['im_coreg'] == 'Local Coreg':
379 |             # Apply cr shift to mask
380 |             DESHIFTER(im_target_mask, cr_param,
381 |                       path_out = mask_out_path, fmt_out = 'GTiff',
382 |                       nodata = 255, # if 0 or 1 doesn't shift properly
383 |                       q = q, progress = progress,
384 |                       min_points_local_corr = min_points
385 |                       #align_grids = True,
386 |                       ).correct_shifts()
387 | 
388 |         # Set no data vals as zero again
389 |         with rasterio.open(mask_out_path, 'r') as src:
390 |             mask = src.read()[0,...]
391 |             kwargs = src.meta
392 | 
393 |         # Boolean of mask vals = 1
394 |         mask = mask == 1
395 | 
396 |         # Overwrite original nan_mask with new nan_mask
397 |         with rasterio.open(mask_out_path, 'w', **kwargs) as dst:
398 |             dst.write_band(1, mask.astype(rasterio.uint8))
399 | 
400 | 
401 | def create_land_mask(settings, toa_path, save_loc, nan_path = False, raw_mask = False, save_class = False):
402 | 
403 |     # Classify image
404 |     if nan_path == False:
405 |         im_classif = classify_single(settings['land_classifier_load'],
406 |                                      settings, toa_path, no_mask = True)
407 |     else:
408 |         if type(raw_mask) is bool:
409 |             if raw_mask == False:
410 |                 im_classif = classify_single(settings['land_classifier_load'],
411 |                                              settings, toa_path, no_mask = False)
412 |             else:
413 |                 print('raw_mask needs to be an array or False')
414 |         else:
415 |             im_classif = classify_single(settings['land_classifier_load'],
416 |                                          settings, toa_path, no_mask = False,
417 |                                          raw_mask = raw_mask)
418 | 
419 |     # Extract mask of non-other pixels
420 |     other_mask = im_classif == 0
421 | 
422 |     # Remove non land pixels less than 30*30m (ie single whitewater pixels)
423 |     other_mask = other_mask == 0
424 |     other_mask = morphology.remove_small_objects(other_mask,
425 |                                                  min_size=30*30/9,
426 |                                                  connectivity=1)
427 |     other_mask = other_mask == 0
428 | 
429 |     # Remove small land features then smooth the boundary
430 |     elem = morphology.square(settings['land_mask_smoothing_1']) # use a square of width 10 pixels (30m)
431 |     other_mask = morphology.binary_opening(other_mask,elem) # perform image opening
432 |     other_mask = morphology.remove_small_objects(other_mask,
433 |                                                  min_size=settings['min_beach_area_pixels'],
434 |                                                  connectivity=1)
435 | 
436 |     # Remove small non land features again and smooth
437 |     other_mask = other_mask == 0
438 |     elem = morphology.square(settings['land_mask_smoothing_2']) # use a square of width 15 pixels (45m)
439 |     other_mask = morphology.binary_opening(other_mask,elem) # perform image opening
440 |     other_mask = morphology.remove_small_objects(other_mask,
441 |                                                  min_size=settings['min_beach_area_pixels'],
442 |                                                  connectivity=1)
443 | 
444 |     # Find geo kwargs
445 |     if nan_path == False:
446 |         with rasterio.open(toa_path, 'r') as src:
447 |             kwargs = src.meta
448 |         kwargs.update(
449 |             dtype=rasterio.uint8,
450 |             count = 1)
451 |     else:
452 |         with rasterio.open(nan_path) as src:
453 |             kwargs = src.meta
454 | 
455 |     # save mask
456 |     with rasterio.open(save_loc, 'w', **kwargs) as dst:
457 |         dst.write_band(1, other_mask.astype(rasterio.uint8))
458 | 
459 |     # save land mask class
460 |     if save_class:
461 |         # Save im_classif in TOA folder
462 |         with
rasterio.open(save_loc.replace('_land_mask.tif', '_class.tif'), 'w', **kwargs) as dst: 463 | dst.write_band(1, im_classif.astype(rasterio.uint8)) 464 | 465 | 466 | #%% XML file functions 467 | 468 | def get_epsg(output_dict, date, raw_toa_filename): 469 | 470 | ''' Gets image epsg from xml file ''' 471 | 472 | # Find corresponding xml file 473 | search_id = raw_toa_filename[9:20] 474 | for ii in range(len(output_dict['downloads_map'][date]['_metadata_clip.xml']['filenames'])): 475 | if output_dict['downloads_map'][date]['_metadata_clip.xml']['filenames'][ii][9:20] == search_id: 476 | xml_path = output_dict['downloads_map'][date]['_metadata_clip.xml']['filepaths'][ii] 477 | 478 | # open xml file 479 | xmldoc = minidom.parse(xml_path) 480 | 481 | # find epsg 482 | epsg = xmldoc.getElementsByTagName("ps:epsgCode")[0].firstChild.data 483 | 484 | return epsg 485 | 486 | 487 | def raster_change_epsg(settings, filepath, no_data, crs_in): 488 | 489 | command_list = ["-s_srs", crs_in, 490 | "-t_srs", settings['output_epsg'], 491 | "-r", settings['gdal_method'], 492 | "-of", "GTiff", 493 | "-cutline", settings['aoi_geojson'], 494 | "-srcnodata", no_data, # 0 for regular, 1 for nan masks 495 | "-dstnodata", no_data, 496 | "-crop_to_cutline", 497 | "-overwrite"] 498 | 499 | command_list = command_list + [filepath] + [filepath.replace('.tif', '_temp.tif')] 500 | 501 | # run proces (seconds) 502 | gdal_subprocess(settings, 'gdalwarp', command_list) 503 | # delete and rename file 504 | os.remove(filepath) 505 | os.rename(filepath.replace('.tif', '_temp.tif'), filepath) 506 | 507 | 508 | def TOA_conversion(settings, image_path, xml_path, save_path): 509 | 510 | ''' 511 | 1) Convert DN values to Top of Atmosphere (TOA) 512 | 2) Add sensor type (PS2, PS2.SD, PSB.SD) to save filename 513 | 514 | Function modified from: 515 | https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/toar/toar_planetscope.ipynb 516 | 517 | ''' 518 | 519 | # Load image bands - note all PlanetScope 4-band images have band order BGRN 520 | with rasterio.open(image_path) as src: 521 | band_blue_radiance = src.read(1) 522 | 523 | with rasterio.open(image_path) as src: 524 | band_green_radiance = src.read(2) 525 | 526 | with rasterio.open(image_path) as src: 527 | band_red_radiance = src.read(3) 528 | 529 | with rasterio.open(image_path) as src: 530 | band_nir_radiance = src.read(4) 531 | 532 | 533 | ### Get TOA Factor ### 534 | xmldoc = minidom.parse(xml_path) 535 | nodes = xmldoc.getElementsByTagName("ps:bandSpecificMetadata") 536 | 537 | # XML parser refers to bands by numbers 1-4 538 | coeffs = {} 539 | for node in nodes: 540 | bn = node.getElementsByTagName("ps:bandNumber")[0].firstChild.data 541 | if bn in ['1', '2', '3', '4']: 542 | i = int(bn) 543 | value = node.getElementsByTagName("ps:reflectanceCoefficient")[0].firstChild.data 544 | coeffs[i] = float(value) 545 | 546 | #print("Conversion coefficients: {}".format(coeffs)) 547 | 548 | 549 | ### Convert to TOA ### 550 | 551 | # Multiply the Digital Number (DN) values in each band by the TOA reflectance coefficients 552 | band_blue_reflectance = band_blue_radiance * coeffs[1] 553 | band_green_reflectance = band_green_radiance * coeffs[2] 554 | band_red_reflectance = band_red_radiance * coeffs[3] 555 | band_nir_reflectance = band_nir_radiance * coeffs[4] 556 | 557 | #print("Red band radiance is from {} to {}".format(np.amin(band_red_radiance), np.amax(band_red_radiance))) 558 | #print("Red band reflectance is from {} to {}".format(np.amin(band_red_reflectance), 
559 | 
560 | 
561 |     # find sensor name
562 |     node = xmldoc.getElementsByTagName("eop:Instrument")
563 |     sensor = node[0].getElementsByTagName("eop:shortName")[0].firstChild.data
564 | 
565 |     if sensor == 'PS2':
566 |         save_path += '_PS2_TOA.tif'
567 |     elif sensor == 'PS2.SD':
568 |         save_path += '_2SD_TOA.tif'
569 |     elif sensor == 'PSB.SD':
570 |         save_path += '_BSD_TOA.tif'
571 |     else:
572 |         print('Error in detecting sensor name')
573 | 
574 |     ### Save output images ###
575 | 
576 |     # Set spatial characteristics of the output object to mirror the input
577 |     kwargs = src.meta # rasterio caches these attributes, so they remain readable after the 'with' block closes
578 |     kwargs.update(
579 |         dtype=rasterio.uint16,
580 |         count = 4)
581 | 
582 |     #print("Before Scaling, red band reflectance is from {} to {}".format(np.amin(band_red_reflectance), np.amax(band_red_reflectance)))
583 |     # Here we include a fixed scaling factor. This is common practice.
584 |     scale = 10000
585 |     blue_ref_scaled = scale * band_blue_reflectance
586 |     green_ref_scaled = scale * band_green_reflectance
587 |     red_ref_scaled = scale * band_red_reflectance
588 |     nir_ref_scaled = scale * band_nir_reflectance
589 | 
590 |     #print("After Scaling, red band reflectance is from {} to {}".format(np.amin(red_ref_scaled), np.amax(red_ref_scaled)))
591 | 
592 |     # Write band calculations to a new raster file
593 |     with rasterio.open(save_path, 'w', **kwargs) as dst:
594 |         dst.write_band(1, blue_ref_scaled.astype(rasterio.uint16))
595 |         dst.write_band(2, green_ref_scaled.astype(rasterio.uint16))
596 |         dst.write_band(3, red_ref_scaled.astype(rasterio.uint16))
597 |         dst.write_band(4, nir_ref_scaled.astype(rasterio.uint16))
598 | 
599 |     # Reproject all to output coordinate system
600 |     if settings['arosics_reproject'] == True:
601 |         crs_in = str(src.crs).replace('epsg', 'EPSG')
602 |         if crs_in != settings['output_epsg']:
603 |             raster_change_epsg(settings, save_path, '0', crs_in)
604 | 
605 | 
606 | 
607 | 
608 | 
609 | 
--------------------------------------------------------------------------------
/coastsat_ps/shoreline_tools.py:
--------------------------------------------------------------------------------
1 | # Shoreline extraction function tools
2 | 
3 | import rasterio
4 | import os
5 | import numpy as np
6 | import pandas as pd
7 | import skimage.filters as filters
8 | import skimage.morphology as morphology
9 | import skimage.measure as measure
10 | import geopandas as gpd
11 | 
12 | from astropy.convolution import convolve
13 | from shapely.geometry import LineString
14 | from shapely import geometry
15 | 
16 | 
17 | from coastsat_ps.interactive import (convert_epsg, convert_world2pix, convert_pix2world,
18 |                                      get_ps_no_mask, get_ps_data, get_im_ms)
19 | 
20 | import warnings
21 | warnings.filterwarnings("ignore", category=RuntimeWarning)
22 | 
23 | #%% Index calculations
24 | 
25 | def calc_water_index(toa_path, settings, save_name):
26 | 
27 |     # Extract water index vals
28 |     band1_no = settings['water_index_list'][0]
29 |     band2_no = settings['water_index_list'][1]
30 |     norm_bool = settings['water_index_list'][2]
31 |     file_end = settings['water_index_list'][3]
32 | 
33 |     # import bands
34 |     with rasterio.open(toa_path) as src:
35 |         band_1_reflectance = src.read(band1_no)
36 |         band_1 = band_1_reflectance/10000
37 | 
38 |     with rasterio.open(toa_path) as src:
39 |         band_2_reflectance = src.read(band2_no)
40 |         band_2 = band_2_reflectance/10000
41 | 
42 |     band_1[band_1 == 0] = np.nan
43 |     band_2[band_2 == 0] = np.nan
44 | 
45 |     # Suppress divide-by-zero and invalid-value warnings
46 |     np.seterr(divide='ignore', invalid='ignore')
47 | 
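    # illustrative configuration (assumed, not from the original source): with
    # PlanetScope's BGRN band order, band1_no = 2 (green), band2_no = 4 (NIR) and
    # norm_bool = True yield the standard NDWI = (green - NIR) / (green + NIR)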
48 |     # perform calculation
49 |     if norm_bool:
50 |         water_index = (band_1.astype(float) - band_2.astype(float)) / (band_1 + band_2)
51 |     else:
52 |         water_index = (band_1.astype(float) - band_2.astype(float))
53 | 
54 |     # Set spatial characteristics of the output object to mirror the input
55 |     kwargs = src.meta
56 |     kwargs.update(
57 |         dtype=rasterio.float32,
58 |         count = 1)
59 | 
60 |     # Save image to new file
61 |     save_file = os.path.join(settings['index_tif_out'], save_name + file_end)
62 | 
63 |     with rasterio.open(save_file, 'w', **kwargs) as dst:
64 |         dst.write_band(1, water_index.astype(rasterio.float32))
65 | 
66 | 
67 | #%% Thresholding
68 | 
69 | def peak_fraction_thresh(vec_sand, vec_water, thresh_fraction, nBins):
70 | 
71 |     # Find peak water val
72 |     water_vals = pd.DataFrame()
73 |     water_vals['count'] = np.histogram(vec_water, bins = nBins)[0]
74 |     water_vals['ind_vals'] = np.histogram(vec_water, bins = nBins)[1][0:-1]
75 |     water_max = water_vals.loc[water_vals['count'].idxmax(), 'ind_vals']
76 | 
77 |     # Find peak sand val
78 |     sand_vals = pd.DataFrame()
79 |     sand_vals['count'] = np.histogram(vec_sand, bins = nBins)[0]
80 |     sand_vals['ind_vals'] = np.histogram(vec_sand, bins = nBins)[1][0:-1]
81 |     sand_max = sand_vals.loc[sand_vals['count'].idxmax(), 'ind_vals']
82 | 
83 |     # Create threshold
84 |     threshold = water_max + thresh_fraction*(sand_max-water_max)
85 | 
86 |     return threshold
87 | 
88 | 
89 | def peak_fraction_generic(vec, thresh_fraction, nBins, multi_otsu = True):
90 | 
91 |     histogram_vals = pd.DataFrame()
92 |     histogram_vals['count'] = np.histogram(vec, bins = nBins)[0]
93 |     histogram_vals['ind_vals'] = np.histogram(vec, bins = nBins)[1][0:-1]
94 | 
95 |     if multi_otsu:
96 |         t_otsu_all = filters.threshold_multiotsu(vec, nbins = nBins)
97 |         mid_val = t_otsu_all[1]
98 |     else:
99 |         mid_val = filters.threshold_otsu(vec)
100 | 
101 |     vec_sand_half = histogram_vals[histogram_vals['ind_vals']>mid_val]
102 |     sand_max = histogram_vals.loc[vec_sand_half['count'].idxmax(), 'ind_vals']
103 | 
104 |     vec_water_half = histogram_vals[histogram_vals['ind_vals']<mid_val]
105 |     water_max = histogram_vals.loc[vec_water_half['count'].idxmax(), 'ind_vals']
106 | 
107 |     threshold = water_max + thresh_fraction*(sand_max-water_max)
108 | 
109 |     return threshold
208 |         - sand                                       --> label = 1
209 |         - whitewater (breaking waves and swash)      --> label = 2
210 |         - water                                      --> label = 3
211 |         - other (vegetation, buildings, rocks...)    --> label = 0
212 | 
213 |     The classifier is a Neural Network that is already trained.
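    Illustrative call (the function name and exact signature are assumed from
    the upstream CoastSat convention and the Arguments list below; the def line
    is not shown above):
        im_classif = classify_image_NN(im_ms, cloud_mask, clf)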
214 | 
215 |     KV WRL 2018
216 | 
217 |     Modified YD 2020
218 | 
219 |     Arguments:
220 |     -----------
221 |     im_ms: np.array
222 |         multispectral image (BGRN bands for PlanetScope)
223 |     cloud_mask: np.array
224 |         2D cloud mask with True where cloud pixels are
225 |     clf: joblib object
226 |         pre-trained classifier
227 | 
228 |     Returns:
229 |     -----------
230 |     im_classif: np.array
231 |         2D image containing labels
232 |         Pixel Values:
233 |             0) Other, 1) Sand, 2) Whitewater, 3) Water
234 | 
235 |     """
236 | 
237 |     # calculate features
238 |     vec_features = calculate_features(im_ms, cloud_mask, np.ones(cloud_mask.shape).astype(bool))
239 |     vec_features[np.isnan(vec_features)] = 1e-9 # NaN values are created when std is too close to 0
240 | 
241 |     # remove NaNs and cloudy pixels
242 |     vec_cloud = cloud_mask.reshape(cloud_mask.shape[0]*cloud_mask.shape[1])
243 |     vec_nan = np.any(np.isnan(vec_features), axis=1)
244 |     vec_mask = np.logical_or(vec_cloud, vec_nan)
245 |     vec_features = vec_features[~vec_mask, :]
246 | 
247 |     # classify pixels
248 |     labels = clf.predict(vec_features)
249 | 
250 |     # recompose image
251 |     vec_classif = np.nan*np.ones((cloud_mask.shape[0]*cloud_mask.shape[1]))
252 |     vec_classif[~vec_mask] = labels
253 |     im_classif = vec_classif.reshape((cloud_mask.shape[0], cloud_mask.shape[1]))
254 | 
255 |     return im_classif
256 | 
257 | 
258 | def create_shoreline_buffer(im_shape, georef, image_epsg, pixel_size, settings):
259 |     """
260 |     Creates a buffer around the reference shoreline. The size of the buffer is
261 |     given by settings['max_dist_ref'].
262 | 
263 |     KV WRL 2018
264 | 
265 |     Modified for PS by YD 2020
266 | 
267 |     Arguments:
268 |     -----------
269 |     im_shape: np.array
270 |         size of the image (rows,columns)
271 |     georef: np.array
272 |         vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
273 |     image_epsg: int
274 |         spatial reference system of the image from which the contours were extracted
275 |     pixel_size: int
276 |         size of the pixel in metres (3 for PlanetScope, 15 for Landsat, 10 for Sentinel-2)
277 |     settings: dict with the following keys
278 |         'output_epsg': int
279 |             output spatial reference system
280 |         'reference_shoreline': np.array
281 |             coordinates of the reference shoreline
282 |         'max_dist_ref': int
283 |             maximum distance from the reference shoreline in metres
284 | 
285 |     Returns:
286 |     -----------
287 |     im_buffer: np.array
288 |         binary image, True outside the buffer region and False within it (note the boolean is inverted at the end of this function)
289 | 
290 |     """
291 |     # initialise the image buffer
292 |     im_buffer = np.ones(im_shape).astype(bool)
293 | 
294 |     # convert reference shoreline to pixel coordinates
295 |     ref_sl = settings['reference_shoreline']
296 |     out_epsg_int = int(settings['output_epsg'].replace('EPSG:',''))
297 |     ref_sl_conv = convert_epsg(ref_sl, out_epsg_int, image_epsg)[:,:-1]
298 |     ref_sl_pix = convert_world2pix(ref_sl_conv, georef)
299 |     ref_sl_pix_rounded = np.round(ref_sl_pix).astype(int)
300 | 
301 |     # make sure that the pixel coordinates of the reference shoreline are inside the image
302 |     idx_row = np.logical_and(ref_sl_pix_rounded[:,0] > 0, ref_sl_pix_rounded[:,0] < im_shape[1])
303 |     idx_col = np.logical_and(ref_sl_pix_rounded[:,1] > 0, ref_sl_pix_rounded[:,1] < im_shape[0])
304 |     idx_inside = np.logical_and(idx_row, idx_col)
305 |     ref_sl_pix_rounded = ref_sl_pix_rounded[idx_inside,:]
306 | 
307 |     # create binary image of the reference shoreline (1 where the shoreline is, 0 otherwise)
308 |     im_binary = np.zeros(im_shape)
309 |     for j in range(len(ref_sl_pix_rounded)):
310 |         im_binary[ref_sl_pix_rounded[j,1], ref_sl_pix_rounded[j,0]] = 1
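    # e.g. with max_dist_ref = 75 m and 3 m PlanetScope pixels (illustrative values),
    # the dilation below uses a disk of radius ceil(75/3) = 25 pixels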
311 |     im_binary = im_binary.astype(bool)
312 | 
313 |     # dilate the binary image to create a buffer around the reference shoreline
314 |     max_dist_ref_pixels = np.ceil(settings['max_dist_ref']/pixel_size)
315 |     se = morphology.disk(max_dist_ref_pixels)
316 |     im_buffer = morphology.binary_dilation(im_binary, se)
317 | 
318 |     # Invert boolean (True is outside region)
319 |     im_buffer = im_buffer == False
320 | 
321 |     return im_buffer
322 | 
323 | 
324 | #%% Contour extraction functions
325 | 
326 | def sl_extract(masked_im, sand_mask, water_mask, masked_im_gen, settings):
327 | 
328 |     # Extract data
329 |     thresh_type = settings['thresholding']
330 |     n_bins = settings['otsu_hist_bins']
331 |     thresh_fraction = settings['peak_fraction']
332 | 
333 |     # Create sand vec
334 |     sand_im = np.copy(masked_im)
335 |     sand_im[sand_mask] = np.nan
336 | 
337 |     # Create water vec
338 |     water_im = np.copy(masked_im)
339 |     water_im[water_mask] = np.nan
340 | 
341 |     # Reshape to 1D array and remove any nan values
342 |     vec_sand = sand_im.reshape(sand_im.shape[0] * sand_im.shape[1])
343 |     vec_water = water_im.reshape(water_im.shape[0] * water_im.shape[1])
344 | 
345 |     # Remove nans from vecs
346 |     vec_sand = vec_sand[~np.isnan(vec_sand)]
347 |     vec_water = vec_water[~np.isnan(vec_water)]
348 | 
349 |     # Combine raw vecs
350 |     int_all_raw = np.append(vec_water,vec_sand, axis=0)
351 | 
352 |     # make sure both classes have the same number of pixels before thresholding
353 |     if len(vec_water) > 0 and len(vec_sand) > 0:
354 |         if np.argmin([vec_sand.shape[0],vec_water.shape[0]]) == 1:
355 |             vec_sand = vec_sand[np.random.choice(vec_sand.shape[0],vec_water.shape[0], replace=False)]
356 |         else:
357 |             vec_water = vec_water[np.random.choice(vec_water.shape[0],vec_sand.shape[0], replace=False)]
358 | 
359 |     # Threshold image
360 |     if thresh_type == 'Otsu':
361 |         # Combine vecs
362 |         int_all = np.append(vec_water,vec_sand, axis=0)
363 |         # Threshold
364 |         t_otsu = filters.threshold_otsu(int_all)
365 | 
366 |     elif thresh_type == 'Peak Fraction':
367 |         t_otsu = peak_fraction_thresh(vec_sand, vec_water, thresh_fraction, n_bins)
368 | 
369 |     # Contour image
370 |     contours = measure.find_contours(masked_im_gen, t_otsu)
371 | 
372 |     # remove contours that contain NaNs
373 |     contours_nonans = []
374 |     for k in range(len(contours)):
375 |         if np.any(np.isnan(contours[k])):
376 |             index_nan = np.where(np.isnan(contours[k]))[0]
377 |             contours_temp = np.delete(contours[k], index_nan, axis=0)
378 |             if len(contours_temp) > 1:
379 |                 contours_nonans.append(contours_temp)
380 |         else:
381 |             contours_nonans.append(contours[k])
382 |     contours = contours_nonans
383 | 
384 |     return contours, int_all_raw, t_otsu
385 | 
386 | 
387 | 
388 | def sl_extract_generic(masked_im, settings):
389 | 
390 |     # Extract data
391 |     thresh_type = settings['thresholding']
392 |     n_bins = settings['otsu_hist_bins']
393 |     thresh_fraction = settings['peak_fraction']
394 | 
395 |     # Create vec
396 |     vec_im = np.copy(masked_im)
397 |     vec = vec_im.reshape(vec_im.shape[0] * vec_im.shape[1])
398 |     vec = vec[~np.isnan(vec)]
399 | 
400 |     # Threshold image
401 |     if thresh_type == 'Otsu':
402 |         # Threshold
403 |         t_otsu = filters.threshold_otsu(vec)
404 | 
405 |     elif thresh_type == 'Peak Fraction':
406 |         t_otsu = peak_fraction_generic(vec, thresh_fraction, n_bins)
407 | 
408 |     # Contour image
409 |     contours = measure.find_contours(masked_im, t_otsu)
410 | 
411 |     # remove contours that contain NaNs
412 |     contours_nonans = []
413 |     for k in range(len(contours)):
414 |         if np.any(np.isnan(contours[k])):
415 |             index_nan = np.where(np.isnan(contours[k]))[0]
416 |             contours_temp = np.delete(contours[k], index_nan, axis=0)
417 |             if len(contours_temp) > 1:
418 |                 contours_nonans.append(contours_temp)
419 |         else:
420 |             contours_nonans.append(contours[k])
421 |     contours = contours_nonans
422 | 
423 |     return contours, vec, t_otsu
424 | 
425 | 
426 | 
427 | # def cs_sl_extract(index_im, im_ref_buffer, comb_mask, im_classif, settings):
428 | 
429 | #     # Extract data
430 | #     nrows = comb_mask.shape[0]
431 | #     ncols = comb_mask.shape[1]
432 | #     sand_im = im_classif == 1
433 | #     water_im = im_classif == 3
434 | 
435 | #     # Nan index im
436 | #     index = np.copy(index_im)
437 | #     index[comb_mask] = np.nan
438 | 
439 | #     # Reshape to vec
440 | #     vec_ind = index.reshape(nrows*ncols,1)
441 | #     vec_sand = sand_im.reshape(ncols*nrows)
442 | #     vec_water = water_im.reshape(ncols*nrows)
443 | 
444 | #     # create a buffer around the sandy beach
445 | #     se = morphology.disk(settings['buffer_size_pixels'])
446 | #     im_buffer = morphology.binary_dilation(sand_im, se)
447 | #     vec_buffer = im_buffer.reshape(nrows*ncols)
448 | 
449 | #     # select water/sand/swash pixels that are within the buffer
450 | #     int_water = vec_ind[np.logical_and(vec_buffer,vec_water),:]
451 | #     int_sand = vec_ind[np.logical_and(vec_buffer,vec_sand),:]
452 | 
453 | #     # make sure both classes have the same number of pixels before thresholding
454 | #     if len(int_water) > 0 and len(int_sand) > 0:
455 | #         if np.argmin([int_sand.shape[0],int_water.shape[0]]) == 1:
456 | #             int_sand = int_sand[np.random.choice(int_sand.shape[0],int_water.shape[0], replace=False),:]
457 | #         else:
458 | #             int_water = int_water[np.random.choice(int_water.shape[0],int_sand.shape[0], replace=False),:]
459 | 
460 | #     # threshold the sand/water intensities
461 | #     int_all = np.append(int_water,int_sand, axis=0)
462 | #     t_wi = filters.threshold_otsu(int_all[:,0])
463 | 
464 | #     # find contour with MS algorithm
465 | #     im_wi_buffer = np.copy(index)
466 | #     im_wi_buffer[im_ref_buffer>0] = np.nan
467 | 
468 | #     contours = measure.find_contours(im_wi_buffer, t_wi)
469 | 
470 | #     # remove contour points that are NaNs (around clouds)
471 | #     contours_nonans = []
472 | #     for k in range(len(contours)):
473 | #         if np.any(np.isnan(contours[k])):
474 | #             index_nan = np.where(np.isnan(contours[k]))[0]
475 | #             contours_temp = np.delete(contours[k], index_nan, axis=0)
476 | #             if len(contours_temp) > 1:
477 | #                 contours_nonans.append(contours_temp)
478 | #         else:
479 | #             contours_nonans.append(contours[k])
480 | #     contours = contours_nonans
481 | 
482 | #     return contours
483 | 
484 | 
485 | 
486 | #%% Modified CoastSat Functions - SDS Tools
487 | 
488 | def nd_index(im1, im2, cloud_mask):
489 |     """
490 |     Computes normalised difference index on 2 images (2D), given a cloud mask (2D).
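    The index is computed as im_nd = (im1 - im2) / (im1 + im2), with NaN where
    cloud_mask is True. Illustrative use (assuming 0-based BGRN band indexing,
    not taken from the original source):
        ndwi = nd_index(im_ms[:,:,1], im_ms[:,:,3], cloud_mask) # (green - NIR)/(green + NIR)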
491 | 
492 |     KV WRL 2018
493 | 
494 |     Arguments:
495 |     -----------
496 |     im1: np.array
497 |         first image (2D) with which to calculate the ND index
498 |     im2: np.array
499 |         second image (2D) with which to calculate the ND index
500 |     cloud_mask: np.array
501 |         2D cloud mask with True where cloud pixels are
502 | 
503 |     Returns:
504 |     -----------
505 |     im_nd: np.array
506 |         Image (2D) containing the ND index
507 | 
508 |     """
509 | 
510 |     # reshape the cloud mask
511 |     vec_mask = cloud_mask.reshape(im1.shape[0] * im1.shape[1])
512 |     # initialise with NaNs
513 |     vec_nd = np.ones(len(vec_mask)) * np.nan
514 |     # reshape the two images
515 |     vec1 = im1.reshape(im1.shape[0] * im1.shape[1])
516 |     vec2 = im2.reshape(im2.shape[0] * im2.shape[1])
517 |     # compute the normalised difference index
518 |     temp = np.divide(vec1[~vec_mask] - vec2[~vec_mask],
519 |                      vec1[~vec_mask] + vec2[~vec_mask])
520 |     vec_nd[~vec_mask] = temp
521 |     # reshape into image
522 |     im_nd = vec_nd.reshape(im1.shape[0], im1.shape[1])
523 | 
524 |     return im_nd
525 | 
526 | 
527 | def image_std(image, radius):
528 |     """
529 |     Calculates the standard deviation of an image, using a moving window of
530 |     specified radius. Uses astropy's convolution library.
531 | 
532 |     Copied with permission from CoastSat (KV, 2020)
533 |     https://github.com/kvos/CoastSat
534 | 
535 |     Arguments:
536 |     -----------
537 |     image: np.array
538 |         2D array containing the pixel intensities of a single-band image
539 |     radius: int
540 |         radius defining the moving window used to calculate the standard deviation.
541 |         For example, radius = 1 will produce a 3x3 moving window.
542 | 
543 |     Returns:
544 |     -----------
545 |     win_std: np.array
546 |         2D array containing the standard deviation of the image
547 | 
548 |     """
549 | 
550 |     # convert to float
551 |     image = image.astype(float)
552 |     # first pad the image
553 |     image_padded = np.pad(image, radius, 'reflect')
554 |     # window size
555 |     win_rows, win_cols = radius*2 + 1, radius*2 + 1
556 |     # calculate std with a uniform (boxcar) kernel
557 |     win_mean = convolve(image_padded, np.ones((win_rows,win_cols)), boundary='extend',
558 |                         normalize_kernel=True, nan_treatment='interpolate', preserve_nan=True)
559 |     win_sqr_mean = convolve(image_padded**2, np.ones((win_rows,win_cols)), boundary='extend',
560 |                             normalize_kernel=True, nan_treatment='interpolate', preserve_nan=True)
561 |     win_var = win_sqr_mean - win_mean**2
562 |     win_std = np.sqrt(win_var)
563 |     # remove padding
564 |     win_std = win_std[radius:-radius, radius:-radius]
565 | 
566 |     return win_std
567 | 
568 | 
569 | 
570 | def process_shoreline(contours, cloud_mask, georef, image_epsg, settings):
571 |     """
572 |     Converts the contours from image coordinates to world coordinates.
573 |     This function also removes the contours that are too small to be a shoreline
574 |     (based on the parameter settings['min_length_sl'])
575 | 
576 |     KV WRL 2018
577 | 
578 |     Modified YD 2020
579 | 
580 |     Arguments:
581 |     -----------
582 |     contours: np.array or list of np.array
583 |         image contours as detected by the function find_contours
584 |     cloud_mask: np.array
585 |         2D cloud mask with True where cloud pixels are
586 |     georef: np.array
587 |         vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
588 |     image_epsg: int
589 |         spatial reference system of the image from which the contours were extracted
590 |     settings: dict with the following keys
591 |         'output_epsg': int
592 |             output spatial reference system
593 |         'min_length_sl': float
594 |             minimum length of shoreline contour to be kept (in meters)
595 | 
596 |     Returns:
597 |     -----------
598 |     shoreline: np.array
599 |         array of points with the X and Y coordinates of the shoreline
600 | 
601 |     """
602 |     # convert epsg
603 |     out_epsg = int(settings['output_epsg'].replace('EPSG:',''))
604 |     # convert pixel coordinates to world coordinates
605 |     contours_world = convert_pix2world(contours, georef)
606 |     # convert world coordinates to desired spatial reference system
607 |     contours_epsg = convert_epsg(contours_world, image_epsg, out_epsg)
608 |     # remove contours that have a perimeter < min_length_sl (provided in settings dict)
609 |     # this removes the very small contours that do not correspond to the shoreline
610 |     contours_long = []
611 |     for l, wl in enumerate(contours_epsg):
612 |         coords = [(wl[k,0], wl[k,1]) for k in range(len(wl))]
613 |         a = LineString(coords) # shapely LineString structure
614 |         if a.length >= settings['min_length_sl']:
615 |             contours_long.append(wl)
616 |     # format points into np.array
617 |     x_points = np.array([])
618 |     y_points = np.array([])
619 |     for k in range(len(contours_long)):
620 |         x_points = np.append(x_points,contours_long[k][:,0])
621 |         y_points = np.append(y_points,contours_long[k][:,1])
622 |     contours_array = np.transpose(np.array([x_points,y_points]))
623 | 
624 |     shoreline = contours_array
625 | 
626 |     # now remove any shoreline points that are attached to cloud pixels
627 |     if sum(sum(cloud_mask)) > 0:
628 |         # get the coordinates of the cloud pixels
629 |         idx_cloud = np.where(cloud_mask)
630 |         idx_cloud = np.array([(idx_cloud[0][k], idx_cloud[1][k]) for k in range(len(idx_cloud[0]))])
631 |         # convert to world coordinates and same epsg as the shoreline points
632 |         coords_cloud = convert_epsg(convert_pix2world(idx_cloud, georef),
633 |                                     image_epsg, out_epsg)[:,:-1]
634 |         # only keep the shoreline points that are at least settings['cloud_buffer'] metres from any cloud pixel
635 |         idx_keep = np.ones(len(shoreline)).astype(bool)
636 |         for k in range(len(shoreline)):
637 |             if np.any(np.linalg.norm(shoreline[k,:] - coords_cloud, axis=1) < settings['cloud_buffer']):
638 |                 idx_keep[k] = False
639 |         shoreline = shoreline[idx_keep]
640 | 
641 |     return shoreline
642 | 
643 | 
644 | def output_to_gdf_PL(output):
645 |     """
646 |     Modified from KV WRL 2018 by YD
647 |     Adapted to PL metadata info
648 | 
649 |     Saves the mapped shorelines as a gpd.GeoDataFrame
650 | 
651 |     KV WRL 2018
652 | 
653 |     Modified YD 2020
654 | 
655 |     Arguments:
656 |     -----------
657 |     output: dict
658 |         contains the coordinates of the mapped shorelines + attributes
659 | 
660 |     Returns:
661 |     -----------
662 |     gdf_all: gpd.GeoDataFrame
663 |         contains the shorelines + attributes
664 | 
665 |     """
666 | 
667 |     # loop through the mapped shorelines
668 |     counter = 0
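    # illustrative (not part of the original function): the assembled GeoDataFrame
    # can later be exported with gdf_all.to_file('shorelines.geojson', driver='GeoJSON')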
669 |     for i in range(len(output['shorelines'])):
670 |         # skip if the shoreline is empty
671 |         if len(output['shorelines'][i]) == 0:
672 |             continue
673 |         else:
674 |             # save the geometry + attributes
675 |             geom = geometry.LineString(output['shorelines'][i])
676 |             gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(geom))
677 |             gdf.index = [i]
678 |             gdf.loc[i,'timestamp'] = output['timestamp utc'][i].strftime('%Y-%m-%d %H:%M:%S')
679 |             # gdf.loc[i,'satname'] = output['satname'][i]
680 |             #gdf.loc[i,'geoaccuracy'] = output['geoaccuracy'][i]
681 |             gdf.loc[i,'cloud_cover'] = output['cloud_cover'][i]
682 |             gdf.loc[i,'aoi_coverage'] = output['aoi_cover'][i]
683 |             gdf.loc[i,'ps_sat_name'] = output['ps_sat_name'][i]
684 |             gdf.loc[i,'name'] = output['name'][i]
685 | 
686 |             # store into geodataframe
687 |             if counter == 0:
688 |                 gdf_all = gdf
689 |             else:
690 |                 gdf_all = pd.concat([gdf_all, gdf]) # pd.concat replaces the deprecated DataFrame.append
691 |             counter = counter + 1
692 | 
693 |     return gdf_all
694 | 
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | channels:
2 |   - conda-forge
3 |   - defaults
4 | dependencies:
5 |   - python
6 |   - numpy
7 |   - matplotlib
8 |   - pandas
9 |   - geopandas
10 |   - pytz
11 |   - scikit-image
12 |   - scikit-learn=0.20.3
13 |   - astropy
14 |   - gdal
15 |   - shapely
16 |   - scipy
17 |   - pyproj
18 |   - pykrige
19 |   - cartopy
20 |   - pyfftw
21 |   - rasterio
22 |   - arosics
23 |   - notebook
24 |   - spyder
25 |   - PyQtWebEngine
--------------------------------------------------------------------------------
/readme_files/extraction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ydoherty/CoastSat.PlanetScope/07e60499f024fc10037b5532a4c74fd0cae09e62/readme_files/extraction.png
--------------------------------------------------------------------------------
/readme_files/timeseries.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ydoherty/CoastSat.PlanetScope/07e60499f024fc10037b5532a4c74fd0cae09e62/readme_files/timeseries.png
--------------------------------------------------------------------------------
/user_inputs/NARRA_polygon.kml:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 151.2975311279297,-33.734547389681836
4 | 151.31109237670898,-33.734547389681836
5 | 151.31109237670898,-33.70170701318013
6 | 151.2975311279297,-33.70170701318013
7 | 151.2975311279297,-33.734547389681836
8 | 
9 | 
--------------------------------------------------------------------------------
/user_inputs/NARRA_transects.geojson:
--------------------------------------------------------------------------------
1 | {
2 | "type": "FeatureCollection",
3 | "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::28356" } },
4 | "features": [
5 | { "type": "Feature", "properties": { "name": "PF1" }, "geometry": { "type": "LineString", "coordinates": [ [ 342880, 6269180 ], [ 343144, 6269037 ] ] } },
6 | { "type": "Feature", "properties": { "name": "PF2" }, "geometry": { "type": "LineString", "coordinates": [ [ 342733, 6268783 ], [ 343008, 6268664 ] ] } },
7 | { "type": "Feature", "properties": { "name": "PF4" }, "geometry": { "type": "LineString", "coordinates": [ [ 342451, 6267913 ], [ 342746, 6267859 ] ] } },
8 | { "type": "Feature", "properties": { "name": "PF6" }, "geometry": { "type": "LineString", "coordinates": [ [ 342460, 6267042 ], [ 342759, 6267075 ] ] } },
9 | { "type": "Feature", "properties": { "name": "PF8" }, "geometry": { "type": "LineString", "coordinates": [ [ 342675, 6266241 ], [ 342936, 6266388 ] ] } }
10 | ]
11 | }
12 | 
--------------------------------------------------------------------------------