├── tests ├── __init__.py ├── data1 │ └── S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE.zip ├── data2 │ └── S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE.zip ├── test_search.py ├── test_stsa.py └── data3 │ └── S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE │ └── manifest.safe ├── stsa ├── __init__.py ├── utils.py ├── search.py └── stsa.py ├── sample_webmap.png ├── requirements.txt ├── .github └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .travis.yml ├── codecov.yml ├── setup.py ├── .devcontainer └── devcontainer.json ├── app.py ├── README.md └── LICENSE /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /stsa/__init__.py: -------------------------------------------------------------------------------- 1 | from stsa.search import DownloadXML 2 | from stsa.stsa import TopsSplitAnalyzer 3 | -------------------------------------------------------------------------------- /sample_webmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pbrotoisworo/s1-tops-split-analyzer/HEAD/sample_webmap.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | descartes 2 | fiona 3 | geopandas 4 | folium 5 | xmltodict 6 | streamlit 7 | streamlit-folium 8 | pytest -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## PR Description 2 | 3 | Please put description of changes here. 
4 | 
5 | ## Linked Issue
6 | Closes #xxx
7 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | .vscode/
 2 | __pycache__/
 3 | *.pyc
 4 | *.zip
 5 | *.shp
 6 | *.shx
 7 | *.prj
 8 | *.dbf
 9 | *.cpg
10 | .idea/
11 | .ipynb
12 | *.xml
--------------------------------------------------------------------------------
/tests/data1/S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pbrotoisworo/s1-tops-split-analyzer/HEAD/tests/data1/S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE.zip
--------------------------------------------------------------------------------
/tests/data2/S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pbrotoisworo/s1-tops-split-analyzer/HEAD/tests/data2/S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE.zip
--------------------------------------------------------------------------------
/stsa/utils.py:
--------------------------------------------------------------------------------
 1 | import geopandas as gpd
 2 | 
 3 | def gdf_from_wkt(wkt: str) -> gpd.GeoDataFrame:
 4 |     """
 5 |     Create a GeoDataFrame from a WKT string
 6 |     """
 7 |     gs = gpd.GeoSeries.from_wkt([wkt])
 8 |     df = gpd.GeoDataFrame(gs, columns=['geometry'], crs='EPSG:4326')
 9 |     return df
10 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
 1 | language: python
 2 | python:
 3 |   - "3.11"
 4 |   - "3.10"
 5 |   - "3.9"
 6 |   - "3.8"
 7 | before_install:
 8 |   - python --version
 9 |   - pip install -U pip
10 |   - pip install -U pytest
11 | install:
12 |   - pip install -e .
13 |   - pip install pytest-cov codecov
14 | script:
15 |   - pytest --cov=. tests
16 | after_success:
17 |   - codecov
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
 1 | # Codecov should always pass. Codecov is used for informational purposes only
 2 | codecov:
 3 |   require_ci_to_pass: false
 4 |   notify:
 5 |     require_ci_to_pass: no
 6 |     after_n_builds: 1
 7 | coverage:
 8 |   status:
 9 |     project:
10 |       default:
11 |         # Require 1% coverage, i.e., always succeed
12 |         target: 1
13 |     patch:
14 |       default:
15 |         target: 0
16 |         if_no_uploads: error
17 |         if_not_found: success
18 |         if_ci_failed: failure
19 |     changes: false
20 | comment: off
21 | github_checks:
22 |   annotations: false
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | from setuptools import setup, find_packages
 2 | 
 3 | setup(
 4 |     name='stsa',
 5 |     author='Panji P. Brotoisworo',
 6 |     url='https://github.com/pbrotoisworo/s1-tops-split-analyzer',
 7 |     version='0.2',
 8 |     description='Interface to extract subswath data from Sentinel-1 SAR metadata',
 9 |     packages=find_packages(),
10 |     include_package_data=True,
11 |     install_requires=[
12 |         'descartes',
13 |         'fiona',
14 |         'geopandas',
15 |         'folium',
16 |         'xmltodict',
17 |         'streamlit',
18 |         'streamlit-folium',
19 |         'pytest'
20 |     ],
21 |     classifiers=[
22 |         "Intended Audience :: Science/Research",
23 |         "License :: OSI Approved :: Apache Software License",
24 |         "Operating System :: OS Independent",
25 |         "Programming Language :: Python",
26 |         "Programming Language :: Python :: 3.8",
27 |         "Programming Language :: Python :: 3.9",
28 |     ],
29 |     keywords="SAR, Sentinel-1, Remote Sensing",
30 | )
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "name": "Python 3",
 3 |     // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
 4 |     "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye",
 5 |     "customizations": {
 6 |         "codespaces": {
 7 |             "openFiles": [
 8 |                 "README.md",
 9 |                 "app.py"
10 |             ]
11 |         },
12 |         "vscode": {
13 |             "settings": {},
14 |             "extensions": [
15 |                 "ms-python.python",
16 |                 "ms-python.vscode-pylance"
17 |             ]
18 |         }
19 |     },
20 |     "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt"
21 | }
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
103 |     if len(df_intersect) > 0:
104 |         st.text('Geometry overlay intersects with the following bursts:')
105 |         st.dataframe(df_intersect)
106 |     else:
107 |         st.text('No overlap found with the geometry overlay.')
108 | 
109 | st.subheader('Burst Data')
110 | download = st.empty()
111 | 
112 | # TODO: DeprecationWarning: folium_static is deprecated and will be removed in a future release, or
113 | # simply replaced with st_folium which always passes returned_objects=[] to the component.
114 | streamlit_folium.folium_static(s1.visualize_webmap(geom_overlay), width=1200, height=800)
115 | 
116 | filename = f'{scene}.geojson'
117 | download.download_button('Download GeoJSON', data=s1.df.to_json(), file_name=filename)
118 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | # S-1 TOPS SPLIT Analyzer (STSA)
  2 | 
  3 | [![Build Status](https://travis-ci.com/pbrotoisworo/s1-tops-split-analyzer.svg?branch=main)](https://travis-ci.com/pbrotoisworo/s1-tops-split-analyzer)
  4 | [![codecov](https://codecov.io/gh/pbrotoisworo/s1-tops-split-analyzer/branch/main/graph/badge.svg?token=EYS8DNVPXL)](https://codecov.io/gh/pbrotoisworo/s1-tops-split-analyzer)
  5 | [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/release/python-380/)
  6 | [![Open in Streamlit](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://share.streamlit.io/pbrotoisworo/s1-tops-split-analyzer/main/app.py)
  7 | 
  8 | This tool helps users work with the S-1 TOPS SPLIT function in [SNAP software](https://step.esa.int/main/download/snap-download/) by parsing the XML metadata of Sentinel-1 satellite data and converting it into usable formats such as shapefiles and webmaps.
  9 | 
 10 | Using S-1 TOPS SPLIT Analyzer you are able to:
 11 | * Download XML data directly from [Copernicus Dataspace](https://dataspace.copernicus.eu/) and view TOPS-SPLIT data. No need to download the full 4 GB image to view the TOPS SPLIT data
 12 | * View TOPS-SPLIT data from downloaded ZIP files containing full size Sentinel-1 imagery
 13 | * View all subswaths at the same time
 14 | * Save S1-TOPS-SPLIT data as a shapefile, JSON, or CSV
 15 | * View and interact with S1-TOPS-SPLIT data using a webmap. In addition, you can add a polygon to visualize its extent with regard to the S1-TOPS-SPLIT data
 16 | 
 17 | Comments and feedback are welcome.
 18 | 
 19 | # Live Web App
 20 | 
 21 | For easier usage, STSA has a web app hosted by Streamlit. [Access it here!](https://share.streamlit.io/pbrotoisworo/s1-tops-split-analyzer/main/app.py) You can view data and download it in GeoJSON format.
 22 | 
 23 | # Installation
 24 | 
 25 | STSA has been tested to work with Python versions 3.8 to 3.11 (see `.travis.yml`).
 26 | 
 27 | Install STSA into your environment by typing this command:
 28 | 
 29 | `pip install git+https://github.com/pbrotoisworo/s1-tops-split-analyzer.git`
 30 | 
 31 | # Usage
 32 | STSA can be used in the command line and as a Python import.
 33 | 
 34 | ## Command Line
 35 | CLI access is available if you directly run `stsa.py`. The available flags are:
 36 | 
 37 | | Flag | Description |
 38 | | -------- |:---------------------------:|
 39 | | -v | Print all statements |
 40 | | --zip | Path of Sentinel-1 ZIP file |
 41 | | --safe | Path of Sentinel-1 manifest.safe file |
 42 | | --swaths | List of target subswaths |
 43 | | --polar | Polarization |
 44 | | --shp | Path of output shapefile |
 45 | | --csv | Path of output CSV file |
 46 | | --json | Path of output JSON file |
 47 | | --api-user | Copernicus username |
 48 | | --api-password | Copernicus password (Leave empty for secure input) |
 49 | | --api-scene | Sentinel-1 scene ID to download |
 50 | | --api-folder | Folder for downloaded XML files |
 51 | 
 52 | Below is a sample command where the user selects subswaths IW2 and IW3, specifies VV polarization, and specifies the output files.
 53 | 
 54 | ```bash
 55 | python stsa.py --zip S1_image.zip --swaths iw2 iw3 --polar vv --shp out_shp.shp --csv out_csv.csv --json out_json.json
 56 | ```
 57 | 
 58 | ## Python Import
 59 | 
 60 | Below are code samples of using `TopsSplitAnalyzer`. When loading Sentinel-1 data please choose either the API or the ZIP
 61 | method, not both.
 62 | 
 63 | ```python
 64 | ############################################
 65 | # Load data using `load_api` or `load_zip`
 66 | ############################################
 67 | 
 68 | import stsa
 69 | 
 70 | s1 = stsa.TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vh')
 71 | 
 72 | # METHOD 1: Load using local ZIP file
 73 | s1.load_zip(zip_path='S1_image.zip')
 74 | 
 75 | # METHOD 2: Load using Copernicus Dataspace API
 76 | s1.load_api(
 77 |     'myusername',
 78 |     'S1A_IW_SLC__1SDV_20210627T043102_20210627T043130_038521_048BB9_DA44',
 79 |     'mypassword', download_folder='xml_data'  # download_folder is required when using the API
 80 | )
 81 | ```
 82 | ```python
 83 | ##################################################################
 84 | # Export the data in your preferred format.
 85 | # To visualize on a webmap you need to be using Jupyter Notebook.
 86 | ##################################################################
 87 | 
 88 | # Write to shapefile
 89 | s1.to_shapefile('data.shp')
 90 | 
 91 | # Write to JSON
 92 | s1.to_json('json_output.json')
 93 | 
 94 | # Write to CSV
 95 | s1.to_csv('output.csv')
 96 | 
 97 | # Visualize on a webmap with additional polygon
 98 | s1.visualize_webmap(polygon='mask.shp')
 99 | ```
100 | 
101 | 
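You can also check which bursts overlap an area of interest directly in Python with the `intersecting_bursts` method, which accepts a GeoDataFrame or a path to a vector file and returns a list of `(subswath, burst)` tuples. A minimal sketch; the `aoi.shp` path is a placeholder:

```python
import geopandas as gpd

# Assumes s1 has already been loaded via load_zip or load_api as shown above
aoi = gpd.read_file('aoi.shp')  # or pass the file path directly
for subswath, burst in s1.intersecting_bursts(aoi):
    print(f'{subswath}: burst {burst}')
```
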
102 | ![Output shown on a webmap](sample_webmap.png)
103 | 
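## Loading an extracted .SAFE directory

STSA can also read metadata straight from an already-extracted `.SAFE` folder through the `load_safe` method, which mirrors the CLI `--safe` flag. A minimal sketch, assuming the scene has already been unzipped; the path below is a placeholder:

```python
import stsa

s1 = stsa.TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vv')

# Point load_safe at the manifest.safe file inside the extracted .SAFE directory
s1.load_safe(safe_path='S1_image.SAFE/manifest.safe')
s1.to_csv('bursts.csv')
```
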
106 | 
107 | # Tests
108 | 
109 | If you plan on contributing, ensure that it passes all tests by installing `pytest` and typing this in the project directory:
110 | 
111 | `pytest tests`
112 | 
--------------------------------------------------------------------------------
/stsa/search.py:
--------------------------------------------------------------------------------
  1 | import os
  2 | import requests
  3 | import json
  4 | 
  5 | import streamlit as st
  6 | 
  7 | # Custom errors for download XML
  8 | class DownloadError(Exception):
  9 |     pass
 10 | 
 11 | 
 12 | class DownloadXML:
 13 | 
 14 |     def __init__(self, image: str, user: str, password: str, verbose=False, streamlit_mode=False):
 15 |         """
 16 |         Download XML data from the Copernicus Data Space Ecosystem (CDSE) API
 17 | 
 18 |         :param image: Scene ID of Sentinel-1 image
 19 |         :param user: Username for Copernicus Dataspace
 20 |         :param password: Password for Copernicus Dataspace
 21 |         :param verbose: Show print statements, defaults to False
 22 |         :param streamlit_mode: Run stsa for streamlit
 23 |         """
 24 |         self._image = image
 25 |         self._user = user
 26 |         self._password = password
 27 |         self._verbose = verbose
 28 |         self._url = None
 29 |         self._CATALOGUE_ODATA_URL = "https://catalogue.dataspace.copernicus.eu/odata/v1/Products"
 30 |         self._DOWNLOAD_ODATA_URL = "https://download.dataspace.copernicus.eu/odata/v1/Products"
 31 |         self._streamlit_mode = streamlit_mode
 32 |         self.session, self.response = self._authenticate(user, password)
 33 |         self.product_is_online = None
 34 |         self.xml_paths = []
 35 | 
 36 |         if 'SLC' not in self._image:
 37 |             msg = 'Scene ID does not belong to an SLC image'
 38 |             if self._streamlit_mode:
 39 |                 st.error(msg)
 40 |                 st.stop()
 41 |             else:
 42 |                 raise DownloadError(msg)
 43 | 
 44 |     def _authenticate(self, username, password):
 45 |         """
 46 |         Authenticate with Copernicus Dataspace API
 47 |         """
 48 |         data = {
 49 |             "client_id": "cdse-public",
 50 |             "username": username,
 51 |             "password": password,
 52 |             "grant_type": "password",
 53 |         }
 54 |         url = "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token"
 55 |         session = requests.Session()
 56 |         response = session.post(url, data=data)
 57 |         response.raise_for_status()
 58 |         return session, response
 59 | 
 60 | 
 61 |     def download_xml(self, output_directory: str, polarization: str = 'vv'):
 62 |         """
 63 |         Download XML metadata from the Copernicus Dataspace OData API
 64 | 
 65 |         :param output_directory: Output folder for downloaded files
 66 |         :param polarization: Polarization of the image.
Default is 'vv' 67 | """ 68 | 69 | # Check polarization argument 70 | if polarization.lower() not in ['vv', 'vh', 'hh', 'hv']: 71 | raise DownloadError(f'Polarization "{polarization}" is not accepted.') 72 | polarization = polarization.lower() 73 | 74 | # Get UUID from scene ID 75 | link = self._get_product_uuid_link() 76 | 77 | # Construct URL that shows XML files 78 | # Looks like: 79 | # https://download.dataspace.copernicus.eu/odata/v1/Products({scene_id})/Nodes({scene}.SAFE)/Nodes(annotation)/Nodes 80 | self._url = f"{link}/Nodes({self._image}.SAFE)/Nodes(annotation)/Nodes" 81 | 82 | # Connect and check response code 83 | if self._verbose is True: 84 | print('Connecting to Copernicus API...') 85 | response = self.session.get(self._url) 86 | self._check_requests_status_code(response.status_code) 87 | response_json = json.loads(response.text) 88 | 89 | # Parse response and download XML files 90 | for item in response_json["result"]: 91 | if item["Name"].endswith(".xml") and f'-{polarization}-' in item["Name"]: 92 | # Download XML File 93 | download_url = f"{self._url}({item['Name']})/$value" 94 | token = self.response.json()["access_token"] 95 | response_metadata = self.session.get(download_url, headers={"Authorization": f"Bearer {token}"}) 96 | outpath = os.path.join(output_directory, item["Name"]) 97 | with open(outpath, 'wb') as f: 98 | f.write(response_metadata.content) 99 | self.xml_paths.append(outpath) 100 | 101 | def _check_product_is_online(self, url: str) -> bool: 102 | """ 103 | Check if product is online or not 104 | 105 | :param url: URL of product 106 | :return: True if product is online, False otherwise 107 | :rtype: bool 108 | """ 109 | # Access API 110 | response = self.session.get(url) 111 | self._check_requests_status_code(response.status_code) 112 | response_json = json.loads(response.text) 113 | 114 | # Check if product is online 115 | if response_json["Online"]: 116 | return True 117 | return False 118 | 119 | def _check_requests_status_code(self, code: int): 120 | """ 121 | Check return code of the API. 122 | 123 | :param code: Return code received from server in integer format 124 | """ 125 | 126 | if code == 200: 127 | if self._verbose: 128 | print('Connection successful') 129 | return 130 | elif 200 < code < 300: 131 | msg = f'Connected to server but something went wrong. Status code: {code}' 132 | if self._streamlit_mode: 133 | st.warning(msg) 134 | else: 135 | print(f'Connected to server but something went wrong. Status code: {code}') 136 | elif code == 401: 137 | msg = f'Username and password not valid. Status code: {code}' 138 | if self._streamlit_mode: 139 | st.error(msg) 140 | st.stop() 141 | else: 142 | raise DownloadError(msg) 143 | elif code == 404: 144 | msg = f'Could not connect to server. Status code: {code}' 145 | if self._streamlit_mode: 146 | st.error(msg) 147 | st.stop() 148 | else: 149 | raise DownloadError(msg) 150 | else: 151 | msg = f'API status code: {code}' 152 | if self._streamlit_mode: 153 | st.warning(msg) 154 | else: 155 | print(msg) 156 | return 157 | 158 | def _get_product_uuid_link(self) -> str: 159 | """ 160 | Prepare UUID link according to the scene ID. 161 | Result looks like https://download.dataspace.copernicus.eu/odata/v1/Products(scene_id) 162 | 163 | :return: URL of scene 164 | :rtype: str 165 | """ 166 | 167 | # Search Products archive first. 
If it is not present there then 168 | # search in the DeletedProducts archive 169 | # Access API 170 | # url = f"https://catalogue.dataspace.copernicus.eu/odata/v1/Products?$filter=Name eq '{self._image}.SAFE'" 171 | url = f"{self._CATALOGUE_ODATA_URL}?$filter=Name eq '{self._image}.SAFE'" 172 | response = self.session.get(url) 173 | self._check_requests_status_code(response.status_code) 174 | response_json = json.loads(response.text) 175 | # Get UUID 176 | if len(response_json["value"]) == 0: 177 | raise DownloadError(f'Scene ID not found for "{self._image}"') 178 | scene_id = response_json["value"][0]["Id"] 179 | # return f"https://download.dataspace.copernicus.eu/odata/v1/Products({scene_id})" 180 | return f"{self._DOWNLOAD_ODATA_URL}({scene_id})" 181 | 182 | 183 | if __name__ == '__main__': 184 | pass 185 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /stsa/stsa.py: -------------------------------------------------------------------------------- 1 | ########################################### 2 | # Author: Panji P. 
Brotoisworo
  3 | # License: Apache License 2.0 (Apache-2.0)
  4 | ###########################################
  5 | 
  6 | import argparse
  7 | import json
  8 | import glob
  9 | import os
 10 | import re
 11 | import sys
 12 | from typing import Union
 13 | import xml.etree.ElementTree as ET
 14 | import warnings
 15 | from zipfile import ZipFile
 16 | 
 17 | import folium
 18 | import geopandas as gpd
 19 | from shapely.geometry import Polygon
 20 | import pandas as pd
 21 | 
 22 | try:
 23 |     from search import DownloadXML, DownloadError
 24 |     from utils import gdf_from_wkt
 25 | except ImportError:
 26 |     # Handle import errors when testing
 27 |     from .search import DownloadXML, DownloadError
 28 |     from .utils import gdf_from_wkt
 29 | 
 30 | 
 31 | class TopsSplitAnalyzer:
 32 | 
 33 |     def __init__(self, target_subswaths: Union[None, str, list] = None, polarization: str = 'vv',
 34 |                  verbose: bool = True, streamlit_mode: bool = False):
 35 |         """
 36 |         Class to interpret and visualize S1-TOPS-SPLIT data as seen in ESA SNAP software.
 37 | 
 38 |         :param target_subswaths: String or list containing strings of subswaths to load. Defaults to all subswaths.
 39 |         :param polarization: Polarization of imagery. Valid options are 'vv' or 'vh' polarizations. Defaults to 'vv'
 40 |         :param verbose: Print statements. Defaults to True
 41 |         :param streamlit_mode: Run stsa for streamlit
 42 |         """
 43 | 
 44 |         if isinstance(target_subswaths, str):
 45 |             # Normalize to lowercase so it matches the metadata filename pattern
 46 |             target_subswaths = target_subswaths.lower()
 47 |             if target_subswaths not in ['iw1', 'iw2', 'iw3']:
 48 |                 raise ValueError(f'Target subswath "{target_subswaths}" not a valid subswath')
 49 | 
 50 |         if isinstance(target_subswaths, list):
 51 |             target_subswaths = [x.lower() for x in target_subswaths]
 52 | 
 53 |         # Load and check user inputs
 54 |         if polarization is None:
 55 |             # Defaults to VV polarization
 56 |             polarization = 'vv'
 57 |         if polarization.lower() not in ['vv', 'vh', 'hh', 'hv']:
 58 |             raise ValueError(f'Input polarization "{polarization}" not recognized.')
 59 | 
 60 |         if target_subswaths is None:
 61 |             # Defaults to all subswaths if no target subswath specified
 62 |             target_subswaths = ['iw1', 'iw2', 'iw3']
 63 | 
 64 |         self._image = None
 65 |         self.archive = None
 66 |         self._download_id = None
 67 |         self._download_folder = None
 68 |         self._api_user = None
 69 |         self._api_download = None
 70 |         self.api_product_is_online = None
 71 |         self._is_downloaded_scene = None
 72 |         self._is_manifest_safe = None
 73 |         self._api_password = None
 74 |         self._target_subswath = target_subswaths
 75 |         self.polarization = polarization.lower()
 76 |         self._verbose = verbose
 77 |         self._streamlit_mode = streamlit_mode
 78 | 
 79 |         # Declare variables
 80 |         self._metadata = None
 81 |         self.metadata_file_list = []
 82 |         self.total_num_bursts = None
 83 |         self.df = None
 84 | 
 85 |     def load_api(self, username: str, scene_id: str, password: Union[str, None] = None,
 86 |                  download_folder: Union[str, None] = None) -> None:
 87 |         """
 88 |         Load Sentinel-1 scene through the Copernicus Dataspace API. Only the relevant XML data will be downloaded.
 89 |         Create an account here: https://dataspace.copernicus.eu/
 90 | 
 91 |         :param username: Username for Copernicus Dataspace
 92 |         :param scene_id: Scene ID of the Sentinel-1 SLC product to download
 93 |         :param password: Password for Copernicus Dataspace. For more secure input do not input a password. User will be
 94 |             prompted to enter password via hidden input.
 95 |         :param download_folder: Download folder for the XML files
 96 |         """
 97 | 
 98 |         self._api_user = username
 99 |         self._api_password = password
100 |         self._download_id = scene_id
101 |         self._download_folder = download_folder
102 |         self._is_downloaded_scene = True
103 | 
104 |         if self._download_folder is None:
105 |             raise ValueError('User selected to download from API but no output folder is defined')
106 | 
107 |         download = DownloadXML(
108 |             image=self._download_id,
109 |             user=self._api_user,
110 |             password=self._api_password,
111 |             verbose=self._verbose,
112 |             streamlit_mode=self._streamlit_mode
113 |         )
114 |         download.download_xml(
115 |             output_directory=self._download_folder,
116 |             polarization=self.polarization
117 |         )
118 | 
119 |         if not len(download.xml_paths):
120 | 
121 |             # If running in streamlit mode the app will launch a troubleshooting process to ensure
122 |             # continuous running of the app
123 |             if not self._streamlit_mode:
124 |                 raise DownloadError('No matching XML found! Try different parameters')
125 | 
126 |             print('No matching XML found! Trying again with different polarization.')
127 |             # If no metadata is found, try once more with the other default polarization
128 |             if self.polarization == 'vv' or self.polarization == 'vh':
129 |                 print('Vertical polarization detected. Setting polarization to "hh"')
130 |                 self.polarization = 'hh'
131 |             elif self.polarization == 'hh' or self.polarization == 'hv':
132 |                 print('Horizontal polarization detected. Setting polarization to "vv"')
133 |                 self.polarization = 'vv'
134 | 
135 |             download.download_xml(
136 |                 output_directory=self._download_folder,
137 |                 polarization=self.polarization
138 |             )
139 |             if not len(download.xml_paths):
140 |                 raise DownloadError('No XML files found after troubleshooting!')
141 |             else:
142 |                 print(f'Troubleshooting method found {len(download.xml_paths)} files using "{self.polarization}" polarization')
143 | 
144 |         self.api_product_is_online = download.product_is_online
145 |         self.metadata_file_list = download.xml_paths
146 |         if self.api_product_is_online is False:
147 |             return
148 | 
149 |         # Load metadata
150 |         self._load_metadata_paths()
151 | 
152 |         # Load geometry
153 |         self._create_subswath_geometry()
154 | 
155 |         if self._verbose:
156 |             print(f'Found {len(self.metadata_file_list)} XML paths')
157 | 
158 |         return
159 | 
160 |     def load_zip(self, zip_path: Union[str, None] = None):
161 |         """
162 |         Load ZIP file containing Sentinel-1 SLC data. XML data containing the burst regions will be loaded.
163 | 
164 |         :param zip_path: Path to the Sentinel-1 SLC ZIP file
165 |         :return: None
166 |         """
167 |         self._is_downloaded_scene = False
168 |         self._image = zip_path
169 |         self.archive = ZipFile(self._image)
170 |         if self._verbose:
171 |             print(f'Loaded ZIP file: {os.path.basename(self._image)}')
172 | 
173 |         # Load metadata
174 |         self._load_metadata_paths()
175 | 
176 |         # Load geometry
177 |         self._create_subswath_geometry()
178 | 
179 |         if self._verbose:
180 |             print(f'Found {len(self.metadata_file_list)} XML paths')
181 | 
182 |         return
183 | 
184 |     def load_safe(self, safe_path: Union[str, None] = None):
185 |         """
186 |         Load SAFE file containing Sentinel-1 SLC data. XML data containing
187 |         the burst regions will be loaded.
188 | 
189 |         :param safe_path: Path to the manifest.safe file inside the .SAFE
190 |             directory.
191 | :return: 192 | """ 193 | self._is_downloaded_scene = False 194 | self._image = os.path.dirname(safe_path) 195 | self._is_manifest_safe = True 196 | if self._verbose: 197 | print(f'Loaded SAFE file {self._image}') 198 | 199 | # Load metadata 200 | self._load_metadata_paths() 201 | 202 | # Load geometry 203 | self._create_subswath_geometry() 204 | 205 | def load_data(self, zip_path: Union[str, None] = None, download_id: Union[str, None] = None, 206 | download_folder: Union[str, None] = None, api_user: Union[str, None] = None, 207 | api_password: Union[str, None] = None): 208 | """ 209 | Load Sentinel-1 XML metadata 210 | 211 | :param zip_path: Path of ZIP file containing full Sentinel-1 image, defaults to None 212 | :param download_id: Scene ID of Sentinel-1 which will be downloaded, defaults to None 213 | :param download_folder: Folder where downloaded XML files will be saved, defaults to None 214 | :param api_user: Username for Copernicus Scihub, defaults to None 215 | :param api_password: Password for Copernicus Scihub, defaults to None 216 | """ 217 | 218 | # Deprecation message 219 | warnings.warn('"load_data" is deprecated and will be removed soon. Please use "load_api" or "load_zip" ' 220 | 'in the future.') 221 | 222 | if zip_path is None and download_id is None: 223 | raise ValueError('No input data detected!') 224 | 225 | self._image = zip_path 226 | self._download_id = download_id 227 | self._download_folder = download_folder 228 | self._api_user = api_user 229 | self._api_password = api_password 230 | self._is_downloaded_scene = False 231 | 232 | # Use ZIP file 233 | if self._image is not None and self._download_id is None: 234 | self.archive = ZipFile(self._image) 235 | if self._verbose: 236 | print(f'Loaded ZIP file: {os.path.basename(self._image)}') 237 | 238 | # Download data 239 | else: 240 | self._is_downloaded_scene = True 241 | 242 | if self._download_folder is None: 243 | raise ValueError('User selected to download from API but no output folder is defined') 244 | 245 | download = DownloadXML( 246 | image=self._download_id, 247 | user=self._api_user, 248 | password=self._api_password, 249 | verbose=self._verbose 250 | ) 251 | download.download_xml( 252 | output_directory=self._download_folder, 253 | polarization=self.polarization 254 | ) 255 | self.metadata_file_list = download.xml_paths 256 | if download.product_is_online is False: 257 | return 258 | 259 | # Load metadata 260 | self._load_metadata_paths() 261 | 262 | # Load geometry 263 | self._create_subswath_geometry() 264 | 265 | if self._verbose: 266 | print(f'Found {len(self.metadata_file_list)} XML paths') 267 | return 268 | 269 | def _load_metadata_paths(self): 270 | """ 271 | Get paths of metadata files based on RegEx string match 272 | """ 273 | 274 | if self._is_downloaded_scene is False: 275 | 276 | # Reset metadata list before loading 277 | self.metadata_file_list = [] 278 | 279 | # Get file list 280 | if self._is_manifest_safe: 281 | archive_files = glob.glob(os.path.join(self._image, 'annotation', '*.xml'), recursive=True) 282 | else: 283 | archive_files = self.archive.namelist() 284 | 285 | # Get metadata files 286 | regex_filter = r's1(?:a|b)-iw\d-slc-(?:vv|vh|hh|hv)-.*\.xml' 287 | 288 | for item in archive_files: 289 | if 'calibration' in item: 290 | continue 291 | match = re.search(regex_filter, item) 292 | if match: 293 | self.metadata_file_list.append(item) 294 | if not self.metadata_file_list: 295 | raise Exception(f'No metadata files found in {os.path.basename(self._image)}') 296 | else: 297 | 
if len(self.metadata_file_list) == 0:
298 |                 raise Exception(f'No XML files found in {self._download_folder}')
299 | 
300 |     # Get metadata
301 |     def _load_metadata(self, target_subswath=None, target_polarization=None):
302 |         """
303 |         Load XML data
304 |         :param target_subswath: Desired subswath for metadata extraction
305 |         :param target_polarization: Desired polarization for metadata extraction
306 |         :return: None. Sets self._metadata to a file-like object or path that is parsed with ElementTree
307 |         """
308 | 
309 |         if not target_subswath:
310 |             target_subswath = self._target_subswath
311 |         if not target_polarization:
312 |             target_polarization = self.polarization
313 | 
314 |         assert isinstance(target_subswath, str) is True, f'Expected string for target_subswath for _load_metadata. Got {target_subswath} which is type {type(target_subswath)}'
315 | 
316 |         target_file = None
317 |         for item in self.metadata_file_list:
318 |             if target_subswath in item and target_polarization in item:
319 |                 target_file = item
320 |         if not target_file:
321 |             raise Exception(f'Found no matching XML file with target subswath "{target_subswath}" and target polarization "{target_polarization}". \
322 |                 Possible matches: {self.metadata_file_list}')
323 | 
324 |         if self._is_downloaded_scene is False:
325 |             if self._is_manifest_safe:
326 |                 metadata = open(target_file)
327 |             else:
328 |                 # Open XML from ZIP
329 |                 metadata = self.archive.open(target_file)
330 |         else:
331 |             # Open XML from downloaded or SAFE files
332 |             metadata = target_file
333 | 
334 |         self._metadata = metadata
335 | 
336 |         self._check_metadata_loaded = True
337 | 
338 |     def _parse_location_grid(self):
339 |         """
340 |         Parse the XML metadata and get the coordinate data stored in geolocationGrid
341 |         """
342 | 
343 |         tree = ET.parse(self._metadata)
344 |         root = tree.getroot()
345 | 
346 |         # Get subswath and burst coordinates
347 |         lines = []
348 |         coord_list = []
349 |         for grid_list in root.iter('geolocationGrid'):
350 |             for point in grid_list:
351 |                 for item in point:
352 |                     lat = item.find('latitude').text
353 |                     lon = item.find('longitude').text
354 |                     line = item.find('line').text
355 |                     lines.append(line)
356 |                     coord_list.append((float(lat), float(lon)))
357 |         if self.total_num_bursts is None and self._verbose:
358 |             print(f'Loaded location grid with {len(set(lines)) - 1} bursts and {len(coord_list)} coordinates')
359 |         self.total_num_bursts = len(set(lines)) - 1
360 | 
361 |         return coord_list
362 | 
363 |     def _parse_subswath_geometry(self, coord_list):
364 |         """
365 |         Create geometry from location grid XML data
366 |         :param coord_list: Input coord list loaded from geolocationGrid metadata
367 |         :return:
368 |         """
369 |         def get_coords(index, coord_list):
370 |             coord = coord_list[index]
371 |             assert isinstance(coord[1], float)
372 |             assert isinstance(coord[0], float)
373 |             return coord[1], coord[0]
374 | 
375 |         bursts_dict = {}
376 |         top_right_idx = 0      # the geolocation grid stores 21 points per burst line,
377 |         top_left_idx = 20      # so the four corners of a burst sit at these fixed
378 |         bottom_left_idx = 41   # offsets, advancing by 21 points per burst
379 |         bottom_right_idx = 21
380 | 
381 |         for burst_num in range(1, self.total_num_bursts + 1):
382 |             # Create polygon
383 |             burst_polygon = Polygon(
384 |                 [
385 |                     [get_coords(top_right_idx, coord_list)[0], get_coords(top_right_idx, coord_list)[1]],  # Top right
386 |                     [get_coords(top_left_idx, coord_list)[0], get_coords(top_left_idx, coord_list)[1]],  # Top left
387 |                     [get_coords(bottom_left_idx, coord_list)[0], get_coords(bottom_left_idx, coord_list)[1]],  # Bottom left
388 |                     [get_coords(bottom_right_idx, coord_list)[0], get_coords(bottom_right_idx, coord_list)[1]]  # Bottom right
389 |                 ]
390 
| ) 391 | 392 | top_right_idx += 21 393 | top_left_idx += 21 394 | bottom_left_idx += 21 395 | bottom_right_idx += 21 396 | 397 | bursts_dict[burst_num] = burst_polygon 398 | 399 | return bursts_dict 400 | 401 | def _create_subswath_geometry(self): 402 | """ 403 | Create geodataframe from the XML metadata 404 | """ 405 | 406 | if isinstance(self._target_subswath, list): 407 | # Create empty dataframe for subswaths to be added into 408 | df_all = gpd.GeoDataFrame(columns=['subswath', 'burst', 'geometry'], crs='EPSG:4326') 409 | for subswath in self._target_subswath: 410 | self._load_metadata(subswath, self.polarization) 411 | coord_list = self._parse_location_grid() 412 | subswath_geom = self._parse_subswath_geometry(coord_list) 413 | df = gpd.GeoDataFrame( 414 | {'subswath': [subswath.upper()] * len(subswath_geom), 415 | 'burst': [x for x in subswath_geom.keys()], 416 | 'geometry': [x for x in subswath_geom.values()] 417 | }, 418 | crs='EPSG:4326' 419 | ) 420 | # Concat to main dataframe 421 | df_all = gpd.GeoDataFrame(pd.concat([df_all, df])) 422 | # The index will be repeating itself if we include multiple subswaths 423 | # Thus we reset it 424 | df_all = df_all.reset_index(drop=True) 425 | else: 426 | # Write one subswath only 427 | self._load_metadata() 428 | coord_list = self._parse_location_grid() 429 | subswath_geom = self._parse_subswath_geometry(coord_list) 430 | df_all = gpd.GeoDataFrame( 431 | {'subswath': [self._target_subswath.upper()] * len(subswath_geom), 432 | 'burst': [x for x in subswath_geom.keys()], 433 | 'geometry': [x for x in subswath_geom.values()] 434 | }, 435 | crs='EPSG:4326' 436 | ) 437 | 438 | self.df = df_all 439 | if self.df is None: 440 | raise Exception('Dataframe is empty. Please check data is loaded properly') 441 | 442 | if self._streamlit_mode is True: 443 | for item in self.metadata_file_list: 444 | os.remove(item) 445 | 446 | def intersecting_bursts(self, geom): 447 | """ 448 | Find intersecting SLC bursts numbers and their subswath according to an input 449 | geometry. 450 | 451 | :return: list 452 | """ 453 | intersecting = [] 454 | 455 | if not isinstance(geom, gpd.GeoDataFrame): 456 | geom = gpd.read_file(geom) 457 | 458 | for i, row in self.df.iterrows(): 459 | if row['geometry'].intersects(geom['geometry'][0]): 460 | intersecting.append((row['subswath'], row['burst'])) 461 | return intersecting 462 | 463 | def to_json(self, output_file): 464 | """ 465 | Returns S1-TOPS-SPLIT data in JSON format 466 | :return: JSON 467 | """ 468 | json_data = json.loads(self.df.to_json()) 469 | with open(output_file, 'w') as f: 470 | json.dump(json_data, f, indent=4) 471 | 472 | def to_shapefile(self, output_file): 473 | """ 474 | Write shapefile of S1-TOPS-SPLIT data 475 | :param output_file: Path of output shapefile 476 | """ 477 | self.df.to_file(filename=output_file) 478 | 479 | def to_csv(self, output_file): 480 | """ 481 | Write CSV of S1-TOPS-SPLIT data 482 | :param output_file: Path of output CSV file 483 | """ 484 | self.df.to_csv(output_file, index=False) 485 | 486 | def visualize_webmap(self, polygon=None): 487 | """ 488 | Visualize S1-TOPS data on a Folium webmap. 
Intended for Jupyter Notebook environments.
489 |         :param polygon: Path of additional shapefile to visualize
490 |         :return: Folium webmap
491 |         """
492 |         with warnings.catch_warnings():
493 |             warnings.simplefilter("ignore")
494 |             location_x = self.df.centroid.map(lambda p: p.x).iloc[0]
495 |             location_y = self.df.centroid.map(lambda p: p.y).iloc[0]
496 |         m = folium.Map(location=[location_y, location_x], zoom_start=8)
497 | 
498 |         boundary = folium.FeatureGroup(name='Sentinel-1 Data').add_to(m)
499 | 
500 |         for item in self.df.iterrows():
501 |             subswath = item[1]['subswath']
502 |             burst = item[1]['burst']
503 |             geom = item[1]['geometry']
504 |             info = f'SUBSWATH: {subswath}<br>
Burst: {burst}' 505 | feature = folium.GeoJson(data=geom, tooltip=info).add_to(boundary) 506 | boundary.add_child(feature) 507 | 508 | if polygon is not None: 509 | # Visualize additional polygon in red color 510 | style = {'fillColor': '#cc0000', 'color': '#cc0000'} 511 | if isinstance(polygon, gpd.GeoDataFrame): 512 | df_mask = polygon 513 | else: 514 | df_mask = gpd.read_file(polygon) 515 | folium.GeoJson(data=df_mask, tooltip='User loaded shapefile', style_function=lambda x: style, name='Additional Polygon').add_to(m) 516 | folium.LayerControl().add_to(m) 517 | return m 518 | 519 | def close(self): 520 | """ 521 | Close connection to ZIP file 522 | """ 523 | self.archive.close() 524 | 525 | 526 | if __name__ == '__main__': 527 | 528 | # Define CLI flags and parse inputs 529 | parser = argparse.ArgumentParser(description='S-1 TOPS SPLIT Analyzer') 530 | 531 | main_args = parser.add_argument_group('Script Parameters') 532 | main_args.add_argument('-v', help='Verbose mode', action='store_true') 533 | main_args.add_argument('--zip', help='Input Sentinel-1 ZIP file') 534 | main_args.add_argument('--api-scene', help='Target scene ID to download') 535 | main_args.add_argument('--api-user', help='Username for Copernicus Scihub API') 536 | main_args.add_argument('--api-password', help='Password for Copernicus Scihub API') 537 | main_args.add_argument('--api-folder', help='Folder for downloaded XML files') 538 | main_args.add_argument('--safe', help='Input Sentinel-1 manifest.safe file') 539 | 540 | xml_args = parser.add_argument_group('XML Parsing Parameters') 541 | xml_args.add_argument('--swaths', help='List of subswaths', nargs='*', choices=['iw1', 'iw2', 'iw3']) 542 | xml_args.add_argument('--polar', help='Polarization', choices=['vv', 'vh']) 543 | xml_args.add_argument('--shp', help='Output path of shapefile') 544 | xml_args.add_argument('--csv', help='Output path of CSV file') 545 | xml_args.add_argument('--json', help='Output path of JSON file') 546 | 547 | args = parser.parse_args() 548 | args = vars(args) 549 | 550 | s1 = TopsSplitAnalyzer( 551 | target_subswaths=args['swaths'], 552 | polarization=args['polar'] 553 | ) 554 | 555 | if args['zip'] and (args['api_user'] and args['api_password']): 556 | print('Error! Input detected for ZIP and API methods. 
Please use one method only.')
557 |         sys.exit()
558 | 
559 |     if args['zip'] is not None:
560 |         s1.load_zip(zip_path=args['zip'])
561 |     elif args['safe'] is not None:
562 |         s1.load_safe(safe_path=args['safe'])
563 |     else:
564 |         s1.load_api(
565 |             username=args['api_user'],
566 |             password=args['api_password'],
567 |             scene_id=args['api_scene'],
568 |             download_folder=args['api_folder']
569 |         )
570 | 
571 |     if args['shp']:
572 |         print('Writing shapefile to', args['shp'])
573 |         s1.to_shapefile(args['shp'])
574 |     if args['csv']:
575 |         print('Writing CSV to', args['csv'])
576 |         s1.to_csv(args['csv'])
577 |     if args['json']:
578 |         print('Writing JSON to', args['json'])
579 |         s1.to_json(args['json'])
580 | 
--------------------------------------------------------------------------------
/tests/test_stsa.py:
--------------------------------------------------------------------------------
  1 | import json
  2 | import os
  3 | import subprocess
  4 | import sys
  5 | from collections import Counter
  6 | 
  7 | REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  8 | STSA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'stsa')
  9 | # sys.path.append(REPO_ROOT)
 10 | # sys.path.append(STSA_DIR)
 11 | 
 12 | import pandas as pd
 13 | import pytest
 14 | 
 15 | from stsa import TopsSplitAnalyzer
 16 | 
 17 | global data1
 18 | global data2
 19 | global data3
 20 | 
 21 | TEST_DIR = os.path.abspath('tests')
 22 | data1 = os.path.join(TEST_DIR, 'data1', 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE.zip')
 23 | data2 = os.path.join(TEST_DIR, 'data2', 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE.zip')
 24 | data3 = os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'manifest.safe')
 25 | 
 26 | #################################################################################
 27 | # Input tests
 28 | #################################################################################
 29 | 
 30 | def test_TopsSplitAnalyzer_string_subswath():
 31 | 
 32 |     s1 = TopsSplitAnalyzer(target_subswaths='iw2')
 33 |     s1.load_zip(zip_path=data2)
 34 |     out_file = 'test_json.json'
 35 |     s1.to_json(out_file)
 36 | 
 37 |     assert os.path.exists(out_file), 'No output detected for string input of subswath'
 38 |     os.remove(out_file)
 39 | 
 40 | def test_TopsSplitAnalyzer_use_defaults():
 41 | 
 42 |     s1 = TopsSplitAnalyzer()
 43 |     s1.load_zip(zip_path=data2)
 44 |     expected = 'vv'
 45 |     actual = s1.polarization
 46 |     assert expected == actual, f'Polarization does not match. Actual is {actual}. Expected is {expected}'
 47 | 
 48 |     expected = ['iw1', 'iw2', 'iw3']
 49 |     actual = s1._target_subswath
 50 | 
 51 |     assert actual == expected, f'Subswaths do not match. Actual is {actual}. Expected is {expected}'
 52 | 
 53 | def test_TopsSplitAnalyzer_string_caps_input():
 54 |     "Check input if in all caps"
 55 |     s1 = TopsSplitAnalyzer(target_subswaths=['IW1', 'IW2', 'IW3'], polarization='VV')
 56 |     s1.load_zip(zip_path=data2)
 57 | 
 58 |     expected = 'vv'
 59 |     actual = s1.polarization
 60 |     assert actual == expected, f'Polarization does not match. Actual is {actual}. Expected is {expected}'
 61 | 
 62 |     expected = ['iw1', 'iw2', 'iw3']
 63 |     actual = s1._target_subswath
 64 |     assert actual == expected, f'Subswaths do not match. Actual is {actual}. Expected is {expected}'
 65 | 
 66 |     # Check internal dataframe
 67 |     s1.to_json('scratch.json')
 68 |     expected = 'IW1'
 69 |     actual = s1.df.iloc[0]['subswath']
 70 |     assert actual == expected, f'Subswath string does not match. Actual is {actual}. Expected is {expected}'
 71 |     os.remove('scratch.json')
 72 | 
 73 | @pytest.mark.xfail
 74 | def test_TopsSplitAnalyzer_xfail_subswath1():
 75 |     "Should fail due to improper input for subswaths"
 76 |     s1 = TopsSplitAnalyzer(target_subswaths=['iw1 iw2 iw3'], polarization='vv')
 77 |     s1.load_zip(zip_path=data2)
 78 | 
 79 | @pytest.mark.xfail
 80 | def test_TopsSplitAnalyzer_xfail_subswath2():
 81 |     "Should fail due to iw4 not being a valid subswath"
 82 |     s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw4'], polarization='vv')
 83 |     s1.load_zip(zip_path=data2)
 84 | 
 85 | @pytest.mark.xfail
 86 | def test_TopsSplitAnalyzer_xfail_invalid_polarization():
 87 |     "Should fail due to invalid polarization"
 88 |     s1 = TopsSplitAnalyzer(polarization='HV')
 89 |     s1.load_zip(zip_path=data2)
 90 | 
 91 | @pytest.mark.xfail
 92 | def test_non_single_input():
 93 |     "Should fail due to multiple inputs from API and ZIP. Should only be one input."
 94 | 
 95 |     cmd = f'python stsa.py --zip {data1} --api-user 123 --api-password 123'.split()
 96 |     subprocess.call(cmd, cwd=STSA_DIR)
 97 |     return
 98 | 
 99 | def test_TopsSplitAnalyzer_cli_json():
100 |     "CLI with JSON output"
101 | 
102 |     out_file = os.path.join(TEST_DIR, 'cli_json_out.json')
103 |     cmd = f'python stsa.py --zip {data1} --json {out_file}'.split()
104 |     subprocess.call(cmd, cwd=STSA_DIR)
105 | 
106 |     assert os.path.exists(out_file), f'CLI did not generate expected output. Expected output file {out_file}'
107 |     os.remove(out_file)
108 | 
109 | def test_TopsSplitAnalyzer_cli_shp():
110 |     "CLI with shp output"
111 | 
112 |     out_file = os.path.join(TEST_DIR, 'cli_shp_out.shp')
113 |     cmd = f'python stsa.py --zip {data1} --shp {out_file}'.split()
114 |     subprocess.call(cmd, cwd=STSA_DIR)
115 | 
116 |     assert os.path.exists(out_file), f'CLI did not generate expected output. Expected output file {out_file}'
117 | 
118 |     out_file_basename = out_file.replace('.shp', '')
119 |     os.remove(out_file_basename + '.cpg')
120 |     os.remove(out_file_basename + '.dbf')
121 |     os.remove(out_file_basename + '.prj')
122 |     os.remove(out_file_basename + '.shx')
123 |     os.remove(out_file_basename + '.shp')
124 | 
125 | def test_TopsSplitAnalyzer_cli_csv():
126 |     "CLI with CSV output"
127 | 
128 |     out_file = os.path.join(TEST_DIR, 'cli_csv_out.csv')
129 |     cmd = f'python stsa.py --zip {data1} --csv {out_file}'.split()
130 |     subprocess.call(cmd, cwd=STSA_DIR)
131 | 
132 |     assert os.path.exists(out_file), f'CLI did not generate expected output file {out_file}'
133 |     os.remove(out_file)
134 | 
135 | def test_TopsSplitAnalyzer_cli_subswaths():
136 |     "CLI with different subswath inputs"
137 | 
138 |     out_file = os.path.join(TEST_DIR, 'out_csv.csv')
139 | 
140 |     cmd = f'python stsa.py --zip {data1} --swaths iw1 --csv {out_file}'.split()
141 |     subprocess.call(cmd, cwd=STSA_DIR)
142 | 
143 |     df = pd.read_csv(out_file)
144 |     expected = 9
145 |     actual = len(df)
146 |     assert actual == expected, f'CSV length did not match expected output. Actual was {actual}. Expected is {expected}'
147 |     os.remove(out_file)
148 | 
149 |     cmd = f'python stsa.py --zip {data1} --swaths iw1 iw3 --csv {out_file}'.split()
150 |     subprocess.call(cmd, cwd=STSA_DIR)
151 | 
152 |     df = pd.read_csv(out_file)
153 |     expected = 18
154 |     actual = len(df)
155 |     assert actual == expected, f'CSV length did not match expected output. Actual was {actual}.
Expected is {expected}' 156 | os.remove(out_file) 157 | 158 | cmd = f'python stsa.py --zip {data1} --swaths iw1 iw3 iw2 --csv {out_file}'.split() 159 | subprocess.call(cmd, cwd=STSA_DIR) 160 | 161 | df = pd.read_csv(out_file) 162 | expected = 27 163 | actual = len(df) 164 | assert actual == expected, f'CSV length did not match expected output. Actual was {actual}. Expected is {expected}' 165 | os.remove(out_file) 166 | 167 | ################################################################################# 168 | # Output tests 169 | ################################################################################# 170 | 171 | # Using data1 folder 172 | def test_TopsSplitAnalyzer_data1_all_vv(): 173 | 174 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vv') 175 | s1.load_zip(zip_path=data1) 176 | 177 | ######################################################################## 178 | # Check metadata list. All 6 items should be loaded regardless of user input 179 | ######################################################################## 180 | 181 | expected = ['S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw1-slc-vh-20201123t142500-20201123t142525-035377-042241-001.xml', 182 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw1-slc-vv-20201123t142500-20201123t142525-035377-042241-004.xml', 183 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw2-slc-vh-20201123t142458-20201123t142524-035377-042241-002.xml', 184 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw2-slc-vv-20201123t142458-20201123t142524-035377-042241-005.xml', 185 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw3-slc-vh-20201123t142459-20201123t142524-035377-042241-003.xml', 186 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw3-slc-vv-20201123t142459-20201123t142524-035377-042241-006.xml'] 187 | actual = s1.metadata_file_list 188 | assert expected == actual, 'Metadata list does not match' 189 | 190 | ######################################################################## 191 | # Check shapefile data was generated 192 | ######################################################################## 193 | 194 | out_shp_items = [ 195 | os.path.join(TEST_DIR, 'swaths_all_vv.cpg'), 196 | os.path.join(TEST_DIR, 'swaths_all_vv.dbf'), 197 | os.path.join(TEST_DIR, 'swaths_all_vv.prj'), 198 | os.path.join(TEST_DIR, 'swaths_all_vv.shp'), 199 | os.path.join(TEST_DIR, 'swaths_all_vv.shx') 200 | ] 201 | s1.to_shapefile(out_shp_items[3]) 202 | for item in out_shp_items: 203 | expected = True 204 | actual = os.path.exists(item) 205 | assert actual == expected, f'Shapefile was not generated correctly. 
Missing: {item}' 206 | 207 | ######################################################################## 208 | # Check JSON was generated correctly 209 | ######################################################################## 210 | 211 | json_out = os.path.join(TEST_DIR, 'test_json.json') 212 | s1.to_json(json_out) 213 | 214 | # Read output JSON file 215 | with open(json_out) as json_file: 216 | data = json.load(json_file) 217 | assert isinstance(data, dict) == True, 'Error when loading JSON as Python dictionary' 218 | 219 | # Test contents 220 | expected = 'FeatureCollection' 221 | actual = data['type'] 222 | assert expected == actual, f'JSON data for "type" does not match' 223 | 224 | # Test id 0 225 | expected = {'burst': 1, 'subswath': 'IW1'} 226 | actual = data['features'][0]['properties'] 227 | assert expected == actual, f'JSON features with ID 0 do not match. Actual is {actual}. Expected is {expected}' 228 | # Test id 23 229 | expected = {'burst': 6, 'subswath': 'IW3'} 230 | actual = data['features'][23]['properties'] 231 | assert expected == actual, f'JSON features with ID 23 do not match. Actual is {actual}. Expected is {expected}' 232 | 233 | ######################################################################## 234 | # Check CSV was generated correctly 235 | ######################################################################## 236 | 237 | csv_out = os.path.join(TEST_DIR, 'test_csv.csv') 238 | s1.to_csv(csv_out) 239 | 240 | df = pd.read_csv(csv_out) 241 | 242 | # Test CSV size 243 | expected = 27 244 | actual = len(df) 245 | assert expected == actual, f'CSV file does not match. Actual is {actual} rows. Expected {expected} rows' 246 | 247 | # Check id 0 248 | expected = 'IW1' 249 | actual = df.iloc[0]['subswath'] 250 | assert expected == actual, f'Subswath for row 0 does not match. Actual is {actual}. Expected is {expected}' 251 | 252 | expected = 1 253 | actual = df.iloc[0]['burst'] 254 | assert expected == actual, f'Burst for row 0 does not match. Actual is {actual}. Expected is {expected}' 255 | 256 | # Delete files after tests 257 | for item in out_shp_items: 258 | os.remove(item) 259 | os.remove(json_out) 260 | os.remove(csv_out) 261 | 262 | def test_TopsSplitAnalyzer_data1_all_vh(): 263 | 264 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vh') 265 | s1.load_zip(zip_path=data1) 266 | 267 | ######################################################################## 268 | # Check metadata list. 
All 6 items should be loaded regardless of user input 269 | ######################################################################## 270 | 271 | expected = ['S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw1-slc-vh-20201123t142500-20201123t142525-035377-042241-001.xml', 272 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw1-slc-vv-20201123t142500-20201123t142525-035377-042241-004.xml', 273 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw2-slc-vh-20201123t142458-20201123t142524-035377-042241-002.xml', 274 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw2-slc-vv-20201123t142458-20201123t142524-035377-042241-005.xml', 275 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw3-slc-vh-20201123t142459-20201123t142524-035377-042241-003.xml', 276 | 'S1A_IW_SLC__1SDV_20201123T142458_20201123T142525_035377_042241_C054.SAFE/annotation/s1a-iw3-slc-vv-20201123t142459-20201123t142524-035377-042241-006.xml'] 277 | actual = s1.metadata_file_list 278 | assert expected == actual, 'Metadata list does not match' 279 | 280 | ######################################################################## 281 | # Check shapefile data was generated 282 | ######################################################################## 283 | 284 | out_shp_items = [ 285 | os.path.join(TEST_DIR, 'swaths_all_vh.cpg'), 286 | os.path.join(TEST_DIR, 'swaths_all_vh.dbf'), 287 | os.path.join(TEST_DIR, 'swaths_all_vh.prj'), 288 | os.path.join(TEST_DIR, 'swaths_all_vh.shp'), 289 | os.path.join(TEST_DIR, 'swaths_all_vh.shx') 290 | ] 291 | s1.to_shapefile(out_shp_items[3]) 292 | for item in out_shp_items: 293 | expected = True 294 | actual = os.path.exists(item) 295 | assert actual == expected, f'Shapefile was not generated correctly. Missing: {item}' 296 | 297 | ######################################################################## 298 | # Check JSON was generated correctly 299 | ######################################################################## 300 | 301 | json_out = os.path.join(TEST_DIR, 'test_json.json') 302 | s1.to_json(json_out) 303 | 304 | # Read output JSON file 305 | with open(json_out) as json_file: 306 | data = json.load(json_file) 307 | assert isinstance(data, dict) == True, 'Error when loading JSON as Python dictionary' 308 | 309 | # Test contents 310 | expected = 'FeatureCollection' 311 | actual = data['type'] 312 | assert expected == actual, f'JSON data for "type" does not match' 313 | 314 | # Test id 0 315 | expected = {'burst': 1, 'subswath': 'IW1'} 316 | actual = data['features'][0]['properties'] 317 | assert expected == actual, f'JSON features with ID 0 do not match. Actual is {actual}. Expected is {expected}' 318 | # Test id 23 319 | expected = {'burst': 6, 'subswath': 'IW3'} 320 | actual = data['features'][23]['properties'] 321 | assert expected == actual, f'JSON features with ID 23 do not match. Actual is {actual}. Expected is {expected}' 322 | 323 | ######################################################################## 324 | # Check CSV was generated correctly 325 | ######################################################################## 326 | 327 | csv_out = os.path.join(TEST_DIR, 'test_csv.csv') 328 | s1.to_csv(csv_out) 329 | 330 | df = pd.read_csv(csv_out) 331 | 332 | # Test CSV size 333 | expected = 27 334 | actual = len(df) 335 | assert expected == actual, f'CSV file does not match. 
Actual is {actual} rows. Expected {expected} rows' 336 | 337 | # Check id 0 338 | expected = 'IW1' 339 | actual = df.iloc[0]['subswath'] 340 | assert expected == actual, f'Subswath for row 0 does not match. Actual is {actual}. Expected is {expected}' 341 | 342 | expected = 1 343 | actual = df.iloc[0]['burst'] 344 | assert expected == actual, f'Burst for row 0 does not match. Actual is {actual}. Expected is {expected}' 345 | 346 | # Delete files after tests 347 | for item in out_shp_items: 348 | os.remove(item) 349 | os.remove(json_out) 350 | os.remove(csv_out) 351 | 352 | # Using data2 folder 353 | def test_TopsSplitAnalyzer_data2_all_vv(): 354 | 355 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vv') 356 | s1.load_zip(zip_path=data2) 357 | 358 | ######################################################################## 359 | # Check metadata list. All 6 items should be loaded regardless of user input 360 | ######################################################################## 361 | 362 | expected = ['S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw1-slc-vh-20200929t214703-20200929t214728-034579-04068c-001.xml', 363 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw1-slc-vv-20200929t214703-20200929t214728-034579-04068c-004.xml', 364 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw2-slc-vh-20200929t214701-20200929t214726-034579-04068c-002.xml', 365 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw2-slc-vv-20200929t214701-20200929t214726-034579-04068c-005.xml', 366 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw3-slc-vh-20200929t214702-20200929t214727-034579-04068c-003.xml', 367 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw3-slc-vv-20200929t214702-20200929t214727-034579-04068c-006.xml'] 368 | 369 | actual = s1.metadata_file_list 370 | assert expected == actual, 'Metadata list does not match' 371 | 372 | ######################################################################## 373 | # Check shapefile data was generated 374 | ######################################################################## 375 | 376 | out_shp_items = [ 377 | os.path.join(TEST_DIR, 'swaths_all_vv.cpg'), 378 | os.path.join(TEST_DIR, 'swaths_all_vv.dbf'), 379 | os.path.join(TEST_DIR, 'swaths_all_vv.prj'), 380 | os.path.join(TEST_DIR, 'swaths_all_vv.shp'), 381 | os.path.join(TEST_DIR, 'swaths_all_vv.shx') 382 | ] 383 | s1.to_shapefile(out_shp_items[3]) 384 | for item in out_shp_items: 385 | expected = True 386 | actual = os.path.exists(item) 387 | assert actual == expected, f'Shapefile was not generated correctly. 
Missing: {item}' 388 | 389 | ######################################################################## 390 | # Check JSON was generated correctly 391 | ######################################################################## 392 | 393 | json_out = os.path.join(TEST_DIR, 'test_json.json') 394 | s1.to_json(json_out) 395 | 396 | # Read output JSON file 397 | with open(json_out) as json_file: 398 | data = json.load(json_file) 399 | assert isinstance(data, dict) == True, 'Error when loading JSON as Python dictionary' 400 | 401 | # Test contents 402 | expected = 'FeatureCollection' 403 | actual = data['type'] 404 | assert expected == actual, f'JSON data for "type" does not match' 405 | 406 | # Test id 0 407 | expected = {'burst': 1, 'subswath': 'IW1'} 408 | actual = data['features'][0]['properties'] 409 | assert expected == actual, f'JSON features with ID 0 do not match. Actual is {actual}. Expected is {expected}' 410 | # Test id 23 411 | expected = {'burst': 6, 'subswath': 'IW3'} 412 | actual = data['features'][23]['properties'] 413 | assert expected == actual, f'JSON features with ID 23 do not match. Actual is {actual}. Expected is {expected}' 414 | 415 | ######################################################################## 416 | # Check CSV was generated correctly 417 | ######################################################################## 418 | 419 | csv_out = os.path.join(TEST_DIR, 'test_csv.csv') 420 | s1.to_csv(csv_out) 421 | 422 | df = pd.read_csv(csv_out) 423 | 424 | # Test CSV size 425 | expected = 27 426 | actual = len(df) 427 | assert expected == actual, f'CSV file does not match. Actual is {actual} rows. Expected {expected} rows' 428 | 429 | # Check id 0 430 | expected = 'IW1' 431 | actual = df.iloc[0]['subswath'] 432 | assert expected == actual, f'Subswath for row 0 does not match. Actual is {actual}. Expected is {expected}' 433 | 434 | expected = 1 435 | actual = df.iloc[0]['burst'] 436 | assert expected == actual, f'Burst for row 0 does not match. Actual is {actual}. Expected is {expected}' 437 | 438 | # Delete files after tests 439 | for item in out_shp_items: 440 | os.remove(item) 441 | os.remove(json_out) 442 | os.remove(csv_out) 443 | 444 | # Using data2 folder 445 | def test_TopsSplitAnalyzer_data2_all_vh(): 446 | 447 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vh') 448 | s1.load_zip(zip_path=data2) 449 | 450 | ######################################################################## 451 | # Check metadata list. 
All 6 items should be loaded regardless of user input 452 | ######################################################################## 453 | 454 | expected = ['S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw1-slc-vh-20200929t214703-20200929t214728-034579-04068c-001.xml', 455 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw1-slc-vv-20200929t214703-20200929t214728-034579-04068c-004.xml', 456 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw2-slc-vh-20200929t214701-20200929t214726-034579-04068c-002.xml', 457 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw2-slc-vv-20200929t214701-20200929t214726-034579-04068c-005.xml', 458 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw3-slc-vh-20200929t214702-20200929t214727-034579-04068c-003.xml', 459 | 'S1A_IW_SLC__1SDV_20200929T214701_20200929T214728_034579_04068C_4E7A.SAFE/annotation/s1a-iw3-slc-vv-20200929t214702-20200929t214727-034579-04068c-006.xml'] 460 | 461 | actual = s1.metadata_file_list 462 | assert expected == actual, 'Metadata list does not match' 463 | 464 | ######################################################################## 465 | # Check shapefile data was generated 466 | ######################################################################## 467 | 468 | out_shp_items = [ 469 | os.path.join(TEST_DIR, 'swaths_all_vh.cpg'), 470 | os.path.join(TEST_DIR, 'swaths_all_vh.dbf'), 471 | os.path.join(TEST_DIR, 'swaths_all_vh.prj'), 472 | os.path.join(TEST_DIR, 'swaths_all_vh.shp'), 473 | os.path.join(TEST_DIR, 'swaths_all_vh.shx') 474 | ] 475 | s1.to_shapefile(out_shp_items[3]) 476 | for item in out_shp_items: 477 | expected = True 478 | actual = os.path.exists(item) 479 | assert actual == expected, f'Shapefile was not generated correctly. Missing: {item}' 480 | 481 | ######################################################################## 482 | # Check JSON was generated correctly 483 | ######################################################################## 484 | 485 | json_out = os.path.join(TEST_DIR, 'test_json.json') 486 | s1.to_json(json_out) 487 | 488 | # Read output JSON file 489 | with open(json_out) as json_file: 490 | data = json.load(json_file) 491 | assert isinstance(data, dict) == True, 'Error when loading JSON as Python dictionary' 492 | 493 | # Test contents 494 | expected = 'FeatureCollection' 495 | actual = data['type'] 496 | assert expected == actual, f'JSON data for "type" does not match' 497 | 498 | # Test id 0 499 | expected = {'burst': 1, 'subswath': 'IW1'} 500 | actual = data['features'][0]['properties'] 501 | assert expected == actual, f'JSON features with ID 0 do not match. Actual is {actual}. Expected is {expected}' 502 | # Test id 23 503 | expected = {'burst': 6, 'subswath': 'IW3'} 504 | actual = data['features'][23]['properties'] 505 | assert expected == actual, f'JSON features with ID 23 do not match. Actual is {actual}. Expected is {expected}' 506 | 507 | ######################################################################## 508 | # Check CSV was generated correctly 509 | ######################################################################## 510 | 511 | csv_out = os.path.join(TEST_DIR, 'test_csv.csv') 512 | s1.to_csv(csv_out) 513 | 514 | df = pd.read_csv(csv_out) 515 | 516 | # Test CSV size 517 | expected = 27 518 | actual = len(df) 519 | assert expected == actual, f'CSV file does not match. 
Actual is {actual} rows. Expected {expected} rows' 520 | 521 | # Check id 0 522 | expected = 'IW1' 523 | actual = df.iloc[0]['subswath'] 524 | assert expected == actual, f'Subswath for row 0 does not match. Actual is {actual}. Expected is {expected}' 525 | 526 | expected = 1 527 | actual = df.iloc[0]['burst'] 528 | assert expected == actual, f'Burst for row 0 does not match. Actual is {actual}. Expected is {expected}' 529 | 530 | # Delete files after tests 531 | for item in out_shp_items: 532 | os.remove(item) 533 | os.remove(json_out) 534 | os.remove(csv_out) 535 | 536 | 537 | # Using data3 folder 538 | def test_TopsSplitAnalyzer_data3_all_vv(): 539 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], 540 | polarization='vv') 541 | s1.load_safe(safe_path=data3) 542 | 543 | ######################################################################## 544 | # Check metadata list. All 6 items should be loaded regardless of user 545 | # input 546 | ######################################################################## 547 | 548 | expected = [ 549 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw1-slc-vh-20200724t034334-20200724t034359-033591-03e49d-001.xml'), 550 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw1-slc-vv-20200724t034334-20200724t034359-033591-03e49d-004.xml'), 551 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw2-slc-vh-20200724t034335-20200724t034400-033591-03e49d-002.xml'), 552 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw2-slc-vv-20200724t034335-20200724t034400-033591-03e49d-005.xml'), 553 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw3-slc-vh-20200724t034336-20200724t034401-033591-03e49d-003.xml'), 554 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw3-slc-vv-20200724t034336-20200724t034401-033591-03e49d-006.xml') 555 | ] 556 | 557 | actual = s1.metadata_file_list 558 | assert Counter(expected) == Counter(actual), 'Metadata list does not match' 559 | 560 | ######################################################################## 561 | # Check shapefile data was generated 562 | ######################################################################## 563 | 564 | out_shp_items = [ 565 | os.path.join(TEST_DIR, 'swaths_all_vv.cpg'), 566 | os.path.join(TEST_DIR, 'swaths_all_vv.dbf'), 567 | os.path.join(TEST_DIR, 'swaths_all_vv.prj'), 568 | os.path.join(TEST_DIR, 'swaths_all_vv.shp'), 569 | os.path.join(TEST_DIR, 'swaths_all_vv.shx') 570 | ] 571 | 572 | s1.to_shapefile(out_shp_items[3]) 573 | for item in out_shp_items: 574 | expected = True 575 | actual = os.path.exists(item) 576 | assert actual == expected, f'Shapefile was not generated correctly. 
' \ 577 | f'Missing: {item}' 578 | 579 | ######################################################################## 580 | # Check JSON was generated correctly 581 | ######################################################################## 582 | 583 | json_out = os.path.join(TEST_DIR, 'test_json.json') 584 | s1.to_json(json_out) 585 | 586 | # Read output JSON file 587 | with open(json_out) as json_file: 588 | data = json.load(json_file) 589 | assert isinstance(data, 590 | dict) == True, 'Error when loading JSON as Python ' \ 591 | 'dictionary' 592 | 593 | # Test contents 594 | expected = 'FeatureCollection' 595 | actual = data['type'] 596 | assert expected == actual, f'JSON data for "type" does not match' 597 | 598 | # Test id 0 599 | expected = {'burst': 1, 'subswath': 'IW1'} 600 | actual = data['features'][0]['properties'] 601 | assert expected == actual, f'JSON features with ID 0 do not ma' \ 602 | f'tch. Actual is {actual}. Expected is {expected}' 603 | # Test id 23 604 | expected = {'burst': 6, 'subswath': 'IW3'} 605 | actual = data['features'][23]['properties'] 606 | assert expected == actual, f'JSON features with ID 23 do not match. ' \ 607 | f'Actual is {actual}. Expected is {expected}' 608 | 609 | ######################################################################## 610 | # Check CSV was generated correctly 611 | ######################################################################## 612 | 613 | csv_out = os.path.join(TEST_DIR, 'test_csv.csv') 614 | s1.to_csv(csv_out) 615 | 616 | df = pd.read_csv(csv_out) 617 | 618 | # Test CSV size 619 | expected = 27 620 | actual = len(df) 621 | assert expected == actual, f'CSV file does not match. Actual is ' \ 622 | f'{actual} rows. Expected {expected} rows' 623 | 624 | # Check id 0 625 | expected = 'IW1' 626 | actual = df.iloc[0]['subswath'] 627 | assert expected == actual, f'Subswath for row 0 does not match. ' \ 628 | f'Actual is {actual}. Expected is {expected}' 629 | 630 | expected = 1 631 | actual = df.iloc[0]['burst'] 632 | assert expected == actual, f'Burst for row 0 does not match. Actual ' \ 633 | f'is {actual}. Expected is {expected}' 634 | 635 | # Delete files after tests 636 | for item in out_shp_items: 637 | os.remove(item) 638 | os.remove(json_out) 639 | os.remove(csv_out) 640 | 641 | 642 | # Using data3 folder 643 | def test_TopsSplitAnalyzer_data3_all_vh(): 644 | 645 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vh') 646 | s1.load_safe(safe_path=data3) 647 | 648 | ######################################################################## 649 | # Check metadata list. 
All 6 items should be loaded regardless of user input 650 | ######################################################################## 651 | 652 | expected = [ 653 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw1-slc-vh-20200724t034334-20200724t034359-033591-03e49d-001.xml'), 654 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw1-slc-vv-20200724t034334-20200724t034359-033591-03e49d-004.xml'), 655 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw2-slc-vh-20200724t034335-20200724t034400-033591-03e49d-002.xml'), 656 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw2-slc-vv-20200724t034335-20200724t034400-033591-03e49d-005.xml'), 657 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw3-slc-vh-20200724t034336-20200724t034401-033591-03e49d-003.xml'), 658 | os.path.join(TEST_DIR, 'data3', 'S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE', 'annotation', 's1a-iw3-slc-vv-20200724t034336-20200724t034401-033591-03e49d-006.xml') 659 | ] 660 | 661 | actual = s1.metadata_file_list 662 | assert Counter(expected) == Counter(actual), 'Metadata list does not match' 663 | 664 | ######################################################################## 665 | # Check shapefile data was generated 666 | ######################################################################## 667 | 668 | out_shp_items = [ 669 | os.path.join(TEST_DIR, 'swaths_all_vh.cpg'), 670 | os.path.join(TEST_DIR, 'swaths_all_vh.dbf'), 671 | os.path.join(TEST_DIR, 'swaths_all_vh.prj'), 672 | os.path.join(TEST_DIR, 'swaths_all_vh.shp'), 673 | os.path.join(TEST_DIR, 'swaths_all_vh.shx') 674 | ] 675 | s1.to_shapefile(out_shp_items[3]) 676 | for item in out_shp_items: 677 | expected = True 678 | actual = os.path.exists(item) 679 | assert actual == expected, f'Shapefile was not generated correctly. Missing: {item}' 680 | 681 | ######################################################################## 682 | # Check JSON was generated correctly 683 | ######################################################################## 684 | 685 | json_out = os.path.join(TEST_DIR, 'test_json.json') 686 | s1.to_json(json_out) 687 | 688 | # Read output JSON file 689 | with open(json_out) as json_file: 690 | data = json.load(json_file) 691 | assert isinstance(data, dict) == True, 'Error when loading JSON as Python dictionary' 692 | 693 | # Test contents 694 | expected = 'FeatureCollection' 695 | actual = data['type'] 696 | assert expected == actual, f'JSON data for "type" does not match' 697 | 698 | # Test id 0 699 | expected = {'burst': 1, 'subswath': 'IW1'} 700 | actual = data['features'][0]['properties'] 701 | assert expected == actual, f'JSON features with ID 0 do not match. Actual is {actual}. Expected is {expected}' 702 | # Test id 23 703 | expected = {'burst': 6, 'subswath': 'IW3'} 704 | actual = data['features'][23]['properties'] 705 | assert expected == actual, f'JSON features with ID 23 do not match. Actual is {actual}. 
Expected is {expected}'
706 | 
707 |     ########################################################################
708 |     # Check CSV was generated correctly
709 |     ########################################################################
710 | 
711 |     csv_out = os.path.join(TEST_DIR, 'test_csv.csv')
712 |     s1.to_csv(csv_out)
713 | 
714 |     df = pd.read_csv(csv_out)
715 | 
716 |     # Test CSV size
717 |     expected = 27
718 |     actual = len(df)
719 |     assert expected == actual, f'CSV file does not match. Actual is {actual} rows. Expected {expected} rows'
720 | 
721 |     # Check id 0
722 |     expected = 'IW1'
723 |     actual = df.iloc[0]['subswath']
724 |     assert expected == actual, f'Subswath for row 0 does not match. Actual is {actual}. Expected is {expected}'
725 | 
726 |     expected = 1
727 |     actual = df.iloc[0]['burst']
728 |     assert expected == actual, f'Burst for row 0 does not match. Actual is {actual}. Expected is {expected}'
729 | 
730 |     # Delete files after tests
731 |     for item in out_shp_items:
732 |         os.remove(item)
733 |     os.remove(json_out)
734 |     os.remove(csv_out)
--------------------------------------------------------------------------------
/tests/data3/S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE/manifest.safe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pbrotoisworo/s1-tops-split-analyzer/HEAD/tests/data3/S1A_IW_SLC__1SDV_20200724T034334_20200724T034401_033591_03E49D_96AA.SAFE/manifest.safe
--------------------------------------------------------------------------------
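/examples/usage_sketch.py (illustrative sketch, not a file in this repository):
--------------------------------------------------------------------------------
1 | # A minimal end-to-end sketch of the workflow exercised by the tests above,
2 | # assuming only the public API re-exported in stsa/__init__.py. The input
3 | # path is a placeholder for any Sentinel-1 IW SLC product.
4 | from stsa import TopsSplitAnalyzer
5 | 
6 | s1 = TopsSplitAnalyzer(target_subswaths=['iw1', 'iw2', 'iw3'], polarization='vv')
7 | s1.load_zip(zip_path='path/to/S1A_IW_SLC_scene.SAFE.zip')  # placeholder path
8 | 
9 | # Burst footprints can then be exported in any format the CLI supports
10 | s1.to_json('bursts.json')       # GeoJSON FeatureCollection of burst polygons
11 | s1.to_csv('bursts.csv')         # one row per (subswath, burst) pair
12 | s1.to_shapefile('bursts.shp')   # also writes .cpg/.dbf/.prj/.shx sidecars
--------------------------------------------------------------------------------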