├── .env
├── figs
│   └── logo.png
├── setup
│   ├── requirements.txt
│   └── update-snap.sh
├── .gitignore
├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   └── workflows
│       └── build-and-publish.yml
├── CONTRIBUTING.md
├── CHANGELOG.md
├── CITATION.cff
├── Dockerfile
├── scripts
│   ├── 0_query_s1.py
│   ├── 1_download_s1.py
│   ├── 2_dem_generation.py
│   ├── 3_assess_accuracy.py
│   ├── 3_1_aux_data_download.py
│   └── 4_calculate_volume.py
├── CODE_OF_CONDUCT.md
├── LICENSE
└── README.md
/.env:
--------------------------------------------------------------------------------
1 | asf_login='USERNAME'
2 | asf_pwd='PASSWORD'
3 |
--------------------------------------------------------------------------------
/figs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SliDEM-project/SliDEM-python/HEAD/figs/logo.png
--------------------------------------------------------------------------------
/setup/requirements.txt:
--------------------------------------------------------------------------------
1 | sentinelsat
2 | python-dotenv
3 | asf_search
4 | geopandas==0.9.0
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .Rproj.user
2 | .Rhistory
3 | .RData
4 | .Ruserdata
5 | .env
6 | *.Rproj
7 | data/
8 | extra/
9 | docker_commands.txt
10 | .idea
--------------------------------------------------------------------------------
/setup/update-snap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/local/snap/bin/snap --nosplash --nogui --modules --refresh --update-all 2>&1 | while read -r line; do
4 | echo "$line"
5 | [ "$line" = "updates=0" ] && sleep 2 && pkill -TERM -f "snap/jre/bin/java"
6 | done
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Include the code you were running that generated the bug.
15 | ```
16 | write code here.
17 | ```
18 |
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 |
22 | **Additional context**
23 | Add any other context about the problem here.
24 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute?
2 |
3 | We very much look forward to contributions to the package. You can contribute in several ways:
4 |
5 | - Create a pull request with a specific feature implementation or bug fix.
6 | - Open an issue in the [issue tracker](https://github.com/SliDEM-project/SliDEM-python/issues) to request a specific feature or report a bug. Please use and follow the issue templates.
7 |
8 | ## Code of conduct
9 |
10 | This project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
11 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # v0.1.0 (develop)
2 | ## 05.10.2022
3 | ### Added
4 | - Volume calculation script parametrized
5 | ### Changed
6 | - 3_assess_accuracy.py has slightly changed input parameters.
7 | - `set_ndv` was replaced by `set_nodata`
8 |
9 | ## 21.09.2022
10 | ### Added
11 | - Upgrade to SNAP-9
12 | - Scripts included into Docker image
13 | - **Important!** call scripts as `scripts/___.py` instead of `home/scripts/___.py`
14 | ### Changed
15 | - `demcoreg` is no longer supported
16 |
17 | ## 10.08.2022
18 | ### Added
19 | - Accuracy assessment module
20 | - DEM co-registration within accuracy assessment module
21 | - Land-use/land-cover statistics for accuracy assessment
22 | - Download script for external DEM and land-use/land-cover data
23 | - Citation file
24 | ### Changed
25 | - Pipelines renamed
26 | ### Fixed
27 | - `argparser` boolean type [#37](https://github.com/SliDEM-project/SliDEM-python/issues/37)
28 | # v0.0.1
29 | ## 01.08.2022
30 | - First implementation
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Abad"
5 | given-names: "Lorena"
6 | orcid: "https://orcid.org/0000-0003-0554-734X"
7 | - family-names: "Hölbling"
8 | given-names: "Daniel"
9 | orcid: "https://orcid.org/0000-0001-9282-8072"
10 | - family-names: "Dabiri"
11 | given-names: "Zahra"
12 | orcid: "https://orcid.org/0000-0003-1015-1657"
13 | - family-names: "Robson"
14 | given-names: "Benjamin Aubrey"
15 | orcid: "https://orcid.org/0000-0002-4987-7378"
16 | title: "An open-source-based workflow for DEM generation from Sentinel-1 for landslide volume estimation"
17 | doi: 10.5194/isprs-archives-XLVIII-4-W1-2022-5-2022
18 | date-released: 2022-08-05
19 | url: "https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XLVIII-4-W1-2022/5/2022/"
20 | preferred-citation:
21 | type: article
22 | authors:
23 | - family-names: "Abad"
24 | given-names: "Lorena"
25 | orcid: "https://orcid.org/0000-0003-0554-734X"
26 | - family-names: "Hölbling"
27 | given-names: "Daniel"
28 | orcid: "https://orcid.org/0000-0001-9282-8072"
29 | - family-names: "Dabiri"
30 | given-names: "Zahra"
31 | orcid: "https://orcid.org/0000-0003-1015-1657"
32 | - family-names: "Robson"
33 | given-names: "Benjamin Aubrey"
34 | orcid: "https://orcid.org/0000-0002-4987-7378"
35 | doi: "10.5194/isprs-archives-XLVIII-4-W1-2022-5-2022"
36 | journal: "The International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences"
37 | title: "An open-source-based workflow for DEM generation from Sentinel-1 for landslide volume estimation"
38 | volume: XLVIII-4/W1-2022
39 | year: 2022
--------------------------------------------------------------------------------
/.github/workflows/build-and-publish.yml:
--------------------------------------------------------------------------------
1 | name: Build and Publish
2 |
3 | on:
4 | # run it on push to the default repository branch
5 | push:
6 | branches: [main]
7 | # run it during pull request
8 | # pull_request:
9 |
10 | jobs:
11 | # define job to build and publish docker image
12 | build-and-push-docker-image:
13 | name: Build Docker image and push to repositories
14 | # run only when code is compiling and tests are passing
15 | runs-on: ubuntu-latest
16 |
17 | # steps to perform in job
18 | steps:
19 | - name: Checkout code
20 | uses: actions/checkout@v2
21 |
22 | # setup Docker build action
23 | - name: Set up Docker Buildx
24 | id: buildx
25 | uses: docker/setup-buildx-action@v1
26 |
27 | - name: Login to DockerHub
28 | uses: docker/login-action@v1
29 | with:
30 | username: ${{ secrets.DOCKERHUB_USERNAME }}
31 | password: ${{ secrets.DOCKERHUB_TOKEN }}
32 |
33 | - name: Login to Github Packages
34 | uses: docker/login-action@v1
35 | with:
36 | registry: ghcr.io
37 | username: ${{ github.actor }}
38 | password: ${{ secrets.GHCR_PAT }}
39 |
40 | - name: Build image and push to Docker Hub and GitHub Container Registry
41 | id: docker_build
42 | uses: docker/build-push-action@v2
43 | with:
44 | # relative path to the place where the source code with the Dockerfile is located
45 | context: ./
46 | # Note: tags have to be all lower-case
47 | tags: |
48 | loreabad6/slidem:latest
49 | ghcr.io/loreabad6/slidem:latest
50 | # build on feature branches, push only on main branch
51 | push: ${{ github.ref == 'refs/heads/main' }}
52 | 
53 | - name: Image digest
54 | run: echo ${{ steps.docker_build.outputs.digest }}
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use the ESA SNAP base image from mundialis
2 | FROM mundialis/esa-snap:ubuntu
3 |
4 | # set locale
5 | ENV LC_ALL=C
6 |
7 | # Install stsa
8 | RUN git clone https://github.com/pbrotoisworo/s1-tops-split-analyzer.git
9 | ## After certain updates, support for python 3.6 was taken away, but I still need it!
10 | ## So I go back to a previous version (December 2021)
11 | WORKDIR ./s1-tops-split-analyzer
12 | RUN git reset --hard 12ea576989cce7cbff5569ece6d17df52a17b0a9
13 | RUN python3.6 -m pip install --upgrade pip
14 | RUN python3.6 -m pip install -e .
15 | WORKDIR ..
16 |
17 | ## Move local packages to tmp file
18 | COPY setup/requirements.txt /tmp/base_requirements.txt
19 |
20 | ## Install requirements for python
21 | RUN python3.6 -m pip install --upgrade pip
22 | RUN python3.6 -m pip install --no-cache-dir --upgrade -r /tmp/base_requirements.txt
23 |
24 | # Install snaphu
25 | RUN wget --no-check-certificate \
26 | https://web.stanford.edu/group/radar/softwareandlinks/sw/snaphu/snaphu-v2.0.5.tar.gz \
27 | && tar -xvf snaphu-v2.0.5.tar.gz \
28 | && rm snaphu-v2.0.5.tar.gz \
29 | && mkdir -p /usr/local/man/man1/ \
30 | && cd ./snaphu-v2.0.5/src \
31 | && make install \
32 | && make Clean
33 |
34 | # Install miniconda
35 | ENV PATH="/root/miniconda3/bin:${PATH}"
36 | ARG PATH="/root/miniconda3/bin:${PATH}"
37 | RUN apt-get update
38 |
39 | RUN apt-get install -y wget && rm -rf /var/lib/apt/lists/*
40 |
41 | RUN wget \
42 | https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
43 | && mkdir /root/.conda \
44 | && bash Miniconda3-latest-Linux-x86_64.sh -b \
45 | && rm -f Miniconda3-latest-Linux-x86_64.sh
46 |
47 | # Install xdem
48 | RUN git clone https://github.com/GlacioHack/xdem.git
49 | WORKDIR ./xdem
50 | RUN conda env create -f dev-environment.yml
51 | SHELL ["conda", "run", "-n", "xdem-dev", "/bin/bash", "-c"]
52 | RUN pip install -e .
53 | RUN conda install -y -c conda-forge bmi-topography python-dotenv seaborn pysal
54 | RUN pip install scicomap
55 | RUN conda init bash
56 |
57 | WORKDIR ..
58 |
59 | ## Move SliDEM scripts to /scripts
60 | ADD scripts /scripts
--------------------------------------------------------------------------------
/scripts/1_download_s1.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Import modules
4 | import argparse
5 | import asf_search as asf
6 | from dotenv import load_dotenv
7 | import os
8 | import pandas as pd
9 |
10 | # Arguments
11 | parser = argparse.ArgumentParser(
12 | description='''Download Sentinel-1 images that fit into a geographical region and
13 | within a specific time period. Needs credentials for ASF saved in a
14 | .env file located in the directory mounted as a volume in the Docker container.
15 | The username should be saved as `asf_login` and the password as `asf_pwd`.
16 | This file is used for your own security, so that you don't accidentally
17 | share code containing your credentials. However, if you want to skip this and
18 | pass your login and password on the command line, you can pass them via
19 | the `asf_login` and `asf_pwd` arguments, respectively.
20 |
21 | The `query_result` file should have been edited by the user
22 | to change the Download column to TRUE for those pairs of scenes that seem
23 | suitable for processing.
24 | ''',
25 | epilog='''
26 | Versions:
27 | v0.0.1 - 11/2021 - Download scenes from ASF repository
28 | Authors:
29 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at''',
30 | formatter_class=argparse.RawTextHelpFormatter
31 | )
32 | parser.add_argument(
33 | '--download_dir',
34 | type=str,
35 | default='data',
36 | help='relative path (refers to mounted volume) to the folder where S1 scenes will be downloaded'
37 | )
38 | parser.add_argument(
39 | '--query_result',
40 | type=str,
41 | help='''path to the CSV file with query results from 0_query_s1.py.
42 | Should be located in the specified download_dir.'''
43 | )
44 | parser.add_argument(
45 | '--asf_login',
46 | type=str,
47 | default=None,
48 | help='''Login information to ASF server: username'''
49 | )
50 | parser.add_argument(
51 | '--asf_pwd',
52 | type=str,
53 | default=None,
54 | help='''Login information to ASF server: password'''
55 | )
56 | args = parser.parse_args()
57 |
58 | os.chdir('home/')
59 |
60 | # Handle credentials
61 | if os.path.isfile('.env'):
62 | load_dotenv('.env')
63 | usrnm = os.environ.get('asf_login')
64 | passw = os.environ.get('asf_pwd')
65 | else:
66 | usrnm = args.asf_login
67 | passw = args.asf_pwd
68 |
69 | # Initiate session
70 | session = asf.ASFSession()
71 | session.auth_with_creds(usrnm, passw)
72 |
73 | # Download from URL list
74 | products = pd.read_csv(
75 | os.path.join(args.download_dir, args.query_result),
76 | sep=None, engine='python'
77 | )
78 | productsIn = products[products['Download']]
79 |
80 | refIDs = productsIn['ReferenceID'].tolist()
81 | matchIDs = productsIn['MatchID'].tolist()
82 | productIDs = list(set(refIDs + matchIDs))
83 |
84 | # Check if products are already on the download directory
85 | productList = [args.download_dir + "/" + f"{s}.zip" for s in productIDs]
86 | productExists = []
87 | for p in productList:
88 | productExists.append(os.path.exists(p))
89 |
90 | print("Existing scenes on directory: ", sum(productExists))
91 |
92 | productIDs_download = [d for (d, remove) in zip(productIDs, productExists) if not remove]
93 | print("Scenes to download: ", len(productIDs_download))
94 |
95 | urls = [f"https://datapool.asf.alaska.edu/SLC/SB/{s}.zip" for s in productIDs_download]
96 | asf.download_urls(urls=urls, path=args.download_dir, session=session, processes=4)
97 |
98 | print("All images downloaded!")
99 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | lorena.abad@plus.ac.at.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/scripts/3_1_aux_data_download.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from bmi_topography import Topography
3 | import os.path
4 | from dotenv import load_dotenv
5 | import geopandas as gpd
6 | import json
7 | from shapely.geometry import shape, GeometryCollection
8 | from tqdm.auto import tqdm # provides a progressbar
9 |
10 | # Arguments
11 | parser = argparse.ArgumentParser(
12 | description='''Auxiliary script to download reference DEM data
13 | through the OpenTopography API and WorldCover landuse/landcover
14 | data for your AOI. These can then be used for the accuracy assessment.
15 | Note that for the OpenTopography data you need to create
16 | a free account and get an API key.
17 | More information here:
18 | https://opentopography.org/blog/introducing-api-keys-access-opentopography-global-datasets
19 | ''',
20 | epilog='''
21 | Versions:
22 | v0.0.1 - 08/2022 - First implementation
23 | Authors:
24 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at''',
25 | formatter_class=argparse.RawTextHelpFormatter
26 | )
27 | parser.add_argument(
28 | '--aoi_path',
29 | type=str,
30 | help='''path to GeoJSON file (WGS84 - EPSG:4326) with the study area outline.
31 | This area will be used to download a subset of the selected OpenTopography DEM.'''
32 | )
33 | parser.add_argument(
34 | '--download_dir',
35 | default='data',
36 | type=str,
37 | help='''relative path (refers to mounted volume) to the directory where
38 | the reference DEM and LULC data for the study area should be downloaded.'''
39 | )
40 | parser.add_argument(
41 | '--opentopo_apikey',
42 | type=str,
43 | default=None,
44 | help='''API key to download data from the OpenTopography API.
45 | Can also be saved to the .env file under OPENTOPOGRAPHY_API_KEY.
46 | More information here:
47 | https://opentopography.org/blog/introducing-api-keys-access-opentopography-global-datasets'''
48 | )
49 | parser.add_argument(
50 | '--ref_dem_opentopo',
51 | type=str,
52 | default='NASADEM',
53 | help='''string referring to which DEM to use for error assessment.
54 | Can refer to any of the options provided by the OpenTopography API
55 | (https://bmi-topography.readthedocs.io/en/latest/).
56 | But of course, make sure the data you use overlaps your study area.'''
57 | )
58 | parser.add_argument(
59 | '--ref_dem_opentopo_download',
60 | type=bool,
61 | default=False,
62 | help='''Boolean to confirm actively for data download from the OpenTopography API.
63 | If you already downloaded the reference DEM for your AOI, please call it with the
64 | ref_dem_path argument instead of saturating the API.'''
65 | )
66 | parser.add_argument(
67 | '--lulc_worldcover_download',
68 | type=bool,
69 | default=False,
70 | help='''Boolean to confirm actively for WorldCover data download from the AWS S3 bucket.
71 | If you already downloaded the WorldCover data for your AOI, please call it with the
72 | lulc_path argument instead of saturating the API.'''
73 | )
74 | args = parser.parse_args()
75 |
76 | os.chdir('home/')
77 |
78 | if os.path.isfile('.env'):
79 | load_dotenv('.env')
80 | api_key = os.environ.get('OPENTOPOGRAPHY_API_KEY')
81 | else:
82 | api_key = args.opentopo_apikey
83 |
84 |
85 | def read_aoi(aoi=args.aoi_path, buffer=0):
86 | tempgeojson = os.path.join(args.download_dir, "temp_aoi.geojson")
87 | aoidf = gpd.read_file(aoi)
88 | if aoidf.crs != 'epsg:4326':
89 | aoidf = aoidf.to_crs(4326)
90 | aoidf.to_file(tempgeojson, driver="GeoJSON")
91 | aoi_file = tempgeojson
92 | else:
93 | aoi_file = aoi
94 | # Read aoi with shapely
95 | with open(aoi_file) as f:
96 | features = json.load(f)["features"]
97 |
98 | if os.path.exists(tempgeojson):
99 | os.remove(tempgeojson)
100 |
101 | return GeometryCollection(
102 | [shape(feature["geometry"]).buffer(buffer) for feature in features]
103 | )
104 |
105 |
106 | # Download reference DEM data
107 | if args.ref_dem_opentopo_download:
108 | aoi_bound = read_aoi()
109 |
110 | topo = Topography(
111 | dem_type=args.ref_dem_opentopo,
112 | west=aoi_bound.bounds[0], # minx
113 | south=aoi_bound.bounds[1], # miny
114 | east=aoi_bound.bounds[2], # maxx
115 | north=aoi_bound.bounds[3], # maxy
116 | output_format="GTiff",
117 | cache_dir=args.download_dir
118 | )
119 |
120 | fname = topo.fetch()
121 | print(
122 | 'Reference DEM data (', args.ref_dem_opentopo,
123 | ') for your AOI was downloaded here:', fname
124 | )
125 |
126 | if args.lulc_worldcover_download:
127 | # Download WorldCover data
128 | s3_url_prefix = "https://esa-worldcover.s3.eu-central-1.amazonaws.com"
129 |
130 | # Get geometry from AOI
131 | geom = read_aoi()
132 |
133 | # load worldcover grid
134 | url = f'{s3_url_prefix}/v100/2020/esa_worldcover_2020_grid.geojson'
135 | grid = gpd.read_file(url)
136 |
137 | # get grid tiles intersecting AOI
138 | tiles = grid[grid.intersects(geom)]
139 |
140 | for tile in tqdm(tiles.ll_tile):
141 | url = f'{s3_url_prefix}/v100/2020/map/ESA_WorldCover_10m_2020_v100_{tile}_Map.tif\n'
142 | print(
143 | 'Please copy this URL into your web browser\n'
144 | 'to download your WorldCover tile:', url,
145 | '\n PS. I know it is annoying but I cannot'
146 | ' figure out why requests does not work'
147 | )
148 | # r = requests.get(url, allow_redirects=True)
149 | # out_fn = f'{args.download_dir}/ESA_WorldCover_10m_2020_v100_{tile}_Map.tif'
150 | # with open(out_fn, 'wb') as f:
151 | # f.write(r.content)
152 |
--------------------------------------------------------------------------------
/scripts/4_calculate_volume.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path
3 | import xdem
4 | import csv
5 | import geoutils as gu
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 |
9 | # Arguments
10 | parser = argparse.ArgumentParser(
11 | description='''Calculate volumes from DEM differencing
12 | to characterise volumetric changes between post- and pre-event
13 | DEMs. In the difference, positive volumes indicate deposition
14 | and negative volumes indicate erosion. The computation also
15 | accounts for the error propagation of the pre- and post-event DEMs
16 | based on a reference DEM, if provided.
17 | ''',
18 | epilog='''
19 | Versions:
20 | v0.0.1 - 08/2022 - First implementation
21 | Authors:
22 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at''',
23 | formatter_class=argparse.RawTextHelpFormatter
24 | )
25 | parser.add_argument(
26 | '--output_dir',
27 | type=str,
28 | default='volume',
29 | help='''relative path (refers to mounted volume) to the directory where
30 | results should be written into'''
31 | )
32 | parser.add_argument(
33 | '--unstable_area_path',
34 | type=str,
35 | help='''path to spatial file (WGS84 - EPSG:4326)
36 | with the outline for unstable areas (e.g. mass movement areas).
37 | This can include any other area that should be ignored during
38 | the computation of error propagation
39 | (e.g. water areas, dense vegetation, etc.).
40 | If absent then the whole AOI is used for error analysis.'''
41 | )
42 | parser.add_argument(
43 | '--dem_pre_path',
44 | type=str,
45 | help='''path to DEM corresponding to the pre-event time.'''
46 | )
47 | parser.add_argument(
48 | '--dem_pos_path',
49 | type=str,
50 | help='''path to DEM corresponding to the post-event time.'''
51 | )
52 | parser.add_argument(
53 | '--ref_dem_path',
54 | type=str,
55 | help='''path to external DEM to use for error assessment.
56 | Use 3_1_aux_data_download.py to download elevation
57 | data from OpenTopography for your area of interest.'''
58 | )
59 | parser.add_argument(
60 | '--no_data_value',
61 | type=float,
62 | default=-99999,
63 | help='''Value to use for no data, defaults to -99999'''
64 | )
65 | parser.add_argument(
66 | '--elev_diff_min_max',
67 | type=float,
68 | default=20,
69 | help='''Absolute value defining the plotting range for elevation differences,
70 | i.e. the minimum and maximum elevation difference shown in the plots.
71 | Defaults to -20, +20.'''
72 | )
73 | args = parser.parse_args()
74 |
75 | os.chdir('home/')
76 |
77 | # Create output_dir if not existing
78 | if not os.path.exists(args.output_dir):
79 | os.mkdir(args.output_dir)
80 |
81 | # Read in DEMs
82 | dem_pre = xdem.DEM(args.dem_pre_path)
83 | dem_pos = xdem.DEM(args.dem_pos_path)
84 | reference_dem = xdem.DEM(args.ref_dem_path)
85 |
86 | # Get DEM names
87 | dem_pre_name = os.path.basename(args.dem_pre_path).split('.')[0]
88 | dem_pos_name = os.path.basename(args.dem_pos_path).split('.')[0]
89 |
90 | # Assign no data value if needed
91 | dem_pre.set_nodata(args.no_data_value)
92 | dem_pos.set_nodata(args.no_data_value)
93 |
94 | # Reproject the post-event and reference DEMs to match the pre-event DEM
95 | dem_pos = dem_pos.reproject(dem_pre)
96 | reference_dem = reference_dem.reproject(dem_pre)
97 |
98 | # Unstable areas
99 | unstable = gu.Vector(args.unstable_area_path)
100 | unstable = gu.Vector(unstable.ds.to_crs(dem_pre.crs))
101 |
102 | dh = dem_pos - dem_pre
103 | dh.save(os.path.join(
104 | args.output_dir,
105 | "dod_" + dem_pos_name + "_" + dem_pre_name + ".tif"
106 | ))
107 | mask = ~unstable.create_mask(dem_pre)
108 |
109 | # Compute errors only on stable terrain
110 | ref_pre = (reference_dem - dem_pre)
111 | ref_pre.set_mask(np.invert(mask))
112 |
113 | ref_pos = (reference_dem - dem_pos)
114 | ref_pos.set_mask(np.invert(mask))
115 |
116 | # Compute elevation difference on unstable terrain
117 | difference = dh.copy()
118 | difference.set_mask(mask)
119 | difference.save(os.path.join(
120 | args.output_dir,
121 | "dod_" + dem_pos_name + "_" + dem_pre_name + "_crop.tif"
122 | ))
123 |
124 | difference_source = difference.copy()
125 | mask_source = difference_source.data > 0
126 | difference_source.set_mask(mask_source)
127 |
128 | difference_depost = difference.copy()
129 | mask_depost = difference_depost.data < 0
130 | difference_depost.set_mask(mask_depost)
131 |
132 | area_pixel = dem_pre.res[0] * dem_pre.res[1]
133 |
134 | n_tot = dem_pre.shape[0] * dem_pre.shape[1]
135 | n = np.sqrt(n_tot * dem_pre.res[0])/(2 * (20 * dem_pre.res[0]))
136 | sde_pre = np.nanstd(ref_pre.data)/n
137 | sde_pos = np.nanstd(ref_pos.data)/n
138 | prop_error_sde = np.sqrt((sde_pre ** 2) + (sde_pos ** 2))
139 |
140 | nmad_pre = xdem.spatialstats.nmad(ref_pre.data)
141 | nmad_pos = xdem.spatialstats.nmad(ref_pos.data)
142 | prop_error_nmad = np.sqrt((nmad_pre ** 2) + (nmad_pos ** 2))
143 |
144 |
145 | def calc_total_area(diff):
146 | return (np.count_nonzero(diff.data) - np.sum(np.isnan(diff.get_nanarray()))) * area_pixel
147 |
148 |
149 | def calc_pixels(diff):
150 | return np.count_nonzero(diff.data) - np.sum(np.isnan(diff.get_nanarray()))
151 |
152 |
153 | def calc_volume(diff):
154 | return np.nansum(diff.data * area_pixel)
155 |
156 |
157 | def calc_volume_error(area_tot, prop_error):
158 | return area_tot * prop_error
159 |
160 |
161 | # Write error measurements to CSV file
162 | header = [
163 | 'dem_pre', 'dem_pos',
164 | 'ref_dem',
165 | 'nmad_dem_pre_m', 'nmad_dem_pos_m',
166 | 'sde_dem_pre_m', 'sde_dem_pos_m',
167 | 'zone', 'area_m2', 'pixels', 'vol_m3',
168 | 'nmad_prop_err_m',
169 | 'vol_error_nmad_m3',
170 | 'sde_prop_err_m',
171 | 'vol_error_sde_m3'
172 | ]
173 |
174 | csv_file = os.path.join(
175 | args.output_dir,
176 | "volume_estimates.csv"
177 | )
178 |
179 | if not os.path.exists(csv_file):
180 | with open(csv_file, 'w', encoding='UTF8') as f:
181 | writer = csv.writer(f)
182 |
183 | # write the header
184 | writer.writerow(header)
185 |
186 | with open(csv_file, 'a', encoding='UTF8') as f:
187 | writer = csv.writer(f)
188 | # write the data
189 | diff_ls = [difference, difference_source, difference_depost]
190 | diff_nm = ['total', 'source', 'deposition']
191 | for i in range(3):
192 | data = [
193 | args.dem_pre_path, args.dem_pos_path,
194 | args.ref_dem_path,
195 | nmad_pre, nmad_pos,
196 | sde_pre, sde_pos,
197 | diff_nm[i],
198 | calc_total_area(diff_ls[i]),
199 | calc_pixels(diff_ls[i]),
200 | calc_volume(diff_ls[i]),
201 | prop_error_nmad,
202 | calc_volume_error(calc_total_area(diff_ls[i]), prop_error_nmad),
203 | prop_error_sde,
204 | calc_volume_error(calc_total_area(diff_ls[i]), prop_error_sde)
205 | ]
206 | writer.writerow(data)
207 |
208 | plt.title('Elevation difference (m) in landslide area\nPost-event minus Pre-event')
209 | plt.hist(difference.data.flatten(),
210 | bins=np.linspace(-args.elev_diff_min_max, args.elev_diff_min_max, 100))
211 | plt.savefig(
212 | os.path.join(
213 | args.output_dir,
214 | "elevdiff_" + dem_pos_name + "_" + dem_pre_name + "_hist.png"
215 | )
216 | )
217 | plt.close()
218 |
219 | dem_ls = [dem_pre, dem_pos, dh]
220 | # Plot
221 | plt.figure(figsize=(16, 4), dpi=300.0, tight_layout=True)
222 | plt_extent = [
223 | dh.bounds.left,
224 | dh.bounds.right,
225 | dh.bounds.bottom,
226 | dh.bounds.top,
227 | ]
228 | cmaps = ['viridis', 'viridis', 'PuOr']
229 | vlims = [(None, None) for i in range(3)]
230 | vlims[0] = [np.nanmin(dem_pre.data), np.nanmax(dem_pre.data)]
231 | vlims[1] = vlims[0]
232 | vlims[2] = [-args.elev_diff_min_max, args.elev_diff_min_max]
233 | labels = [
234 | 'Elevation (m)',
235 | 'Elevation (m)',
236 | 'Elevation difference (m)'
237 | ]
238 | titles = [
239 | 'Pre-event DEM',
240 | 'Post-event DEM',
241 | 'Post-event - Pre-event'
242 | ]
243 | for i in range(3):
244 | plt.subplot(1, 3, i + 1)
245 | ax = plt.gca()
246 | unstable.ds.plot(ax=ax, fc='none', ec='k')
247 | ax.plot([], [], color='k', label='Unstable areas')
248 | plt.imshow(
249 | dem_ls[i].data.squeeze(),
250 | cmap=cmaps[i],
251 | extent=plt_extent,
252 | vmin=vlims[i][0], vmax=vlims[i][1]
253 | )
254 | cbar = plt.colorbar()
255 | cbar.set_label(labels[i])
256 | plt.xticks([])
257 | plt.yticks([])
258 | plt.legend(loc='lower right')
259 | plt.title(titles[i])
260 |
261 | plt.savefig(
262 | os.path.join(
263 | args.output_dir,
264 | "elevdiff_" + dem_pos_name + "_" + dem_pre_name + ".png"
265 | )
266 | )
267 | plt.close()
268 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and
10 | distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by the copyright
13 | owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all other entities
16 | that control, are controlled by, or are under common control with that entity.
17 | For the purposes of this definition, "control" means (i) the power, direct or
18 | indirect, to cause the direction or management of such entity, whether by
19 | contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
20 | outstanding shares, or (iii) beneficial ownership of such entity.
21 |
22 | "You" (or "Your") shall mean an individual or Legal Entity exercising
23 | permissions granted by this License.
24 |
25 | "Source" form shall mean the preferred form for making modifications, including
26 | but not limited to software source code, documentation source, and configuration
27 | files.
28 |
29 | "Object" form shall mean any form resulting from mechanical transformation or
30 | translation of a Source form, including but not limited to compiled object code,
31 | generated documentation, and conversions to other media types.
32 |
33 | "Work" shall mean the work of authorship, whether in Source or Object form, made
34 | available under the License, as indicated by a copyright notice that is included
35 | in or attached to the work (an example is provided in the Appendix below).
36 |
37 | "Derivative Works" shall mean any work, whether in Source or Object form, that
38 | is based on (or derived from) the Work and for which the editorial revisions,
39 | annotations, elaborations, or other modifications represent, as a whole, an
40 | original work of authorship. For the purposes of this License, Derivative Works
41 | shall not include works that remain separable from, or merely link (or bind by
42 | name) to the interfaces of, the Work and Derivative Works thereof.
43 |
44 | "Contribution" shall mean any work of authorship, including the original version
45 | of the Work and any modifications or additions to that Work or Derivative Works
46 | thereof, that is intentionally submitted to Licensor for inclusion in the Work
47 | by the copyright owner or by an individual or Legal Entity authorized to submit
48 | on behalf of the copyright owner. For the purposes of this definition,
49 | "submitted" means any form of electronic, verbal, or written communication sent
50 | to the Licensor or its representatives, including but not limited to
51 | communication on electronic mailing lists, source code control systems, and
52 | issue tracking systems that are managed by, or on behalf of, the Licensor for
53 | the purpose of discussing and improving the Work, but excluding communication
54 | that is conspicuously marked or otherwise designated in writing by the copyright
55 | owner as "Not a Contribution."
56 |
57 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf
58 | of whom a Contribution has been received by Licensor and subsequently
59 | incorporated within the Work.
60 |
61 | 2. Grant of Copyright License.
62 |
63 | Subject to the terms and conditions of this License, each Contributor hereby
64 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
65 | irrevocable copyright license to reproduce, prepare Derivative Works of,
66 | publicly display, publicly perform, sublicense, and distribute the Work and such
67 | Derivative Works in Source or Object form.
68 |
69 | 3. Grant of Patent License.
70 |
71 | Subject to the terms and conditions of this License, each Contributor hereby
72 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
73 | irrevocable (except as stated in this section) patent license to make, have
74 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where
75 | such license applies only to those patent claims licensable by such Contributor
76 | that are necessarily infringed by their Contribution(s) alone or by combination
77 | of their Contribution(s) with the Work to which such Contribution(s) was
78 | submitted. If You institute patent litigation against any entity (including a
79 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a
80 | Contribution incorporated within the Work constitutes direct or contributory
81 | patent infringement, then any patent licenses granted to You under this License
82 | for that Work shall terminate as of the date such litigation is filed.
83 |
84 | 4. Redistribution.
85 |
86 | You may reproduce and distribute copies of the Work or Derivative Works thereof
87 | in any medium, with or without modifications, and in Source or Object form,
88 | provided that You meet the following conditions:
89 |
90 | You must give any other recipients of the Work or Derivative Works a copy of
91 | this License; and
92 | You must cause any modified files to carry prominent notices stating that You
93 | changed the files; and
94 | You must retain, in the Source form of any Derivative Works that You distribute,
95 | all copyright, patent, trademark, and attribution notices from the Source form
96 | of the Work, excluding those notices that do not pertain to any part of the
97 | Derivative Works; and
98 | If the Work includes a "NOTICE" text file as part of its distribution, then any
99 | Derivative Works that You distribute must include a readable copy of the
100 | attribution notices contained within such NOTICE file, excluding those notices
101 | that do not pertain to any part of the Derivative Works, in at least one of the
102 | following places: within a NOTICE text file distributed as part of the
103 | Derivative Works; within the Source form or documentation, if provided along
104 | with the Derivative Works; or, within a display generated by the Derivative
105 | Works, if and wherever such third-party notices normally appear. The contents of
106 | the NOTICE file are for informational purposes only and do not modify the
107 | License. You may add Your own attribution notices within Derivative Works that
108 | You distribute, alongside or as an addendum to the NOTICE text from the Work,
109 | provided that such additional attribution notices cannot be construed as
110 | modifying the License.
111 | You may add Your own copyright statement to Your modifications and may provide
112 | additional or different license terms and conditions for use, reproduction, or
113 | distribution of Your modifications, or for any such Derivative Works as a whole,
114 | provided Your use, reproduction, and distribution of the Work otherwise complies
115 | with the conditions stated in this License.
116 |
117 | 5. Submission of Contributions.
118 |
119 | Unless You explicitly state otherwise, any Contribution intentionally submitted
120 | for inclusion in the Work by You to the Licensor shall be under the terms and
121 | conditions of this License, without any additional terms or conditions.
122 | Notwithstanding the above, nothing herein shall supersede or modify the terms of
123 | any separate license agreement you may have executed with Licensor regarding
124 | such Contributions.
125 |
126 | 6. Trademarks.
127 |
128 | This License does not grant permission to use the trade names, trademarks,
129 | service marks, or product names of the Licensor, except as required for
130 | reasonable and customary use in describing the origin of the Work and
131 | reproducing the content of the NOTICE file.
132 |
133 | 7. Disclaimer of Warranty.
134 |
135 | Unless required by applicable law or agreed to in writing, Licensor provides the
136 | Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
137 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
138 | including, without limitation, any warranties or conditions of TITLE,
139 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
140 | solely responsible for determining the appropriateness of using or
141 | redistributing the Work and assume any risks associated with Your exercise of
142 | permissions under this License.
143 |
144 | 8. Limitation of Liability.
145 |
146 | In no event and under no legal theory, whether in tort (including negligence),
147 | contract, or otherwise, unless required by applicable law (such as deliberate
148 | and grossly negligent acts) or agreed to in writing, shall any Contributor be
149 | liable to You for damages, including any direct, indirect, special, incidental,
150 | or consequential damages of any character arising as a result of this License or
151 | out of the use or inability to use the Work (including but not limited to
152 | damages for loss of goodwill, work stoppage, computer failure or malfunction, or
153 | any and all other commercial damages or losses), even if such Contributor has
154 | been advised of the possibility of such damages.
155 |
156 | 9. Accepting Warranty or Additional Liability.
157 |
158 | While redistributing the Work or Derivative Works thereof, You may choose to
159 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or
160 | other liability obligations and/or rights consistent with this License. However,
161 | in accepting such obligations, You may act only on Your own behalf and on Your
162 | sole responsibility, not on behalf of any other Contributor, and only if You
163 | agree to indemnify, defend, and hold each Contributor harmless for any liability
164 | incurred by, or claims asserted against, such Contributor by reason of your
165 | accepting any such warranty or additional liability.
166 |
167 | END OF TERMS AND CONDITIONS
168 |
169 | APPENDIX: How to apply the Apache License to your work
170 |
171 | To apply the Apache License to your work, attach the following boilerplate
172 | notice, with the fields enclosed by brackets "[]" replaced with your own
173 | identifying information. (Don't include the brackets!) The text should be
174 | enclosed in the appropriate comment syntax for the file format. We also
175 | recommend that a file or class name and description of purpose be included on
176 | the same "printed page" as the copyright notice for easier identification within
177 | third-party archives.
178 |
179 | Copyright [yyyy] [name of copyright owner]
180 |
181 | Licensed under the Apache License, Version 2.0 (the "License");
182 | you may not use this file except in compliance with the License.
183 | You may obtain a copy of the License at
184 |
185 | http://www.apache.org/licenses/LICENSE-2.0
186 |
187 | Unless required by applicable law or agreed to in writing, software
188 | distributed under the License is distributed on an "AS IS" BASIS,
189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190 | See the License for the specific language governing permissions and
191 | limitations under the License.
--------------------------------------------------------------------------------
/scripts/0_query_s1.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Import modules
4 | import argparse
5 | import asf_search as asf
6 | import geopandas as gpd
7 | import logging
8 | import numpy as np
9 | import os
10 | import pandas as pd
11 | from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
12 | import requests
13 |
14 | pd.options.mode.chained_assignment = None # default='warn'
15 | # https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas
16 | logging.basicConfig(format='%(message)s', level='INFO')
17 |
18 | # Arguments
19 | parser = argparse.ArgumentParser(
20 | description='''Query Sentinel-1 scenes that fit into a geographical region and
21 | within a specific time period.
22 | Uses the ASF repository to query scenes by a WKT region and a specific temporal
23 | range. The resulting scenes then go into a loop to find matching scenes
24 | using the baseline tool from ASF.
25 | 
26 | The output is a CSV file with the scenes matching the WKT and temporal arguments,
27 | the matching IDs within the perpendicular and temporal baselines set by the user, and a
28 | URL to check atmospheric conditions for the corresponding dates in the Sentinel Hub EO Browser.
29 | 
30 | Not every matching scene also overlaps the geographical and temporal settings;
31 | hence, a column `inAOInDates` is included, where TRUE values indicate that
32 | both scenes overlap temporally and geographically.
33 | 
34 | The user is then prompted to check the file and manually update the `Download` column
35 | for the scenes that they deem useful.
36 | ''',
37 | epilog='''
38 | Versions:
39 | v0.0.0 - 06/2019 - Download from SentinelHub repository
40 | v0.0.1 - 11/2021 - Query from ASF repository
41 | Authors:
42 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at
43 | Benjamin Robson - University of Bergen''',
44 | formatter_class=argparse.RawTextHelpFormatter
45 | )
46 | parser.add_argument(
47 | '--download_dir',
48 | type=str,
49 | default='data',
50 | help='''relative path (refers to mounted volume) to the folder
51 | where the query_result CSV file should be written to.'''
52 | )
53 | parser.add_argument(
54 | '--query_result',
55 | type=str,
56 | help='''name of the CSV file to which the query results will be written.
57 | It will be saved in the specified download folder.'''
58 | )
59 | parser.add_argument(
60 | '--date_start',
61 | type=str,
62 | help='''start date of S1 scene query'''
63 | )
64 | parser.add_argument(
65 | '--date_end',
66 | type=str,
67 | help='''end date of S1 scene query'''
68 | )
69 | parser.add_argument(
70 | '--aoi_path',
71 | type=str,
72 | help='''path to GeoJSON file (WGS84 - EPSG:4326) with the study area outline.
73 | Any scenes intersecting this area will be included in the query result'''
74 | )
75 | parser.add_argument(
76 | '--btempth',
77 | type=float,
78 | default=60,
79 | help='''temporal baseline threshold to query matching scenes.
80 | What is the maximum time that matching scenes should have between each other?
81 | Defaults to 60 days.
82 | This is checked forward and backwards.'''
83 | )
84 | parser.add_argument(
85 | '--bperpth_min',
86 | type=float,
87 | default=140,
88 | help='''perpendicular baseline threshold to query matching scenes.
89 | What is the minimum perpendicular baseline between matching scenes?
90 | Defaults to 140 meters.
91 | This is checked forward and backwards.'''
92 | )
93 | parser.add_argument(
94 | '--bperpth_max',
95 | type=float,
96 | default=300,
97 | help='''perpendicular baseline threshold to query matching scenes.
98 | What is the maximum perpendicular baseline between matching scenes?
99 | Defaults to 300 meters.
100 | This is checked forward and backwards.'''
101 | )
102 | args = parser.parse_args()
103 |
104 | os.chdir('home/')
105 |
106 | # Create download directory if not existing
107 | if not os.path.exists(args.download_dir):
108 | os.mkdir(args.download_dir)
109 |
110 | # Setup params
111 | aoidf = gpd.read_file(args.aoi_path)
112 | if aoidf.crs != 'epsg:4326':
113 | aoidf = aoidf.to_crs(4326)
114 | tempgeojson = os.path.join(args.download_dir, "temp_aoi.geojson")
115 | aoidf.to_file(tempgeojson, driver="GeoJSON")
116 | footprint = geojson_to_wkt(read_geojson(tempgeojson))
117 | os.remove(tempgeojson)
118 | else:
119 | footprint = geojson_to_wkt(read_geojson(args.aoi_path))
120 |
121 | dates = '[' + args.date_start + 'T00:00:00.000Z TO ' + args.date_end + 'T00:00:00.000Z]'
122 | tempfile1 = 'tmpgeo.csv'
123 | # Repository to query, can be sentinelhub or asf
124 | repo = 'asf'
125 |
126 | # Connect to API and search
127 | print("Connecting to API and searching images, depending on your AOI size and time period,"
128 | " this process may take a while. Be patient :)")
129 | if repo == 'sentinelhub': # not active currently, seems to only query recent images?
130 | # Connect to Sentinel API
131 | # Norway mirror:
132 | api_url = 'https://colhub.met.no/'
133 | # Austria mirror:
134 | # api_url = 'https://data.sentinel.zamg.ac.at/'
135 | username = os.environ.get('hub_login_br')
136 | password = os.environ.get('hub_pwd_br')
137 | api = SentinelAPI(
138 | username,
139 | password,
140 | api_url=api_url,
141 | show_progressbars=True
142 | )
143 |
144 | # Additional parameters can be set here, for example the processing level.
145 | # Valid search query keywords can be found at the Copernicus Open Access Hub documentation.
146 | # (https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/FullTextSearch?redirectedfrom=SciHubUserGuide.3FullTextSearch)
147 | products = api.query(
148 | footprint,
149 | dates,
150 | platformname='Sentinel-1',
151 | producttype='SLC'
152 | )
153 |
154 | # Convert list of available images to Pandas DataFrame
155 | products_df = api.to_dataframe(products)
156 |
157 | # Write to CSV file
158 | file_name = os.path.join(args.download_dir, tempfile1)
159 | products_df.to_csv(file_name, index=False)
160 | elif repo == "asf":
161 | products = asf.geo_search(platform=[asf.PLATFORM.SENTINEL1],
162 | intersectsWith=footprint,
163 | processingLevel=[asf.PRODUCT_TYPE.SLC],
164 | start=args.date_start,
165 | end=args.date_end,
166 | maxResults=1000)
167 | products_df = pd.DataFrame([p.properties for p in products])
168 |
169 | # Write to CSV file
170 | file_name = os.path.join(args.download_dir, tempfile1)
171 | products_df.to_csv(file_name, index=False)
172 | else:
173 | print("Repository not supported.")
174 |
175 | # Create empty list to hold the results
176 | candidates = []
177 |
178 | # Read scene IDs
179 | # Get ids for filtered images
180 | geo_prod = pd.read_csv(os.path.join(args.download_dir, tempfile1))
181 | geo_ids = geo_prod['fileID'].map(lambda fileID: str.replace(fileID, '-SLC', '')).tolist()
182 |
183 | # Loop over geo_ids to get matching scenes with desired temporal and perpendicular baselines
184 | tempfile2 = 'tmpbaseline.csv'
185 | for i in range(0, len(geo_ids)):
186 | order_url = "https://api.daac.asf.alaska.edu/services/search/baseline?reference="
187 | scene_id = geo_ids[i]
188 | output_type = "&output=csv"
189 | url = order_url + scene_id + output_type
190 | response = requests.post(url)
191 |
192 | # Write to .CSV
193 | baseline_file = os.path.join(args.download_dir, tempfile2)
194 | f = open(baseline_file, "w")
195 | f.write(response.text)
196 | f.close()
197 |
198 | # Read in CSV file
199 | baseline = pd.read_csv(baseline_file).replace(to_replace='None', value=np.nan)
200 | baseline = baseline[(baseline.TemporalBaseline.notnull()) &
201 | (baseline.PerpendicularBaseline.notnull())]
202 | baseline[['TemporalBaseline', 'PerpendicularBaseline']] = \
203 | baseline[['TemporalBaseline', 'PerpendicularBaseline']].apply(pd.to_numeric)
204 |
205 | baseline_filter = baseline[(abs(baseline['TemporalBaseline']) <= args.btempth) &
206 | (abs(baseline['PerpendicularBaseline']) >= args.bperpth_min) &
207 | (abs(baseline['PerpendicularBaseline']) <= args.bperpth_max)]
208 | baseline_df = baseline_filter[
209 | ['Granule Name', 'Path Number', 'Ascending or Descending?',
210 | 'TemporalBaseline', 'PerpendicularBaseline']]
211 | baseline_df.rename(columns={'Granule Name': 'MatchID',
212 | 'Path Number': 'Orbit',
213 | 'Ascending or Descending?': 'Pass'},
214 | inplace=True)
215 | baseline_df.insert(0, 'ReferenceID', scene_id, True)
216 |
217 | candidates.append(baseline_df)
218 |
219 | # Merge all dataframes
220 | candidates_df = pd.concat(candidates)
221 |
222 | # Extract dates from IDs
223 | candidates_df['ReferenceDate'] = pd.to_datetime(
224 | candidates_df['ReferenceID'].str.slice(start=17, stop=25),
225 | format='%Y%m%d'
226 | )
227 | candidates_df['MatchDate'] = pd.to_datetime(
228 | candidates_df['MatchID'].str.slice(start=17, stop=25),
229 | format='%Y%m%d'
230 | )
231 |
232 | # Check if matched ids are also intersecting with the AOI and dates set
233 | candidates_df['inAOInDates'] = candidates_df['MatchID'].isin(geo_ids)
234 |
235 | # Create column where user can mark if download should be done or not
236 | candidates_df['Download'] = False
237 |
238 | # Create column with link to eo-browser to check for snow conditions using the NDSI
239 | aoidf['center'] = aoidf['geometry'].centroid
240 | aoi_lat = aoidf.center.y.astype(str)[0]
241 | aoi_lng = aoidf.center.x.astype(str)[0]
242 |
243 | candidates_df['EObrowser'] = ('https://apps.sentinel-hub.com/eo-browser/' +
244 | '?zoom=14&lat=' + aoi_lat +
245 | '&lng=' + aoi_lng +
246 | '&themeId=DEFAULT-THEME&datasetId=S2L1C&fromTime=' +
247 | candidates_df['ReferenceDate'].astype(str) +
248 | 'T00%3A00%3A00.000Z&toTime=' +
249 | candidates_df['ReferenceDate'].astype(str) +
250 | 'T23%3A59%3A59.999Z&layerId=8-NDSI')
251 |
252 | # Sort by intersected, True on top
253 | candidates_df.sort_values(by=['inAOInDates'], inplace=True, ascending=False)
254 |
255 | # Write to CSV file and remove temporal files
256 | file_name = os.path.join(args.download_dir, args.query_result)
257 | candidates_df.to_csv(file_name, index=False)
258 | os.remove(os.path.join(args.download_dir, tempfile1))
259 | os.remove(os.path.join(args.download_dir, tempfile2))
260 | print("CSV file with images to be processed has been written to " + file_name)
261 | print("Now it is your turn! Open the file and check the potential S1 pairs, "
262 | "which of them would you want to download? Update the Download column to TRUE "
263 | "to set those scene pairs you would like to download and process.")
264 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://www.repostatus.org/#wip)
2 | [](https://zenodo.org/doi/10.5281/zenodo.10634421)
3 |
4 |
5 |
6 | # SliDEM
7 | Assessing the suitability of DEMs derived from Sentinel-1 for landslide volume estimation.
8 |
9 | ## Goal
10 | The overall goal of SliDEM is to assess the potential for determining landslide volumes based on digital elevation models (DEMs) derived from Sentinel-1 SAR data. To this end, we are developing a low-cost, transferable, semi-automated method implemented as a Python package based on open-source tools.
11 |
12 | Find the project updates on [ResearchGate](https://www.researchgate.net/project/SliDEM-Assessing-the-suitability-of-DEMs-derived-from-Sentinel-1-for-landslide-volume-estimation) and check our [Publications & Conference proceedings](https://github.com/SliDEM-project/SliDEM-python/wiki/Publications-&-Conferences) for more details.
13 |
14 | ---
15 |
16 | **NOTES!**
17 |
18 | Although we call it the SliDEM package, it does not yet have a proper package structure.
19 | We are actively developing this repository and hope to have a working package soon.
20 |
21 | **We have implemented a [changelog](CHANGELOG.md)**. Please check it frequently for updates, including new ways to call
22 | the scripts or changes in parameters.
23 |
24 | Currently, we present a series of executable scripts to run within a Docker container.
25 | Below you will find instructions on how to set it up and start running the scripts.
26 |
27 | ---
28 | ## Setup
29 |
30 | To run the scripts inside a Docker container, follow these steps:
31 |
32 | 1. Install Docker if you do not have it already
33 | - See instructions for [Windows here](https://docs.docker.com/desktop/windows/install/) and for [Linux here](https://docs.docker.com/engine/install/).
34 |
35 | 2. Create a container to work on
36 | - Go to your terminal and type the command below.
37 | - You can mount a volume into the container.
38 | - We recommend having a `data` folder where all the data is kept; through the mounted volume it will also be accessible inside Docker.
39 | - What the command does:
40 | - `docker run` is the command to run an image through a container
41 | - `-it` calls an interactive process (like a shell)
42 | - `--entrypoint /bin/bash` will start your container in bash
43 | - `--name snap` gives a name to your container, so you can refer to it later
44 | - `-v PATH_TO_DIR/SliDEM-python:/home/` mounts a volume on your container.
45 | Replace `PATH_TO_DIR` with the path of the directory you wish to mount
46 | - `--pull=always` will update the Docker image to the latest available on Docker Hub
47 | - `loreabad6/slidem` is the Docker image available on DockerHub for this project
48 |
49 | ```
50 | docker run -it --entrypoint /bin/bash --name snap -v PATH_TO_DIR/SliDEM-python:/home/ --pull=always loreabad6/slidem
51 | ```
52 |
53 | 3. You can remove the container once you are done. All results should be written to the mounted volume, but make sure the output paths are set accordingly when calling the scripts.
54 | - You can exit your container by doing `CTRL+D`
55 | - You can delete the container with:
56 | ```
57 | docker stop snap
58 | docker rm snap
59 | ```
60 | - If you don't want to delete your container after use, then just **exit** it, **stop** it, and next time you want to use it run:
61 | ```
62 | docker start snap
63 | docker exec -it snap /bin/bash
64 | ```
65 |
66 | 4. Using `xdem`:
67 | - Since this module has different dependencies, you should use the conda environment created for it (`xdem-dev`).
68 |
69 | ```commandline
70 | # to activate:
71 | conda activate xdem-dev
72 |
73 | # to deactivate:
74 | conda deactivate
75 | ```
76 | - Please test that the configuration set when building the Docker container
77 | was correct by running (this might take several minutes):
78 |
79 | ```commandline
80 | cd xdem
81 | pytest -rA
82 | ```
83 |
84 | ## Workflow
85 |
86 | So far, steps are organized into 4 executable scripts:
87 | 1. Query S1 data
88 | 2. Download S1 data
89 | 3. Compute DEM from S1 data
90 | 4. Calculate DEM accuracy
91 |
92 | The scripts are included within the Docker image and are therefore available inside the
93 | container you created, in the folder `scripts`.
94 | To run them, you can follow the examples below.
95 | Please note that some scripts require you to call `python3.6`,
96 | while others require you to activate a conda environment first and then
97 | call only `python`.
98 |
99 | We recommend using the `data` directory as both the download folder and the workspace for your results,
100 | but of course this is up to you.
101 |
102 | ### 1. Query
103 | For this script, since we are using ASF to query images, no credentials are needed.
104 | Depending on your selected time range, querying the data can take a while, since the script loops over every single image that
105 | intersects your AOI and searches for matching scenes over the whole S1 lifetime
106 | (admittedly inefficient, but it seems to be the only way for now).
107 |
108 | ```commandline
109 | # Usage example
110 | python3.6 scripts/0_query_s1.py --download_dir data/s1/ --query_result s1_scenes.csv --date_start 2019/06/01 --date_end 2019/06/10 --aoi_path data/aoi/alta.geojson
111 | ```
112 | ```commandline
113 | # Get help
114 | python3.6 scripts/0_query_s1.py -h
115 | ```
116 |
117 | ### 2. Download
118 | Once you have run the query script, you will have a CSV file as an output.
119 | This file contains all the SAR image pairs that intersect your AOI and time frame and
120 | that meet the perpendicular and temporal baseline thresholds set.
121 |
122 | We now ask you to go through the CSV file and check which image pairs you would like to download.
123 | For this, change the cell value of the image pair's row in the `Download` column from `FALSE` to `TRUE`.
124 |
125 | Why is this a manual step? Because we want the analyst to check whether the image pair is suitable for analysis.
126 | To help, we added a link to the Sentinel Hub EO Browser for the closest Sentinel-2 image available for the dates of the image pair.
127 | There you will be able to check if there was snow during your time period, if the cloud coverage was dense, if your area has
128 | very dense vegetation that might cause errors, etc.
129 |
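If you prefer to set the flag programmatically instead of editing the CSV in a spreadsheet, a minimal `pandas` sketch could look like the one below (the file path and the selection criterion are only examples; adapt them to your own query result):

```python
import pandas as pd

# Hypothetical example: open the query result written by 0_query_s1.py
csv_path = "data/s1/s1_scenes.csv"
scenes = pd.read_csv(csv_path)

# For instance, mark all pairs that intersect the AOI and date range for download
scenes.loc[scenes["inAOInDates"], "Download"] = True

# Write the file back so 1_download_s1.py picks up the selection
scenes.to_csv(csv_path, index=False)
```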
130 | **IMPORTANT!**
131 | Since the download step is done through the ASF server, credentials are needed to obtain the data.
132 | The credentials should be saved in a file called [`.env`](.env) in the directory mounted as a volume in Docker.
133 | The username should be saved as `asf_login` and the password as `asf_pwd`. See an example below:
134 |
135 | ```text
136 | asf_login='USERNAME'
137 | asf_pwd='PASSWORD'
138 | ```
139 |
140 | If you cloned this repo, you will see an example of such a file in the main directory.
141 | There you can replace `USERNAME` and `PASSWORD` with your credentials.
142 |
143 | Once the changes to the CSV file are saved and your `.env` file is ready, you can run the `1_download_s1.py` script as shown below.
144 |
145 | ```commandline
146 | # Usage example
147 | python3.6 scripts/1_download_s1.py --download_dir data/s1/ --query_result s1_scenes.csv
148 | ```
149 | ```commandline
150 | # Get help
151 | python3.6 scripts/1_download_s1.py -h
152 | ```
153 |
154 | Downloading Sentinel-1 data always takes a while and requires a lot of disk space.
155 | Remember that, if you have mounted a volume as suggested, the data is downloaded to your local disk.
156 | Be prepared and patient! :massage:
157 |
158 | ### 3. DEM generation
159 | Now it is finally time to generate some DEMs.
160 | Taking the downloaded data and the query result from previous steps, we can now call the `2_dem_generation.py` module.
161 |
162 | The main arguments passed into this module are the path to the downloaded data,
163 | the CSV file used to get the image pairs, a directory where the results will be stored,
164 | and the AOI used to subset the area and to automatically extract bursts and subswaths.
165 |
166 | Several other parameters can be passed to specific parts of the workflow.
167 | Check the help for their descriptions and default values.
168 | ```commandline
169 | # Usage example
170 | python3.6 scripts/2_dem_generation.py --download_dir data/s1/ --output_dir data/results/ --query_result s1_scenes.csv --pair_index 0 --aoi_path data/aoi.geojson
171 | ```
172 | ```commandline
173 | # Get help
174 | python3.6 scripts/2_dem_generation.py -h
175 | ```
176 |
177 | If you skipped the query and download steps, you can pass your own scene ID pairs as a list to the DEM generation script:
178 | ```commandline
179 | # Usage example
180 | python3.6 scripts/2_dem_generation.py --download_dir data/s1/ --output_dir data/results/ --pair_ids 's1_scene_id_1' 's1_scene_id_2' --aoi_path data/aoi.geojson
181 | ```
182 |
183 | #### Generating DEMs in a loop
184 | If you are looking into generating DEMs in a loop, you can create a shell file (`.sh` extension) with the following:
185 |
186 | ```shell
187 | # replace {0..1} with the range of image pair indices you
188 | # have set in your query result file (CSV). For example, if you
189 | # set Download = TRUE for 5 pairs, then use {0..4}
190 | for i in {0..1}; do
191 | python3.6 scripts/2_dem_generation.py --download_dir data/s1/ --output_dir data/results/ --query_result s1_scenes.csv --pair_index "$i" --aoi_path data/aoi.geojson
192 | done
193 | ```
194 |
195 | Depending on whether you have used the container before, the processing might take more or less time.
196 | The main reason is that the reference DEM data needs to be downloaded the first time.
197 |
198 | ### 4. Accuracy assessment
199 | I strongly recommend you do your own accuracy assessment of the resulting products with
200 | [xDEM](https://xdem.readthedocs.io/en/latest/).
201 |
202 | However, I have included a module that will allow
203 | the generation of several plots and error measurements based on `xDEM` that can help you get an idea
204 | of the quality of the DEMs.
205 |
206 | Please bear in mind that this is still being implemented, so the script and outputs might change a lot.
207 | **ALSO** make sure you activate the conda environment for `xDEM` before running the script.
208 |
209 | ```commandline
210 | # Usage example
211 | conda activate xdem-dev
212 | python scripts/3_assess_accuracy.py -h
213 | ```
214 |
215 | For now, the arguments include several paths to data folders; see the help with the command above.
216 |
217 | **Note:** For the following inputs:
218 | - the reference DEM you want to use
219 | - (optional) LULC data to calculate statistics over land cover classes
220 |
221 | you can use the script below to get reference DEM data from OpenTopography and LULC data from WorldCover.
222 | Please note that you will need to create an account and get an API key for OpenTopography (both free).
223 | ```commandline
224 | # Usage example
225 | conda activate xdem-dev
226 | python scripts/3_1_aux_data_download.py -h
227 | ```
228 |
229 | When using the accuracy assessment script, add the flag `--coregister`
230 | to perform coregistration with the given reference DEM. This defaults to
231 | the Nuth-Kääb and degree-1 deramping coregistration approaches,
232 | but you can pass more methods with the `--coregistration_method` flag, as shown below.
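For example, a call combining coregistration with extra methods could look like the sketch below (directory and file names are placeholders):

```commandline
# Usage example (paths are placeholders)
conda activate xdem-dev
python scripts/3_assess_accuracy.py --s1_dem_dir data/results/out_20190601_20190613 --ref_dem_path data/reference_dem.tif --ref_dem_name NASADEM --coregister --coregistration_method NuthKaab Deramp_d1 ICP_NuthKaab
```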
233 |
234 | #### Running the accuracy assessment in a loop
235 | As with generating DEMs in a loop, you can run the `3_assess_accuracy.py` script over a
236 | directory where DEM outputs from different time steps are stored.
237 | Especially if the directory structure was not changed, the following
238 | bash script can be helpful to run the accuracy assessment in a loop:
239 |
240 | ```shell
241 | for file in $(find home/data/results -maxdepth 1 -name "out_*" -type d | cut -d'/' -f2-); do
242 | python -W ignore scripts/3_assess_accuracy.py --s1_dem_dir ${file} --unstable_area_path data/aoi/unstable_area.gpkg --ref_dem_path data/reference_dem.tif --ref_dem_name NASADEM --elev_diff_min_max 100 --lulc_path data/lulc.tif --coregister
243 | done
244 | ```
245 |
246 | In this script you should replace `home/data/results` with the parent directory
247 | where the directories starting with `out_*` are located.
248 | `maxdepth 1` means `find` will only go one level down in the directory tree, and
249 | the `cut` statement removes the `home/` part of the find results to avoid conflicts
250 | with how the data paths are referenced.
251 |
252 | When calling the python script, the `-W ignore` flag is added to skip user warnings
253 | related to nodata.
254 | `--s1_dem_dir` is the parameter that will get looped over.
255 | All the other arguments should be replaced with paths to the relevant data sources.
256 |
257 |
258 | ### 5. Volume calculation
259 | The final task of the `slidem` script sequence is volume calculation.
260 | To the script you can pass a pre-event and a post-event DEM, from which to
261 | calculate the volume within a specific "unstable area" outline.
262 |
263 | You can also pass a reference DEM, which will be used to calculate the error
264 | associated with each input S1-DEM, which in turn serves to compute
265 | the propagated error for the estimated volume.
266 | Currently, the NMAD and Standard Error (SDE) are used for this task.
267 |
268 | The script will compute a DoD (DEM of Difference), produce a figure with a histogram of
269 | elevation difference values, and another figure with maps of the
270 | pre- and post-event DEMs and their DoD for comparison.
271 |
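Conceptually, the volume is obtained by summing the DoD elevation differences inside the unstable area outline and multiplying by the pixel area. The snippet below is only an illustrative sketch with `xdem`/`geoutils` (the file names, NMAD values and the simple error propagation are assumptions, not the exact implementation of `4_calculate_volume.py`):

```python
import numpy as np
import geoutils as gu
import xdem

# Hypothetical inputs: pre-/post-event S1 DEMs and the unstable area outline
dem_pre = xdem.DEM("data/results/out_pre/pre_elevation.tif")
dem_post = xdem.DEM("data/results/out_post/post_elevation.tif").reproject(dem_pre)
unstable = gu.Vector("data/aoi/unstable_area.gpkg")
unstable = gu.Vector(unstable.ds.to_crs(dem_pre.crs))

# DEM of Difference (DoD), keeping only pixels inside the unstable area
dod = dem_post - dem_pre
mask = unstable.create_mask(dod)   # True inside the outline
dod.set_mask(~mask)                # mask out everything outside the outline

# Volume = sum of elevation differences * pixel area
dh = dod.data.filled(np.nan)
pixel_area = abs(dod.res[0] * dod.res[1])
volume = np.nansum(dh) * pixel_area

# Very rough error propagation (illustrative only): combine per-DEM NMADs
# (e.g. from the accuracy step) in quadrature and scale by the affected area
nmad_pre, nmad_post = 2.5, 3.0     # hypothetical NMAD values in metres
sigma_dh = np.sqrt(nmad_pre ** 2 + nmad_post ** 2)
volume_error = sigma_dh * np.count_nonzero(~np.isnan(dh)) * pixel_area
print(f"Volume change: {volume:.0f} +/- {volume_error:.0f} m3")
```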
272 | A CSV file called `volume_estimates.csv` will also be created.
273 | This file will be updated every time the same output directory is used,
274 | regardless of the pre- and post-event DEM data paths passed. The CSV file
275 | will collect the file name of each of these, and is meant to ease comparison
276 | between several runs of the script.
277 |
278 | Make sure you activate the conda environment for `xDEM` before running
279 | the script.
280 |
281 | ```commandline
282 | # Usage example
283 | conda activate xdem-dev
284 | python scripts/4_calculate_volume.py -h
285 | ```
286 |
287 | ## Issues/problems/bugs
288 |
289 | We try to document all bugs or workflow problems in our
290 | [issue tracker](https://github.com/SliDEM-project/SliDEM-python/issues).
291 |
292 | Also feel free to browse through
293 | [our wiki](https://github.com/SliDEM-project/SliDEM-python/wiki/)
294 | which includes some FAQs.
295 |
296 | We are working on improving all of this, but
297 | for the moment please bear with us :pray:
298 |
299 | Feel free to open an issue if you find a new bug or have any requests!
300 |
301 | Please refer to our [contributing guide](https://github.com/SliDEM-project/SliDEM-python/blob/main/CONTRIBUTING.md) for further info.
302 |
303 |
304 | ## Acknowledgements
305 |
306 | This work is supported by the Austrian Research Promotion Agency (FFG)
307 | through the project [SliDEM](https://projekte.ffg.at/projekt/4052533) (Assessing the suitability of DEMs derived
308 | from Sentinel-1 for landslide volume estimation; contract no. 885370).
309 |
310 | ## Copyright
311 | Copyright 2022 Department of Geoinformatics – Z_GIS, University of Salzburg
312 |
313 | Licensed under the Apache License, Version 2.0 (the "License");
314 | you may not use this file except in compliance with the License.
315 | You may obtain a copy of the License at
316 |
317 | http://www.apache.org/licenses/LICENSE-2.0
318 |
319 | Unless required by applicable law or agreed to in writing, software
320 | distributed under the License is distributed on an "AS IS" BASIS,
321 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
322 | See the License for the specific language governing permissions and
323 | limitations under the License.
324 |
--------------------------------------------------------------------------------
/scripts/3_assess_accuracy.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path
3 | import xdem
4 | import csv
5 | import geoutils as gu
6 | import glob
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 | import scicomap as sc
10 |
11 | # Arguments
12 | parser = argparse.ArgumentParser(
13 | description='''Perform an accuracy assessment of the S1-DEMs.
14 | Pass an external DEM for comparison.
15 | Use LULC (e.g. WorldCover) data to get binned statistics.
16 | You can download subsets of external DEMs and WorldCover data
17 | for your AOI through the OpenTopography API and AWS-S3 buckets
18 | with the 3_1_aux_data_download.py.
19 | ''',
20 | epilog='''
21 | Versions:
22 | v0.0.1 - 08/2022 - First implementation
23 | Authors:
24 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at''',
25 | formatter_class=argparse.RawTextHelpFormatter
26 | )
27 | parser.add_argument(
28 | '--s1_dem_dir',
29 | type=str,
30 | help='''relative path (refers to mounted volume) to the directory where
31 | the results of the DEM generation step are stored.
32 | If the file structure and naming were not altered, the script will look
33 | for the file ending in `_elevation.tif`.'''
34 | )
35 | parser.add_argument(
36 | '--s1_dem_path',
37 | type=str,
38 | help='''path to S1-based DEM to be assessed for errors. Ignored if
39 | s1_dem_dir is set.'''
40 | )
41 | parser.add_argument(
42 | '--unstable_area_path',
43 | type=str,
44 | help='''path to spatial file (WGS84 - EPSG:4326)
45 | with the outline for unstable areas (e.g. mass movement areas).
46 | This can include any other area that should be ignored during
47 | accuracy assessment (e.g. water areas, dense vegetation, etc.).
48 | If absent then the whole AOI is used for error analysis.'''
49 | )
50 | parser.add_argument(
51 | '--ref_dem_path',
52 | type=str,
53 | help='''path to external DEM to use for error assessment
54 | (and vertical co-registration if coregister=True).
55 | Use 3_1_aux_data_download.py to download elevation
56 | data from OpenTopography for your area of interest.'''
57 | )
58 | parser.add_argument(
59 | '--coregister',
60 | action='store_true',
61 | help='''Call this flag if the S1 DEM should be coregistered
62 | vertically to the reference DEM'''
63 | )
64 | parser.add_argument(
65 | '--coregistration_method',
66 | nargs="+",
67 | default=["NuthKaab", "Deramp_d1"],
68 | help='''List of xdem.coreg functions to set up a coregistration algorithm.
69 | Defaults to Nuth-Kaab (NuthKaab) and deramping with degree 1 (Deramp_d1).
70 | Options are:
71 | "NuthKaab"
72 | "Deramp_d1"
73 | "Deramp_d2"
74 | "Deramp_d3"
75 | "ICP"
76 | "BiasCorr"
77 | Combined coregistration pipelines:
78 | "NuthKaab_Deramp_d1"
79 | "ICP_NuthKaab"
80 | "BiasCorr_ICP_NuthKaab"
81 | '''
82 | )
83 | parser.add_argument(
84 | '--lulc_path',
85 | type=str,
86 | default=None,
87 | help='''path to LULC raster dataset to use for error assessment.
88 | Use 3_1_aux_data_download.py to download WorldCover LULC data
89 | from the AWS S3 Bucket for your area of interest.'''
90 | )
91 | parser.add_argument(
92 | '--ref_dem_name',
93 | type=str,
94 | help='''shorter name for external DEM used.
95 | Used to determine output file names.'''
96 | )
97 | parser.add_argument(
98 | '--acc_output_dir',
99 | type=str,
100 | default='accuracy',
101 | help='''name of the directory where the accuracy assessment should
102 | be saved into. It defaults to a sub-directory inside the `s1_dem_dir`
103 | named "accuracy".'''
104 | )
105 | parser.add_argument(
106 | '--no_data_value',
107 | type=float,
108 | default=-99999,
109 | help='''Value to use for no data, defaults to -99999'''
110 | )
111 | parser.add_argument(
112 | '--elev_diff_min_max',
113 | type=float,
114 | default=20,
115 | help='''Absolute value used as the symmetric range when plotting elevation differences,
116 | i.e. the minimum (-value) and maximum (+value) elevation difference shown.
117 | Defaults to 20 (-20 to +20).'''
118 | )
119 | args = parser.parse_args()
120 |
121 | os.chdir('home/')
122 |
123 | # Create output_dir if not existing
124 | # If directory to DEM generation output folder is given,
125 | # then create the directory inside this folder
126 | if args.s1_dem_dir is not None:
127 | accuracy_dir = os.path.join(args.s1_dem_dir, args.acc_output_dir)
128 | if not os.path.exists(accuracy_dir):
129 | os.mkdir(accuracy_dir)
130 | # Otherwise just create the given folder for the accuracy assessment
131 | else:
132 | accuracy_dir = args.acc_output_dir
133 | if not os.path.exists(accuracy_dir):
134 | os.mkdir(accuracy_dir)
135 |
136 | # Naming for files
137 | if args.ref_dem_name is None:
138 | ref_dem_name = args.ref_dem_path.split('/', -1)[-1].replace('.tif', '')
139 | else:
140 | ref_dem_name = args.ref_dem_name
141 |
142 | if args.s1_dem_path is not None:
143 | s1_dem_path = args.s1_dem_path
144 | else:
145 | s1_dem_path = glob.glob(os.path.join(args.s1_dem_dir,
146 | str("*_elevation.tif")))[0]
147 |
148 | s1_dem_name = os.path.basename(s1_dem_path).split('.')[0]
149 |
150 | # Error assessment with xdem
151 | reference_dem = xdem.DEM(args.ref_dem_path)
152 | s1_dem = xdem.DEM(s1_dem_path)
153 | if args.lulc_path is not None:
154 | lulc = gu.Raster(args.lulc_path)
155 | lulc_resampled = lulc.reproject(s1_dem, resampling="bilinear")
156 |
157 | # Assign no data value if needed
158 | s1_dem.set_nodata(args.no_data_value,
159 | update_array=True, update_mask=True)
160 |
161 | # Reproject reference DEM to S1 DEM
162 | reference_dem = reference_dem.reproject(s1_dem)
163 |
164 | # Unstable areas
165 | unstable = gu.Vector(args.unstable_area_path)
166 | unstable = gu.Vector(unstable.ds.to_crs(s1_dem.crs))
167 |
168 | prefix_before = "error_" + ref_dem_name + "_S1_" + s1_dem_name
169 |
170 | # Write error measurements to CSV file
171 | header = [
172 | 's1_dem', 'ref_dem',
173 | 'lulc', 's1_dem_coreg',
174 | 'min_error', 'max_error', 'mean_error',
175 | 'std_error', 'mae', 'rmse', 'nmad'
176 | ]
177 | csv_file = os.path.join(
178 | accuracy_dir,
179 | "error_S1_" + s1_dem_name + '.csv'
180 | )
181 |
182 | if not os.path.exists(csv_file):
183 | with open(csv_file, 'w', encoding='UTF8') as f:
184 | writer = csv.writer(f)
185 |
186 | # write the header
187 | writer.writerow(header)
188 |
189 |
190 | # Function to compute errors
191 | def compute_errors(src, ref, prefix, coreg_filename=None):
192 | # Calculate DEM error (dh)
193 | dh = ref - src
194 | dh_file = os.path.join(
195 | accuracy_dir,
196 | prefix + ".tif"
197 | )
198 | dh.save(dh_file)
199 |
200 | dem_ls = [ref, src, dh]
201 | # Plot
202 | plt.figure(figsize=(16, 4), dpi=300.0, tight_layout=True)
203 | plt_extent = [
204 | dh.bounds.left,
205 | dh.bounds.right,
206 | dh.bounds.bottom,
207 | dh.bounds.top,
208 | ]
209 | cmaps = ['viridis', 'viridis', sc.ScicoDiverging(cmap='vik').get_mpl_color_map()]
210 | vlims = [(None, None) for i in range(3)]
211 | vlims[0] = [np.nanmin(ref.data), np.nanmax(ref.data)]
212 | vlims[1] = vlims[0]
213 | vlims[2] = [-args.elev_diff_min_max, args.elev_diff_min_max]
214 | labels = [
215 | 'Elevation (m)',
216 | 'Elevation (m)',
217 | 'Elevation difference (m)'
218 | ]
219 | titles = [
220 | 'Reference DEM',
221 | 'S1 DEM',
222 | 'Reference - S1 DEM'
223 | ]
224 | for i in range(3):
225 | plt.subplot(1, 3, i + 1)
226 | ax = plt.gca()
227 | unstable.ds.plot(ax=ax, fc='none', ec='k')
228 | ax.plot([], [], color='k', label='Unstable areas')
229 | plt.imshow(
230 | dem_ls[i].data.squeeze(),
231 | cmap=cmaps[i],
232 | extent=plt_extent,
233 | vmin=vlims[i][0], vmax=vlims[i][1]
234 | )
235 | cbar = plt.colorbar()
236 | cbar.set_label(labels[i])
237 | plt.xticks([])
238 | plt.yticks([])
239 | plt.legend(loc='lower right')
240 | plt.title(titles[i])
241 |
242 | plt.savefig(
243 | os.path.join(
244 | accuracy_dir,
245 | prefix + ".png"
246 | )
247 | )
248 | plt.close()
249 |
250 | mask = ~unstable.create_mask(dh)
251 |
252 | # Keep only stable terrain data
253 | dh.set_mask(np.invert(mask))
254 | # Compute terrain attributes for reference DEM
255 | attr_list = ["hillshade", "slope", "aspect", "curvature",
256 | "terrain_ruggedness_index", "rugosity"]
257 | attributes_ref_dem = xdem.terrain.get_terrain_attribute(
258 | dem=ref.data,
259 | resolution=dh.res,
260 | attribute=attr_list
261 | )
262 | attributes_src_dem = xdem.terrain.get_terrain_attribute(
263 | dem=src.data,
264 | resolution=dh.res,
265 | attribute=attr_list
266 | )
267 |
268 | # Plot attributes for ref and src
269 | def plot_attr(dem, attributes):
270 | plt.figure(figsize=(8.2, 7), dpi=300.0, tight_layout=True)
271 |
272 | plt_extent = [dem.bounds.left, dem.bounds.right, dem.bounds.bottom, dem.bounds.top]
273 |
274 | cmaps = ["Greys_r", "Reds", "twilight", "RdGy_r", "Purples", "YlOrRd"]
275 | labels = ["Hillshade", "Slope (°)", "Aspect (°)", "Curvature (100 / m)",
276 | "Terrain Ruggedness Index", "Rugosity"]
277 | vlims = [(None, None) for i in range(6)]
278 | vlims[3] = [-2, 2]
279 |
280 | for i in range(6):
281 | plt.subplot(3, 2, i + 1)
282 | plt.imshow(
283 | attributes[i].squeeze(),
284 | cmap=cmaps[i],
285 | extent=plt_extent,
286 | vmin=vlims[i][0], vmax=vlims[i][1]
287 | )
288 | cbar = plt.colorbar()
289 | cbar.set_label(labels[i])
290 | plt.xticks([])
291 | plt.yticks([])
292 |
293 | return plt
294 |
295 | plot_attr_ref = plot_attr(ref, attributes_ref_dem)
296 |
297 | plot_attr_ref.savefig(
298 | os.path.join(
299 | accuracy_dir,
300 | "terrain_attributes_" + ref_dem_name + ".png"
301 | ),
302 | dpi=300, bbox_inches='tight'
303 | )
304 | plot_attr_ref.close()
305 |
306 | plot_attr_src = plot_attr(src, attributes_src_dem)
307 | plot_attr_src.savefig(
308 | os.path.join(
309 | accuracy_dir,
310 | "terrain_attributes_" + s1_dem_name + ".png"
311 | ),
312 | dpi=300, bbox_inches='tight'
313 | )
314 | plot_attr_src.close()
315 |
316 | if coreg_filename is not None:
317 | coreg_fn = os.path.basename(coreg_filename).split('.')[0]
318 | plot_attr_crg = plot_attr(src, attributes_src_dem)
319 | plot_attr_crg.savefig(
320 | os.path.join(
321 | accuracy_dir,
322 | "terrain_attributes_" + coreg_fn + ".png"
323 | ),
324 | dpi=300, bbox_inches='tight'
325 | )
326 | plot_attr_crg.close()
327 |
328 | # Compute measures
329 | # Write error measures data to csv file
330 | with open(csv_file, 'a', encoding='UTF8') as f:
331 | writer = csv.writer(f)
332 |
333 | # data input
334 | data = [
335 | s1_dem_path, args.ref_dem_path,
336 | (args.lulc_path if args.lulc_path else "Not used"),
337 | (coreg_filename if coreg_filename else "Before coregistration"),
338 | np.nanmin(dh.data), np.nanmax(dh.data), np.nanmean(dh.data),
339 | np.nanstd(dh.data),
340 | np.nanmean(np.abs(dh.data)),
341 | np.sqrt((dh.data ** 2).mean()),
342 | xdem.spatialstats.nmad(dh.data)
343 | ]
344 | # write the data
345 | writer.writerow(data)
346 |
347 | # Estimate the measurement error binned by terrain variables
348 | # (slope, aspect and, if given, LULC), using robust estimators
349 | # (count and NMAD by default)
350 | variables = [attributes_ref_dem[1].ravel(), attributes_ref_dem[2].ravel()]
351 | variables_names = ['slope', 'aspect']
352 | variables_bins = [30, 30]
353 | if args.lulc_path is not None:
354 | variables.append(lulc_resampled.data.ravel())
355 | variables_names.append('lulc')
356 | variables_bins.append(len(np.unique(lulc_resampled.data.ravel())))
357 | df_ns = xdem.spatialstats.nd_binning(
358 | dh.data.ravel(),
359 | list_var=variables,
360 | list_var_names=variables_names,
361 | statistics=['count', xdem.spatialstats.nmad],
362 | list_var_bins=variables_bins
363 | )
364 |
365 | df_ns[df_ns.nd == 1].to_csv(
366 | os.path.join(
367 | accuracy_dir,
368 | 'binned_stats_' + prefix + ".csv"
369 | ),
370 | index=False
371 | )
372 |
373 | # Plot binned elevation
374 | xdem.spatialstats.plot_1d_binning(
375 | df_ns, 'slope', 'nmad',
376 | 'Slope (degrees) calculated from Reference DEM',
377 | 'Elevation error: NMAD (m)'
378 | )
379 |
380 | plt.savefig(
381 | os.path.join(
382 | accuracy_dir,
383 | prefix + "_nmad_binned_slope.png"
384 | ),
385 | dpi=300, bbox_inches='tight'
386 | )
387 | plt.close()
388 |
389 | xdem.spatialstats.plot_1d_binning(
390 | df_ns, 'aspect', 'nmad',
391 | 'Aspect (degrees) calculated from Reference DEM',
392 | 'Elevation error: NMAD (m)'
393 | )
394 |
395 | plt.savefig(
396 | os.path.join(
397 | accuracy_dir,
398 | prefix + "_nmad_binned_aspect.png"
399 | ),
400 | dpi=300, bbox_inches='tight'
401 | )
402 | plt.close()
403 |
404 | if args.lulc_path is not None:
405 | xdem.spatialstats.plot_1d_binning(
406 | df_ns, 'lulc', 'nmad',
407 | 'LULC',
408 | 'Elevation error: NMAD (m)'
409 | )
410 |
411 | plt.savefig(
412 | os.path.join(
413 | accuracy_dir,
414 | prefix + "_nmad_binned_lulc.png"
415 | ),
416 | dpi=300, bbox_inches='tight'
417 | )
418 | plt.close()
419 |
420 |
421 | def coregister_func(coregister_name):
422 | match coregister_name:
423 | case "NuthKaab":
424 | return xdem.coreg.NuthKaab()
425 | case "Deramp_d1":
426 | return xdem.coreg.Deramp(degree=1)
427 | case "Deramp_d2":
428 | return xdem.coreg.Deramp(degree=2)
429 | case "Deramp_d3":
430 | return xdem.coreg.Deramp(degree=3)
431 | case "ICP":
432 | return xdem.coreg.ICP()
433 | case "BiasCorr":
434 | return xdem.coreg.BiasCorr()
435 | case "NuthKaab_Deramp_d1":
436 | return xdem.coreg.NuthKaab() + xdem.coreg.Deramp(degree=1)
437 | case "ICP_NuthKaab":
438 | return xdem.coreg.ICP() + xdem.coreg.NuthKaab()
439 | case "BiasCorr_ICP_NuthKaab":
440 | return xdem.coreg.BiasCorr() + xdem.coreg.ICP() + xdem.coreg.NuthKaab()
441 |
442 |
443 | def coregister(src, ref, coreg_type):
444 | coregister_function = coregister_func(coreg_type)
445 | dh = ref - src
446 | mask = ~unstable.create_mask(dh)
447 | coregister_function.fit(
448 | ref.data, src.data,
449 | inlier_mask=mask,
450 | transform=ref.transform
451 | )
452 | out = xdem.DEM.from_array(
453 | coregister_function.apply(src.data, transform=src.transform),
454 | transform=src.transform,
455 | crs=src.crs,
456 | nodata=args.no_data_value
457 | )
458 | prefix_file = s1_dem_name + '_' + ref_dem_name + '_coreg_' + coreg_type
459 | prefix_after = "error_" + ref_dem_name + "_S1_" + s1_dem_name + '_coreg_' + coreg_type
460 | filename = os.path.join(
461 | accuracy_dir,
462 | prefix_file + '.tif'
463 | )
464 | out.save(filename)
465 | s1_dem_coregistered = xdem.DEM(filename)
466 | # Assign no data value if needed
467 | s1_dem_coregistered.set_nodata(args.no_data_value,
468 | update_array=True, update_mask=True)
469 | compute_errors(
470 | s1_dem_coregistered,
471 | ref,
472 | prefix_after,
473 | coreg_filename=filename
474 | )
475 | dh_before = (ref - src)
476 | dh_before.set_mask(np.invert(mask))
477 | dh_after = (ref - s1_dem_coregistered)
478 | dh_after.set_mask(np.invert(mask))
479 | nmad_before = xdem.spatialstats.nmad(dh_before.data)
480 | nmad_after = xdem.spatialstats.nmad(dh_after.data)
481 | # Make histogram with values before and after coregistration
482 | plt.figure(figsize=(6, 6.7), dpi=300.0, tight_layout=True)
483 | plt.title(f"NMAD before: {nmad_before:.2f} m, "
484 | f"NMAD after: {nmad_after:.2f} m")
485 | plt.xlabel(f"Elevation difference (m)")
486 | plt.hist(dh_before.data.flatten(),
487 | bins=np.linspace(-args.elev_diff_min_max, args.elev_diff_min_max, 100),
488 | alpha=0.75, label="Before coregistration")
489 | plt.hist(dh_after.data.flatten(), facecolor='g',
490 | bins=np.linspace(-args.elev_diff_min_max, args.elev_diff_min_max, 100),
491 | alpha=0.75, label="After coregistration")
492 | plt.legend(loc='upper right')
493 | plt.savefig(
494 | os.path.join(
495 | accuracy_dir,
496 | prefix_after + "_hist.png"
497 | ),
498 | dpi=300, bbox_inches='tight'
499 | )
500 | plt.close()
501 | return out
502 |
503 |
504 | # Coregister and compute errors
505 | if args.coregister:
506 | coreg_list = []
507 | for c in args.coregistration_method:
508 | s1_dem_coreg = coregister(s1_dem, reference_dem, c)
509 | coreg_list.append(s1_dem_coreg)
510 |
511 | # Compute errors before coregistration
512 | compute_errors(s1_dem, reference_dem, prefix_before)
513 |
--------------------------------------------------------------------------------
/scripts/2_dem_generation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Import modules
4 | import argparse
5 | import glob
6 | import geopandas as gpd
7 | import json
8 | import numpy as np
9 | import os
10 | import pandas as pd
11 | from shapely.geometry import shape, GeometryCollection
12 | from snappy import ProductIO, jpy, GPF
13 | import stsa
14 | import subprocess
15 |
16 | # Arguments
17 | parser = argparse.ArgumentParser(
18 | description='''Generate DEMs based on Sentinel-1 image pairs.
19 | This module will go through four processing pipelines, which
20 | output different intermediate steps within the workflow,
21 | including interferogram and coherence layers.
22 | Each pipeline results in a different directory within the output
23 | directory set in the arguments.
24 |
25 | The `query_result` file from the previous steps, should have been edited
26 | by the user to change the Download column to TRUE for those pair of scenes
27 | that seem suitable for processing, since this script loops over this file.
28 | ''',
29 | epilog='''
30 | Versions:
31 | v0.0.1 - 06/2022 - Generate DEMs from S1
32 | Authors:
33 | Lorena Abad - University of Salzburg - lorena.abad@plus.ac.at''',
34 | formatter_class=argparse.RawTextHelpFormatter
35 | )
36 | parser.add_argument(
37 | '--download_dir',
38 | type=str,
39 | default='data',
40 | required=True,
41 | help='''relative path (refers to mounted volume) to the directory
42 | where S1 scenes were downloaded'''
43 | )
44 | parser.add_argument(
45 | '--output_dir',
46 | type=str,
47 | default='data',
48 | required=True,
49 | help='''relative path (refers to mounted volume) to the directory where
50 | results should be written into'''
51 | )
52 | parser.add_argument(
53 | '--query_result',
54 | type=str,
55 | help='''path to the CSV file with query results from 0_query_s1.py.
56 | Should be located in the specified download_dir, and should have been
57 | edited to set Download=True where relevant.'''
58 | )
59 | parser.add_argument(
60 | '--pair_index',
61 | type=int,
62 | default=0,
63 | help='''refers to the query_result CSV file, where the rows with Download=True
64 | are filtered out to get a list of the image pairs to process.
65 | Set the index to indicate which row to process next.
66 | Defaults to the first pair (pair_index=0)'''
67 | )
68 | parser.add_argument(
69 | '--pair_ids',
70 | nargs="+",
71 | default=None,
72 | help='''what are the scene IDs to process, in a list form. Ignored when query_result
73 | and pair_index are given.'''
74 | )
75 | parser.add_argument(
76 | '--aoi_path',
77 | type=str,
78 | required=True,
79 | help='''path to GeoJSON file (WGS84 - EPSG:4326) with the
80 | study area outline. This is used to extract the subswaths and
81 | bursts automatically and to subset the area if desired.'''
82 | )
83 | parser.add_argument(
84 | '--polarization',
85 | type=str,
86 | default="VV",
87 | help='''Set polarization to use, defaults to VV.'''
88 | )
89 | parser.add_argument(
90 | '--dem',
91 | type=str,
92 | default="Copernicus 30m Global DEM",
93 | help='''Set DEM for back-geocoding.
94 | Defaults to `Copernicus 30m Global DEM`.
95 | To choose from: `SRTM 3Sec`, `SRTM 1Sec HGT`,
96 | `GETASSE30`, `CDEM`'''
97 | )
98 | parser.add_argument(
99 | '--no_subset',
100 | action='store_false',
101 | help='''Call this flag if the process should
102 | be performed on the full extent only. Otherwise, it will
103 | additionally create a subset corresponding to the
104 | given AOI.'''
105 | )
106 | parser.add_argument(
107 | '--aoi_buffer',
108 | type=float,
109 | default=0,
110 | help='''If subsetting the area to the AOI, should a
111 | buffer around it be drawn? How big? Defaults to 0'''
112 | )
113 | parser.add_argument(
114 | '--no_output_projected',
115 | action='store_false',
116 | help='''Call this flag if the final outputs should
117 | be in WGS84. Otherwise it will project them in WGS84/UTM.'''
118 | )
119 | parser.add_argument(
120 | '--ifg_indwinsize',
121 | action='store_false',
122 | help='''Call this flag if the resulting interferogram should have
123 | independent window sizes; otherwise, it will have a square pixel size'''
124 | )
125 | parser.add_argument(
126 | '--ifg_cohwin_rg',
127 | type=int,
128 | default=10,
129 | help='''Coherence range window size (defaults to 10)'''
130 | )
131 | parser.add_argument(
132 | '--ifg_cohwin_az',
133 | type=int,
134 | default=2,
135 | help='''Coherence azimuth window size
136 | (defaults to 2, when --ifg_indwinsize is called)'''
137 | )
138 | parser.add_argument(
139 | '--no_multilook',
140 | action='store_false',
141 | help='''Should multilooking be skipped?'''
142 | )
143 | parser.add_argument(
144 | '--multilook_range',
145 | type=int,
146 | default=6,
147 | help='''Number of multilook range, defaults to 6'''
148 | )
149 | parser.add_argument(
150 | '--no_goldstein',
151 | action='store_false',
152 | help='''Should Goldstein filtering be skipped?'''
153 | )
154 | parser.add_argument(
155 | '--gpf_fftsize',
156 | type=int,
157 | default=64,
158 | help='''FFT size, defaults to 64. Options 32, 64, 128, 256'''
159 | )
160 | parser.add_argument(
161 | '--gpf_win',
162 | type=int,
163 | default=3,
164 | help='''FFT window size, defaults to 3. Options: 3, 5, 7'''
165 | )
166 | parser.add_argument(
167 | '--gpf_cohmask',
168 | action='store_true',
169 | help='''Use coherence mask?'''
170 | )
171 | parser.add_argument(
172 | '--gpf_cohth',
173 | type=float,
174 | default=0.2,
175 | help='''If coherence mask is used, what should be the threshold?
176 | Between 0 and 1, Defaults to 0.2'''
177 | )
178 | parser.add_argument(
179 | '--snaphu_costmode',
180 | type=str,
181 | default='TOPO',
182 | help='''Cost mode parameter for snaphu export.
183 | Either TOPO or SMOOTH are viable options.
184 | DEFO is for deformation and not recommended.
185 | Defaults to TOPO'''
186 | )
187 | parser.add_argument(
188 | '--snaphu_tiles',
189 | type=int,
190 | default=1,
191 | help='''Number of tiles parameter for snaphu export.
192 | (Usually increasing this argument causes problems).
193 | Defaults to 1'''
194 | )
195 | parser.add_argument(
196 | '--snaphu_tile_overlap_row',
197 | type=float,
198 | default=200,
199 | help='''If more than one tile is set, what should the overlap
200 | between tiles be for the rows. Ignored when snaphu_tiles = 1.
201 | Defaults to 200'''
202 | )
203 | parser.add_argument(
204 | '--snaphu_tile_overlap_col',
205 | type=float,
206 | default=200,
207 | help='''If more than one tile is set, what should the overlap
208 | between tiles be for the columns.
209 | Defaults to 200'''
210 | )
211 | parser.add_argument(
212 | '--pixel_size',
213 | type=float,
214 | default=30.0,
215 | help='''When applying terrain correction, what pixel size to use, defaults to 30'''
216 | )
217 | parser.add_argument(
218 | '--no_data_value',
219 | type=float,
220 | default=-99999,
221 | help='''Value to use for no data, defaults to -99999'''
222 | )
223 | parser.add_argument(
224 | '--coherence_threshold',
225 | type=float,
226 | default=0.15,
227 | help='''Minimum coherence value to export valid elevation pixels, defaults to 0.15'''
228 | )
229 | parser.add_argument(
230 | '--pipelines',
231 | nargs="+",
232 | default=['P1', 'P2', 'P3'],
233 | help='''Which pipelines to run, defaults to all.
234 | Pipeline 1: generates interferogram
235 | Pipeline 2: performs snaphu unwrapping
236 | Pipeline 3: generates DEM.
237 | Useful for debugging.'''
238 | )
239 | args = parser.parse_args()
240 |
241 | # Set home as current directory
242 | os.chdir('home/')
243 |
244 | # Read in image pairs
245 | if args.query_result is not None:
246 | products = pd.read_csv(
247 | os.path.join(args.download_dir, args.query_result),
248 | sep=None, engine='python'
249 | )
250 | productsIn = products[products['Download']]
251 |
252 | prodRefDate = productsIn.iloc[args.pair_index]['ReferenceDate']
253 | prodMatDate = productsIn.iloc[args.pair_index]['MatchDate']
254 | prodRefId = productsIn.iloc[args.pair_index]['ReferenceID']
255 | prodMatId = productsIn.iloc[args.pair_index]['MatchID']
256 | elif args.pair_ids is not None:
257 | prodRefId = args.pair_ids[0]
258 | prodRefDate = pd.to_datetime(
259 | prodRefId[17:25],
260 | format='%Y%m%d'
261 | )
262 | prodMatId = args.pair_ids[1]
263 | prodMatDate = pd.to_datetime(
264 | prodMatId[17:25],
265 | format='%Y%m%d'
266 | )
267 | else:
268 | raise ValueError("You need to provide either the "
269 | "path to the query_result CSV file "
270 | "or a list containing pair_ids")
271 |
272 | # Set image order
273 | # "before" image .zip
274 | if pd.to_datetime(prodRefDate) < pd.to_datetime(prodMatDate):
275 | file_path_1 = os.path.join(args.download_dir, prodRefId + '.zip')
276 | else:
277 | file_path_1 = os.path.join(args.download_dir, prodMatId + '.zip')
278 | # "after" image .zip
279 | if pd.to_datetime(prodMatDate) > pd.to_datetime(prodRefDate):
280 | file_path_2 = os.path.join(args.download_dir, prodMatId + '.zip')
281 | else:
282 | file_path_2 = os.path.join(args.download_dir, prodRefId + '.zip')
283 |
284 | ref_date_str = pd.to_datetime(
285 | prodRefDate,
286 | yearfirst=False,
287 | dayfirst=True
288 | )
289 | mat_date_str = pd.to_datetime(
290 | prodMatDate,
291 | yearfirst=False,
292 | dayfirst=True
293 | )
294 |
295 | # Create output_dir if not existing
296 | if not os.path.exists(args.output_dir):
297 | os.mkdir(args.output_dir)
298 |
299 | # Output directory with date parenthesis
300 | date_bundle = ref_date_str.strftime('%Y%m%d') + '_' + \
301 | mat_date_str.strftime('%Y%m%d')
302 | output_dir = os.path.join(args.output_dir, 'out_' + date_bundle)
303 | if not os.path.exists(output_dir):
304 | os.mkdir(output_dir)
305 |
306 | # Get some metadata from the CSV file:
307 | ref_date = ref_date_str.strftime('%d/%m/%Y')
308 | mat_date = mat_date_str.strftime('%d/%m/%Y')
309 |
310 | # Hashmap is used to give us access to all JAVA operators
311 | HashMap = jpy.get_type('java.util.HashMap')
312 | parameters = HashMap()
313 |
314 |
315 | # Functions:
316 | # From this section I define a set of functions that are called
317 | # within a pipeline at the end of the script. Each function will start with a
318 | # comment saying what it does, and will include an indicator as to which
319 | # pipeline it belongs to (P1, P2, P3, P4)
320 |
321 |
322 | # [P1|P2] Function to read AOI
323 | def read_aoi(aoi, buffer):
324 | tempgeojson = os.path.join(args.download_dir, "temp_aoi.geojson")
325 | aoidf = gpd.read_file(aoi)
326 | if aoidf.crs != 'epsg:4326':
327 | aoidf = aoidf.to_crs(4326)
328 | aoidf.to_file(tempgeojson, driver="GeoJSON")
329 | aoi_file = tempgeojson
330 | else:
331 | aoi_file = aoi
332 | # Read aoi with shapely
333 | with open(aoi_file) as f:
334 | features = json.load(f)["features"]
335 |
336 | if os.path.exists(tempgeojson):
337 | os.remove(tempgeojson)
338 |
339 | return GeometryCollection(
340 | [shape(feature["geometry"]).buffer(buffer) for feature in features]
341 | )
342 |
343 |
344 | # [P1] Function to get subswaths and bursts
345 | def get_swath_burst(filename, aoi,
346 | polar=args.polarization,
347 | product_idx=1):
348 | print('Extracting subswath and bursts for AOI...')
349 | aoi_geom = read_aoi(aoi, buffer=args.aoi_buffer)
350 |
351 | # Apply Top Split Analyzer to S1 file with stsa
352 | # Initialize object
353 | img = stsa.TopsSplitAnalyzer(
354 | target_subswaths=['iw1', 'iw2', 'iw3'],
355 | polarization=polar.lower()
356 | )
357 | # Load zip file
358 | img.load_data(zip_path=filename)
359 | # Save to json to check geometry manually
360 | img.to_json(os.path.join(
361 | output_dir,
362 | 'subswath_bursts_' + str(product_idx) + '.geojson'))
363 |
364 | # Create geodataframe with subswaths, bursts and geoms
365 | # Calling an internal method to avoid writing to json and reloading
366 | img._create_subswath_geometry()
367 | img_df = img.df
368 |
369 | # Intersect geodataframe with aoi
370 | img_df = img_df[img_df.intersects(aoi_geom)].reset_index()
371 |
372 | # Return intersecting subswaths and bursts
373 | df = img_df[['subswath', 'burst']].copy()  # copy to avoid pandas SettingWithCopyWarning
374 | df['burst_idx'] = df.groupby('subswath').cumcount()
375 | if product_idx == 1:
376 | df['burst_idx'] = np.where(df['burst_idx'] == 0, "firstBurstIndex_1",
377 | np.where(df['burst_idx'] == 1, "lastBurstIndex_1", "ignore"))
378 | df = df.pivot(index='subswath', columns='burst_idx', values='burst') \
379 | .rename_axis(columns=None).reset_index()
380 | if 'lastBurstIndex_1' not in df.columns:
381 | df['lastBurstIndex_1'] = np.nan
382 | return df
383 | else:
384 | df['burst_idx'] = np.where(df['burst_idx'] == 0, "firstBurstIndex_2",
385 | np.where(df['burst_idx'] == 1, "lastBurstIndex_2", "ignore"))
386 | df = df.pivot(index='subswath', columns='burst_idx', values='burst') \
387 | .rename_axis(columns=None).reset_index()
388 | if 'lastBurstIndex_2' not in df.columns:
389 | df['lastBurstIndex_2'] = np.nan
390 | return df
391 |
392 |
393 | # [P1|P2|P3|P4] Function to read the .zip file into SNAP
394 | def read(filename):
395 | print('Reading...')
396 | return ProductIO.readProduct(filename)
397 |
398 |
399 | # [P1|P2|P3] Function to write SNAP product to GeoTIFF
400 | def write_TIFF_format(product, filename):
401 | ProductIO.writeProduct(product, filename, "GeoTiff")
402 |
403 |
404 | # [P1|P2|P3] Function to write SNAP product to BEAM-DIMAP format
405 | def write_BEAM_DIMAP_format(product, filename):
406 | print('Saving BEAM-DIMAP format.')
407 | ProductIO.writeProduct(product, filename + '.dim', 'BEAM-DIMAP')
408 |
409 |
410 | # [P2] Function to apply TOPSAR split with SNAP
411 | def topsar_split(product, IW, firstBurstIndex, lastBurstIndex, polar=args.polarization):
412 | print('Applying TOPSAR Split...')
413 | parameters.put('subswath', IW)
414 | parameters.put('firstBurstIndex', firstBurstIndex)
415 | parameters.put('lastBurstIndex', lastBurstIndex)
416 | parameters.put('selectedPolarisations', polar)
417 | output = GPF.createProduct("TOPSAR-Split", parameters, product)
418 | return output
419 |
420 |
421 | # [P2] Function to apply Orbit file with SNAP
422 | def apply_orbit_file(product):
423 | print('Applying orbit file...')
424 | parameters.put("Orbit State Vectors", "Sentinel Precise (Auto Download)")
425 | parameters.put("Polynomial Degree", 3)
426 | parameters.put("Do not fail if new orbit file is not found", True)
427 | return GPF.createProduct("Apply-Orbit-File", parameters, product)
428 |
429 |
430 | # [P2] Function to do back geocoding with SNAP
431 | def back_geocoding(product, dem):
432 | print('Back geocoding...')
433 | parameters.put("demName", dem)
434 | parameters.put("demResamplingMethod", "BILINEAR_INTERPOLATION")
435 | parameters.put("resamplingType", "BILINEAR_INTERPOLATION")
436 | parameters.put("maskOutAreaWithoutElevation", True)
437 | parameters.put("outputDerampDemodPhase", True)
438 | parameters.put("disableReramp", False)
439 | return GPF.createProduct("Back-Geocoding", parameters, product)
440 |
441 |
442 | # [P2] Function to apply Enhanced Spectral Diversity with SNAP
443 | def enhanced_spectral_diversity(product):
444 | print('Applying Enhanced Spectral Diversity...')
445 | # called with defaults
446 | # should only be applied if multiple bursts were used in topsar_split
447 | return GPF.createProduct("Enhanced-Spectral-Diversity", parameters, product)
448 |
449 |
450 | # [P2] Function for TOPSAR deburst with SNAP
451 | def topsar_deburst(sources):
452 | print('Running TOPSAR deburst...')
453 | parameters.put("Polarisations", args.polarization)
454 | output = GPF.createProduct("TOPSAR-Deburst", parameters, sources)
455 | return output
456 |
457 |
458 | # [P2] Function to calculate the interferogram with SNAP
459 | def interferogram(product, ifg_indwinsize, ifg_cohwin_az, ifg_cohwin_rg):
460 | print('Creating interferogram...')
461 | parameters.put("Subtract flat-earth phase", True)
462 | parameters.put("Degree of \"Flat Earth\" polynomial", 5)
463 | parameters.put("Number of \"Flat Earth\" estimation points", 501)
464 | parameters.put("Orbit interpolation degree", 3)
465 | parameters.put("Include coherence estimation", True)
466 | parameters.put("Square Pixel", not ifg_indwinsize)
467 | parameters.put("Independent Window Sizes", ifg_indwinsize)
468 | parameters.put("Coherence Range Window Size", ifg_cohwin_rg)
469 | parameters.put("Coherence Azimuth Window Size", ifg_cohwin_az)
470 | return GPF.createProduct("Interferogram", parameters, product)
471 |
472 |
473 | # [P2] Function for TOPSAR merge with SNAP
474 | def topsar_merge(sources):
475 | print('Running TOPSAR merge...')
476 | parameters.put("selectedPolarisations", args.polarization)
477 | output = GPF.createProduct("TOPSAR-Merge", parameters, sources)
478 | return output
479 |
480 |
481 | # [P2] Function for topophase removal (optional) with SNAP
482 | def topophase_removal(product, dem):
483 | parameters.put("Orbit Interpolation Degree", 3)
484 | parameters.put("demName", dem)
485 | parameters.put("Tile Extension[%]", 100)
486 | parameters.put("Output topographic phase band", True)
487 | parameters.put("Output elevation band", False)
488 | return GPF.createProduct("TopoPhaseRemoval", parameters, product)
489 |
490 |
491 | # [P2] Function for multilooking (optional) with SNAP
492 | # Multi-looking is used to reduce resolution.
493 | # ML_nRgLooks stands for number of range looks
494 | # grSquarePixel is set to True to default to a square pixel,
495 | # hence the number of azimuth looks is automatically calculated.
496 | # https://forum.step.esa.int/t/alterring-spatial-resolution-of-sentinel-1-image/21906/7
497 | def multilook(product, ML_nRgLooks):
498 | print('Multi-looking...')
499 | parameters.put('grSquarePixel', True)
500 | parameters.put("nRgLooks", ML_nRgLooks) # half of range looks on metadata
501 | output = GPF.createProduct("Multilook", parameters, product)
502 | return output
503 |
504 |
505 | # [P2] Function to apply Goldstein phase filtering (optional) with SNAP
506 | def goldstein_phase_filter(product, gpf_fftsize, gpf_win,
507 | gpf_cohmask, gpf_cohth):
508 | print('Applying Goldstein phase filtering...')
509 | parameters.put("Adaptive Filter Exponent in(0,1]:", 1.0)
510 | parameters.put("FFT Size", gpf_fftsize)
511 | parameters.put("Window Size", gpf_win)
512 | parameters.put("Use coherence mask", gpf_cohmask)
513 | parameters.put("Coherence Threshold in[0,1]:", gpf_cohth)
514 | return GPF.createProduct("GoldsteinPhaseFiltering", parameters, product)
515 |
516 |
517 | # [P2] Function to create a subset with SNAP
518 | def subset(source, aoi, buffer):
519 | print('Subsetting...')
520 | wkt = read_aoi(aoi, buffer).wkt
521 | parameters.put('geoRegion', wkt)
522 | parameters.put('copyMetadata', True)
523 | output = GPF.createProduct('Subset', parameters, source)
524 | return output
525 |
526 |
527 | # [P3] Function to export to snaphu
528 | def snaphu_export(product, snaphu_exp_folder, tiles, cost_mode,
529 | tile_overlap_row, tile_overlap_col):
530 | print("Exporting to SNAPHU format...")
531 | parameters.put('targetFolder', snaphu_exp_folder)
532 | parameters.put('statCostMode', cost_mode)
533 | parameters.put('initMethod', 'MCF')
534 | parameters.put('numberOfTileCols', tiles)
535 | parameters.put('numberOfTileRows', tiles)
536 | parameters.put('rowOverlap', tile_overlap_row)
537 | parameters.put('colOverlap', tile_overlap_col)
538 | parameters.put('numberOfProcessors', 4)
539 | parameters.put('tileCostThreshold', 500)
540 | output = GPF.createProduct('SnaphuExport', parameters, product)
541 | ProductIO.writeProduct(output, snaphu_exp_folder, 'Snaphu')
542 | return output
543 |
544 |
545 | # [P3] Function for snaphu unwrapping
546 | # Unwrapping code adapted from:
547 | # https://forum.step.esa.int/t/snaphu-read-error-due-to-non-ascii-unreadable-file/14374/4
548 | def snaphu_unwrapping(snaphu_exp_folder):
549 | print('Unwrapping...')
550 | infile = os.path.join(snaphu_exp_folder, "snaphu.conf")
551 | with open(str(infile)) as lines:
552 | line = lines.readlines()[6]  # line 7 of snaphu.conf contains the snaphu command to run
553 | snaphu_string = line[1:].strip()
554 | snaphu_args = snaphu_string.split()
555 | print("\nCommand sent to snaphu:\n", snaphu_string)
556 | process = subprocess.Popen(snaphu_args, cwd=str(snaphu_exp_folder))
557 | process.communicate()
558 | process.wait()
559 |
560 | unw_img_file = glob.glob(os.path.join(output_dir, str("out_P2_snaphu" + "/UnwPhase*.img")))
561 | if not unw_img_file:
562 | raise ValueError("Snaphu unwrapping failed. Pipeline [P3] incomplete.")
563 |
564 | unwrapped_list = glob.glob(str(snaphu_exp_folder) + "/UnwPhase*.hdr")
565 | unwrapped_hdr = str(unwrapped_list[0])
566 | unwrapped_read = ProductIO.readProduct(unwrapped_hdr)
567 | fn = os.path.join(snaphu_exp_folder, "unwrapped")
568 | write_BEAM_DIMAP_format(unwrapped_read, fn)
569 | print('Phase unwrapping performed successfully.')
570 |
571 |
572 | # [P4] Function to import snaphu object
573 | def snaphu_import(product, unwrapped):
574 | print('Importing snaphu product...')
575 | snaphu_files = jpy.array('org.esa.snap.core.datamodel.Product', 2)
576 | snaphu_files[0] = product
577 | snaphu_files[1] = unwrapped
578 | output = GPF.createProduct("SnaphuImport", parameters, snaphu_files)
579 | return output
580 |
581 |
582 | # [P4] Function to transform phase to elevation
583 | def phase_to_elev(unwrapped_product, dem):
584 | print('Converting phase to elevation...')
585 | parameters.put("demName", dem)
586 | output = GPF.createProduct("PhaseToElevation", parameters, unwrapped_product)
587 | return output
588 |
589 |
590 | # [P4] Function to perform terrain correction
591 | def terrain_correction(source, band=None, projected=None, pixel_size=None):
592 | print('Terrain correction...')
593 | parameters.put('demName', args.dem)
594 | parameters.put('imgResamplingMethod', 'BILINEAR_INTERPOLATION')
595 | if projected:
596 | parameters.put('mapProjection', 'AUTO:42001')
597 | # parameters.put('saveProjectedLocalIncidenceAngle', False)
598 | if band is not None:
599 | parameters.put('sourceBands', band)
600 | parameters.put('saveSelectedSourceBand', True)
601 | parameters.put('nodataValueAtSea', False)
602 | parameters.put('pixelSpacingInMeter', pixel_size)
603 | output = GPF.createProduct('Terrain-Correction', parameters, source)
604 | return output
605 |
606 |
607 | # [P4] Function to combine two products
608 | def collocate(product1, product2):
609 | print('Collocating two products...')
610 | sourceProducts = HashMap()
611 | sourceProducts.put('master', product1)
612 | sourceProducts.put('slave', product2)
613 | parameters.put('renameMasterComponents', False)
614 | parameters.put('renameSlaveComponents', False)
615 | output = GPF.createProduct("Collocate", parameters, sourceProducts)
616 | return output
617 |
618 |
619 | # [P4] Function to set no data value for several bands
620 | def set_no_data(product, bands, ndval):
621 | for b in bands:
622 | product.getBand(b).setGeophysicalNoDataValue(ndval)
623 | product.getBand(b).setNoDataValueUsed(True)
624 | return product
625 |
626 |
627 | # [P4] Function to select band
628 | def band_select(product, bands):
629 | parameters = HashMap()
630 | parameters.put('sourceBands', bands)
631 | output = GPF.createProduct('Subset', parameters, product)
632 | return output
633 |
634 |
635 | # [P4] Filter elevation pixels by coherence threshold
636 | def filter_elevation(product, elev_band, coh_band, coh_th, nodata):
637 | bandmath_expression = 'if ' + coh_band + ' > ' + str(coh_th) + \
638 | ' then ' + elev_band + ' else ' + str(nodata)
639 | product.getBand(elev_band)
640 | BandDescriptor = jpy.get_type('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor')
641 | targetBand1 = BandDescriptor()
642 | targetBand1.name = elev_band + '_filtered_cohth_' + str(coh_th)
643 | targetBand1.type = 'float32'
644 | targetBand1.expression = bandmath_expression
645 | targetBands = jpy.array('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor', 1)
646 | targetBands[0] = targetBand1
647 | parameters = HashMap()
648 | parameters.put('targetBands', targetBands)
649 | output = GPF.createProduct('BandMaths', parameters, product)
650 | print(bandmath_expression)
651 | return output
652 |
653 |
654 | # Pipe functions
655 | # Pipeline 1 [P1]:
656 | def run_P1_interferogram(out_dir, file1, file2, polarization, aoi=None,
657 | topophaseremove=False, dem=None,
658 | ifg_squarepixel=None, ifg_cohwin_rg=None,
659 | ifg_cohwin_az=None,
660 | multilooking=None, ml_rangelooks=None,
661 | goldsteinfiltering=None,
662 | gpf_fftsize=None, gpf_win=None,
663 | gpf_cohmask=None, gpf_cohth=None,
664 | subsetting=None, subset_buffer=None):
665 | # Write user settings to log file
666 | file = open(os.path.join(out_dir, 'log.txt'), 'w')
667 | file.write(
668 | 'USER-SETTINGS FOR PIPELINE 1:\n' +
669 | 'ReferenceID path: ' + file1 + '\n' +
670 | 'ReferenceID date: ' + ref_date + '\n' +
671 | 'MatchID path: ' + file2 + '\n' +
672 | 'MatchID date: ' + mat_date + '\n' +
673 | 'Polarization: ' + polarization + '\n' +
674 | 'DEM for back-geocoding: ' + dem + '\n'
675 | )
676 | file.close()
677 |
678 | # Read files in
679 | product_1 = read(file1)
680 | product_2 = read(file2)
681 |
682 | # Get pass & orbit from the input files
683 | pass_metadata_p1 = product_1.getMetadataRoot() \
684 | .getElement('Abstracted_Metadata') \
685 | .getAttribute('PASS').getData()
686 | relorbit_metadata_p1 = product_1.getMetadataRoot(). \
687 | getElement('Abstracted_Metadata') \
688 | .getAttribute('REL_ORBIT').getData()
689 | pass_metadata_p2 = product_2.getMetadataRoot() \
690 | .getElement('Abstracted_Metadata') \
691 | .getAttribute('PASS').getData()
692 | relorbit_metadata_p2 = product_2.getMetadataRoot(). \
693 | getElement('Abstracted_Metadata') \
694 | .getAttribute('REL_ORBIT').getData()
695 |
696 | if str(pass_metadata_p1) != str(pass_metadata_p2) or \
697 | str(relorbit_metadata_p1) != str(relorbit_metadata_p2):
698 | raise ValueError("Image pair pass direction or relative orbit does not match.")
699 |
700 | # Apply stsa workflow
701 | stsa_1 = get_swath_burst(file1, aoi, polar=polarization, product_idx=1)
702 | stsa_2 = get_swath_burst(file2, aoi, polar=polarization, product_idx=2)
703 |
704 | if set(stsa_1['subswath']) != set(stsa_2['subswath']):
705 | # Both products must intersect the AOI with the same subswaths,
706 | # since each subswath is processed separately.
707 | raise ValueError("Subswaths intersecting the AOI do not match.")
708 | else:
709 | stsa_df = stsa_1.join(stsa_2.set_index('subswath'), on='subswath')
710 | stsa_df.lastBurstIndex_1.fillna(stsa_df.firstBurstIndex_1, inplace=True)
711 | stsa_df.lastBurstIndex_2.fillna(stsa_df.firstBurstIndex_2, inplace=True)
712 |
713 | # Compute InSAR stack overview
714 | # import the stack operator
715 | # From: https://forum.step.esa.int/t/insar-dinsar-perpendicular-baseline-calculation/3776/34
716 | stack = jpy.get_type('org.esa.s1tbx.insar.gpf.coregistration.CreateStackOp')
717 | stack.getBaselines([product_1, product_2], product_1)
718 | # Now there is a new piece of metadata in product one called 'Baselines'
719 | baseline_root_metadata = product_1.getMetadataRoot(). \
720 | getElement('Abstracted_Metadata').getElement('Baselines')
721 | # Write to log all the baselines between all master/slave configurations
722 | file = open(os.path.join(out_dir, 'log.txt'), 'a')
723 | file.write(
724 | 'Pass: ' + str(pass_metadata_p1) + '\n' +
725 | 'Orbit: ' + str(relorbit_metadata_p1) + '\n'
726 | )
727 | file.write('\nCOMPUTED STACKS IN PIPELINE 1:')
728 | master_ids = list(baseline_root_metadata.getElementNames())
729 | for master_id in master_ids:
730 | slave_ids = list(baseline_root_metadata.getElement(master_id).getElementNames())
731 | for slave_id in slave_ids:
732 | file.write(f'\n{master_id}, {slave_id}\n')
733 | baseline_metadata = baseline_root_metadata.getElement(master_id).getElement(slave_id)
734 | for baseline in list(baseline_metadata.getAttributeNames()):
735 | file.write(f'{baseline}: {baseline_metadata.getAttributeString(baseline)}\n')
736 | file.write('\n')
737 | file.close()
738 |
739 | # Proceed to SNAP workflow
740 | # Write user settings to log file
741 | file = open(os.path.join(out_dir, 'log.txt'), 'a')
742 | file.write(
743 | '\nUSER-SETTINGS FOR PIPELINE 1 (cont.):\n' +
744 | 'Interferogram settings:\n' +
745 | '- Square pixel: ' + str(ifg_squarepixel) + '\n' +
746 | '- Coherence range window size: ' + str(ifg_cohwin_rg) + '\n' +
747 | '- Coherence azimuth window size: ' + str(ifg_cohwin_az) + '\n' +
748 | 'Multi-looking applied: ' + str(multilooking) + '\n' +
749 | '- Multi-looking range: ' + str(ml_rangelooks) + '\n' +
750 | 'Goldstein filtering applied: ' + str(goldsteinfiltering) + '\n' +
751 | '- FFT size: ' + str(gpf_fftsize) + '\n' +
752 | '- Window size: ' + str(gpf_win) + '\n' +
753 | '- Coherence mask applied: ' + str(gpf_cohmask) + '\n' +
754 | '- Coherence mask threshold: ' + str(gpf_cohth) + '\n'
755 | )
756 | file.close()
757 |
758 | def snap_single_iw(iw,
759 | firstBurstIndex_1, firstBurstIndex_2,
760 | lastBurstIndex_1, lastBurstIndex_2):
761 | # Write sub-swath and burst to log file
762 | file = open(os.path.join(out_dir, 'log.txt'), 'a')
763 | file.write(
764 | '\nAUTOMATICALLY EXTRACTED PARAMETERS IN PIPELINE 1:\n'
765 | 'Subswath: ' + iw + '\n' +
766 | 'Bursts 1: ' + str(firstBurstIndex_1) + ',' + str(lastBurstIndex_1) + '\n' +
767 | 'Bursts 2: ' + str(firstBurstIndex_2) + ',' + str(lastBurstIndex_2) + '\n')
768 | file.close()
769 |
770 | product_TOPSAR_1 = topsar_split(product_1, iw,
771 | firstBurstIndex_1, lastBurstIndex_1)
772 | product_TOPSAR_2 = topsar_split(product_2, iw,
773 | firstBurstIndex_2, lastBurstIndex_2)
774 | product_orbitFile_1 = apply_orbit_file(product_TOPSAR_1)
775 | product_orbitFile_2 = apply_orbit_file(product_TOPSAR_2)
776 | product = back_geocoding([product_orbitFile_1, product_orbitFile_2], dem)
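    | # Enhanced Spectral Diversity refines the azimuth coregistration using the
    | # burst-overlap areas; it is applied only when the selected burst range
    | # spans more than two bursts.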
777 | if lastBurstIndex_1 - firstBurstIndex_1 > 1 or \
778 | lastBurstIndex_2 - firstBurstIndex_2 > 1:
779 | product = enhanced_spectral_diversity(product)
780 | out_filename = os.path.join(out_dir, 'out_' + iw)
781 | write_BEAM_DIMAP_format(product, out_filename)
782 |
783 | # compute interferogram and coherence on the co-registered stack
784 | product = interferogram(product,
785 | ifg_squarepixel, ifg_cohwin_rg, ifg_cohwin_az)
786 | product = topsar_deburst(product)
787 | return product
788 |
789 | if len(stsa_df) > 1:
790 | products = []
791 | for index, row in stsa_df.iterrows():
792 | product = snap_single_iw(
793 | iw=row['subswath'],
794 | firstBurstIndex_1=row['firstBurstIndex_1'],
795 | firstBurstIndex_2=row['firstBurstIndex_2'],
796 | lastBurstIndex_1=row['lastBurstIndex_1'],
797 | lastBurstIndex_2=row['lastBurstIndex_2']
798 | )
799 | products.append(product)
800 | product = topsar_merge(products)
801 | else:
802 | product = snap_single_iw(
803 | iw=stsa_df['subswath'].item(),
804 | firstBurstIndex_1=stsa_df['firstBurstIndex_1'].item(),
805 | firstBurstIndex_2=stsa_df['firstBurstIndex_2'].item(),
806 | lastBurstIndex_1=stsa_df['lastBurstIndex_1'].item(),
807 | lastBurstIndex_2=stsa_df['lastBurstIndex_2'].item()
808 | )
809 |
810 | if topophaseremove:
811 | product = topophase_removal(product, dem)
812 | if multilooking:
813 | product = multilook(product, ML_nRgLooks=ml_rangelooks)
814 | if goldsteinfiltering:
815 | product = goldstein_phase_filter(product,
816 | gpf_fftsize, gpf_win,
817 | gpf_cohmask, gpf_cohth)
818 | out_filename = os.path.join(out_dir, 'out_P1')
819 | write_BEAM_DIMAP_format(product, out_filename)
820 | if subsetting:
821 | product_ss = subset(product, aoi, buffer=subset_buffer)
822 | out_filename = os.path.join(out_dir, 'out_P1_subset')
823 | write_BEAM_DIMAP_format(product_ss, out_filename)
824 | print("Pipeline [P1] for interferogam creation complete")
825 |
826 |
827 | # [P2]:
828 | def run_P2_unwrap(out_dir, tiles, cost_mode, tile_overlap_row,
829 | tile_overlap_col, subset=None):
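    | # Pipeline 2: export the interferogram produced by P1 to SNAPHU format and
    | # run the phase unwrapping.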
830 | # Write user settings to log file
831 | file = open(os.path.join(out_dir, 'log.txt'), 'a')
832 | file.write(
833 | '\nUSER-SETTINGS FOR PIPELINE 2:\n' +
834 | 'Tiles: ' + str(tiles) + '\n' +
835 | 'Tiles overlap: row ' + str(tile_overlap_row) +
836 | ', col ' + str(tile_overlap_col) + '\n' +
837 | 'Cost mode: ' + cost_mode + '\n'
838 | )
839 | file.close()
840 |
841 | if subset:
842 | # takes subset result from previous pipeline
843 | in_filename = os.path.join(out_dir, 'out_P1_subset')
844 | product = read(in_filename + ".dim") # reads .dim
845 | else:
846 | # takes result from previous pipeline
847 | in_filename = os.path.join(out_dir, 'out_P1')
848 | product = read(in_filename + ".dim") # reads .dim
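    | # Export the wrapped phase and coherence for SNAPHU and run the external
    | # unwrapping (snaphu_export/snaphu_unwrapping are defined earlier in this script).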
849 | out_dir_snaphu = os.path.join(out_dir, "out_P2_snaphu")
850 | snaphu_export(product, out_dir_snaphu, tiles, cost_mode,
851 | tile_overlap_row, tile_overlap_col)
852 | snaphu_unwrapping(out_dir_snaphu)
853 | print("Pipeline [P2] for product unwrapping complete")
854 |
855 |
856 | # [P3]:
857 | def run_P3_elevation(out_dir, dem=None, subset=None, proj=None,
858 | pixel_size=None, coherence_threshold=None, nodataval=None):
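    | # Pipeline 3: import the SNAPHU result, convert unwrapped phase to elevation,
    | # terrain-correct, filter by coherence and export each band as a GeoTIFF.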
859 | # Write user settings to log file
860 | file = open(os.path.join(out_dir, 'log.txt'), 'a')
861 | file.write(
862 | '\nUSER-SETTINGS FOR PIPELINE 3:\n' +
863 | 'Pixel size: ' + str(pixel_size) + '\n' +
864 | 'Coherence threshold: ' + str(coherence_threshold) + '\n' +
865 | 'No data value: ' + str(nodataval) + '\n'
866 | )
867 | file.close()
868 | if subset:
869 | # takes subset result from previous pipeline
870 | in_filename = os.path.join(out_dir, 'out_P1_subset')
871 | product = read(in_filename + ".dim") # reads .dim
872 | else:
873 | # takes result from previous pipeline
874 | in_filename = os.path.join(out_dir, 'out_P1')
875 | product = read(in_filename + ".dim") # reads .dim
876 |
877 | out_dir_snaphu = os.path.join(out_dir, "out_P2_snaphu")
878 | unwrapped_fn = os.path.join(out_dir_snaphu, 'unwrapped')
879 | unwrapped = read(unwrapped_fn + ".dim")
880 | product_unwrapped = snaphu_import(product, unwrapped)
881 | elevation = phase_to_elev(product_unwrapped, dem)
882 | # Combine elevation product with intensity, coherence, wrapped and unwrapped phase
883 | product_comb = collocate(product_unwrapped, elevation)
884 | bands_comb = list(product_comb.getBandNames())
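    | # Positional band selection: indices 2:7 of the collocated product are
    | # assumed to hold the coherence, phase and elevation bands (intensity bands
    | # come first); adjust if the band order differs.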
885 | product_comb = set_no_data(product_comb, bands_comb[2:7], nodataval)
886 | # Terrain correct
887 | product_comb_tc = terrain_correction(product_comb, band=','.join(bands_comb[2:7]),
888 | projected=proj, pixel_size=pixel_size)
889 | # Create new elevation band with valid pixels above coherence threshold
890 | bands_comb_tc = list(product_comb_tc.getBandNames())
891 | elev_filter = filter_elevation(product_comb_tc,
892 | bands_comb_tc[4], bands_comb_tc[2],
893 | coherence_threshold, nodataval)
894 | product_comb_tc_filtered = collocate(product_comb_tc, elev_filter)
895 | bands_comb_tc_filtered = list(product_comb_tc_filtered.getBandNames())
896 | # Save to BEAM-DIMAP
897 | out_filename = os.path.join(out_dir, 'out_P3')
898 | write_BEAM_DIMAP_format(product_comb_tc_filtered, out_filename)
899 | # Save each band to a TIFF
900 | print(bands_comb_tc_filtered[0])
901 | intensity = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[0])
902 | intensity_tiff = os.path.join(out_dir, date_bundle + '_intensity.tif')
903 | write_TIFF_format(intensity, intensity_tiff)
904 | wrapped = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[1])
905 | wrapped_tiff = os.path.join(out_dir, date_bundle + '_wrapped_phase.tif')
906 | write_TIFF_format(wrapped, wrapped_tiff)
907 | coherence = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[2])
908 | coherence_tiff = os.path.join(out_dir, date_bundle + '_coherence.tif')
909 | write_TIFF_format(coherence, coherence_tiff)
910 | unwrapped = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[3])
911 | unwrapped_tiff = os.path.join(out_dir, date_bundle + '_unwrapped_phase.tif')
912 | write_TIFF_format(unwrapped, unwrapped_tiff)
913 | elevation = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[4])
914 | elevation_tiff = os.path.join(out_dir, date_bundle + '_elevation.tif')
915 | write_TIFF_format(elevation, elevation_tiff)
916 | elevation_cohth = band_select(product_comb_tc_filtered, bands=bands_comb_tc_filtered[5])
917 | elevation_cohth_tiff = os.path.join(out_dir, date_bundle +
918 | '_elevation_filtered_cohth_' +
919 | str(coherence_threshold) + '.tif')
920 | write_TIFF_format(elevation_cohth, elevation_cohth_tiff)
921 | print("Pipeline [P3] for DEM generation complete")
922 |
923 |
924 | # Run the workflow
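    | # args, file_path_1/file_path_2 and output_dir come from the argument-parsing
    | # section earlier in this script; the no_* options are assumed to be boolean
    | # flags whose defaults enable the corresponding processing step.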
925 | if 'P1' in args.pipelines:
926 | run_P1_interferogram(
927 | file1=file_path_1, file2=file_path_2,
928 | aoi=args.aoi_path, polarization=args.polarization,
929 | dem=args.dem, out_dir=output_dir,
930 | ifg_squarepixel=not args.ifg_indwinsize,
931 | ifg_cohwin_rg=args.ifg_cohwin_rg,
932 | ifg_cohwin_az=args.ifg_cohwin_az,
933 | multilooking=args.no_multilook,
934 | ml_rangelooks=args.multilook_range,
935 | goldsteinfiltering=args.no_goldstein,
936 | gpf_fftsize=args.gpf_fftsize,
937 | gpf_win=args.gpf_win,
938 | gpf_cohmask=args.gpf_cohmask,
939 | gpf_cohth=args.gpf_cohth,
940 | subsetting=args.no_subset,
941 | subset_buffer=args.aoi_buffer
942 | )
943 |
944 | if 'P2' in args.pipelines:
945 | run_P2_unwrap(
946 | out_dir=output_dir,
947 | tiles=args.snaphu_tiles,
948 | cost_mode=args.snaphu_costmode,
949 | tile_overlap_row=args.snaphu_tile_overlap_row,
950 | tile_overlap_col=args.snaphu_tile_overlap_col,
951 | subset=args.no_subset
952 | )
953 |
954 | if 'P3' in args.pipelines:
955 | run_P3_elevation(
956 | out_dir=output_dir,
957 | dem=args.dem,
958 | proj=args.no_output_projected,
959 | subset=args.no_subset,
960 | pixel_size=args.pixel_size,
961 | coherence_threshold=args.coherence_threshold,
962 | nodataval=args.no_data_value
963 | )
964 |
--------------------------------------------------------------------------------