├── .github
│   └── workflows
│       └── tests.yml
├── .gitignore
├── CONTRIBUTING.rst
├── LICENSE.txt
├── README.rst
├── assets
│   ├── dt_cimis.ini
│   ├── dt_cleanup_daily_image.py
│   ├── dt_export_daily_image.py
│   ├── dt_gridmet.ini
│   └── tmax_climo_ee_asset.py
├── docs
│   ├── ET_example.PNG
│   ├── SSEBopLogo.jpg
│   ├── SSEBopLogo.png
│   ├── SSEBopLogoBW.PNG
│   ├── SSEBopLogoBW.jpg
│   └── SSEBop_GEE_diagram.jpg
├── examples
│   ├── collection_interpolate.ipynb
│   ├── collection_overpass.ipynb
│   ├── elr_testing.ipynb
│   ├── image_mapping.ipynb
│   ├── image_test_output.ipynb
│   ├── lst_source.ipynb
│   ├── single_image.ipynb
│   ├── tcorr_fano.ipynb
│   └── test.py
├── openet
│   ├── __init__.py
│   └── ssebop
│       ├── __init__.py
│       ├── collection.py
│       ├── image.py
│       ├── interpolate.py
│       ├── ipytest.py
│       ├── landsat.py
│       ├── model.py
│       ├── tests
│       │   ├── conftest.py
│       │   ├── test_a_utils.py
│       │   ├── test_b_landsat.py
│       │   ├── test_b_model.py
│       │   ├── test_c_image.py
│       │   ├── test_d_collection.py
│       │   └── test_d_interpolate.py
│       └── utils.py
└── pyproject.toml
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 |
9 | permissions:
10 | contents: read
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v3
17 | - name: Set up Python 3.10
18 | uses: actions/setup-python@v3
19 | with:
20 | python-version: "3.10"
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install .[test]
25 | pip install --upgrade pytest
26 | - name: Lint with flake8
27 | run: |
28 | pip install flake8
29 | # stop the build if there are Python syntax errors or undefined names
30 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
31 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
32 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
33 | - name: Test with pytest
34 | env:
35 | EE_PRIVATE_KEY_B64: ${{ secrets.EE_PRIVATE_KEY_B64 }}
36 | run: |
37 | python -m pytest
38 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # Distribution / packaging
7 | build/
8 | dist/
9 | .eggs/
10 | *.egg-info/
11 |
12 | # Unit test / coverage reports
13 | .cache
14 | .coverage
15 |
16 | # PyTest
17 | .pytest_cache
18 |
19 | # Jupyter Notebook
20 | .ipynb_checkpoints
21 |
22 | # PyCharm project settings
23 | .idea
24 |
25 | # MacOS
26 | .DS_Store
27 |
28 | # Earth Engine keys
29 | privatekey.json
30 | privatekey.b64
31 |
32 | # Coverage
33 | .codecov.yml
34 | .coveralls.yml
35 | coverage.xml
36 |
37 | # Miscellaneous files
38 | notes.txt
39 |
--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | =============================
2 | Contributing to OpenET SSEBop
3 | =============================
4 |
5 | Thank you for your interest in supporting the OpenET SSEBop project.
6 |
7 | Versioning
8 | ==========
9 |
10 | The OpenET SSEBop project is working toward a version 1.0 release that will natively support being run globally. Until that time, the model will make 0.X releases for changes that are expected to change output values, and 0.X.Y releases for minor patch updates that are not expected to change output values.
11 |
12 | Coding Conventions
13 | ==================
14 |
15 | OpenET SSEBop was developed for Python 3.6. The code will likely work on other versions of Python 3, but there are no plans to officially support Python 2.7 at this time.
16 |
17 | All code should follow the `PEP8 <https://peps.python.org/pep-0008/>`__ style guide.
18 |
19 | Docstrings should be written for all functions and should follow the `NumPy docstring format <https://numpydoc.readthedocs.io/en/latest/format.html>`__.
20 |
21 | Development
22 | ===========
23 |
24 | Conda Environment
25 | -----------------
26 |
27 | For local application, development, and testing, the user is strongly encouraged to create a dedicated "openet" conda environment.
28 |
29 | Create the conda environment:
30 |
31 | .. code-block:: console
32 |
33 | conda create --name openet python=3.11
34 |
35 | Activate the environment:
36 |
37 | .. code-block:: console
38 |
39 | conda activate openet
40 |
41 | Install additional Python modules using conda (and pip for modules not currently available via conda):
42 |
43 | .. code-block:: console
44 |
45 | conda install earthengine-api pytest
46 | pip install openet-core --no-deps
47 |
48 | Updating OpenET Module
49 | ----------------------
50 |
51 | While developing the "ssebop" module, pip can be used to quickly update the module in the "openet" environment if needed.
52 |
53 | .. code-block:: console
54 |
55 | pip install . --no-deps
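
To confirm that the environment picked up the updated module, the installed version can be printed (``openet.ssebop.__version__`` is referenced by the asset export scripts in this repository):

.. code-block:: console

    python -c "import openet.ssebop; print(openet.ssebop.__version__)"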
56 |
57 | Testing
58 | =======
59 |
60 | PyTest
61 | ------
62 |
63 | Testing is done using `pytest <https://docs.pytest.org/>`__.
64 |
65 | .. code-block:: console
66 |
67 | python -m pytest
68 |
69 | Detailed testing results can be obtained using the "-v" and/or "-s" flags.
70 |
71 | .. code-block:: console
72 |
73 | python -m pytest -v -s
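
A single test module can also be targeted by passing its path to pytest (the path below follows the repository layout shown above):

.. code-block:: console

    python -m pytest openet/ssebop/tests/test_c_image.py -v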
74 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ===============
2 | OpenET - SSEBop
3 | ===============
4 |
5 | |version| |build|
6 |
7 | **WARNING: This code is in development, is being provided without support, and is subject to change at any time without notification**
8 |
9 | This repository provides a `Google Earth Engine <https://earthengine.google.com/>`__ Python API based implementation of the SSEBop ET model.
10 |
11 | The Operational Simplified Surface Energy Balance (SSEBop) model computes daily total actual evapotranspiration (ETa) using land surface temperature (Ts), maximum air temperature (Ta) and reference ET (ETr or ETo).
12 | The SSEBop model does not solve all the energy balance terms explicitly; rather, it defines the limiting conditions based on "gray-sky" net radiation balance principles and an air temperature parameter.
13 | This approach predefines unique sets of "hot/dry" and "cold/wet" limiting values for each pixel, allowing an operational model setup and a relatively shorter compute time. More information on the GEE implementation of SSEBop is published in Senay2022_ and Senay2023_ with additional details and model assessment.
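
In practice, the model scales the reference ET by the computed ET fraction for each image (see Model Output below)::

    ETa = ETf * ETr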
14 |
15 | *Basic SSEBop model implementation in Earth Engine:*
16 |
17 | .. image:: docs/SSEBop_GEE_diagram.jpg
18 |
19 | Model Design
20 | ============
21 |
22 | The primary component of the SSEBop model is the Image() class. The Image class can be used to compute a single fraction of reference ET (ETf) image from a single input image. The Image class should generally be instantiated from an Earth Engine Landsat image using the collection specific methods listed below. ET image collections can be built by computing ET in a function that is mapped over a collection of input images. Please see the `Example Notebooks`_ for more details.
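
A minimal sketch of this mapping pattern, assuming an authenticated ``earthengine-api`` session (the collection, date range, and point below are illustrative):

.. code-block:: python

    import ee
    import openet.ssebop as ssebop

    ee.Initialize()

    def compute_et_fraction(landsat_img):
        # Instantiate the model from a Landsat C2 SR/ST image and
        # return the ET fraction image
        return ssebop.Image.from_landsat_c2_sr(landsat_img).et_fraction

    landsat_coll = (
        ee.ImageCollection('LANDSAT/LC08/C02/T1_L2')
        .filterDate('2017-07-01', '2017-08-01')
        .filterBounds(ee.Geometry.Point(-121.9, 38.8))
    )
    etf_coll = landsat_coll.map(compute_et_fraction)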
23 |
24 | Input Collections
25 | =================
26 |
27 | SSEBop ET can currently be computed for Landsat Collection 2 Level 2 (SR/ST) images from the following Earth Engine image collections:
28 |
29 | * LANDSAT/LC09/C02/T1_L2
30 | * LANDSAT/LC08/C02/T1_L2
31 | * LANDSAT/LE07/C02/T1_L2
32 | * LANDSAT/LT05/C02/T1_L2
33 |
34 | **Note:** Users are encouraged to prioritize use of Collection 2 data where available. Collection 1 was produced by USGS until 2022-01-01, and maintained by Earth Engine until 2023-01-01. [`More Information `__]
35 |
36 | Landsat Collection 2 SR/ST Input Image
37 | --------------------------------------
38 |
39 | To instantiate the class for a Landsat Collection 2 SR/ST image, use the Image.from_landsat_c2_sr method.
40 |
41 | The input Landsat image must have the following bands and properties:
42 |
43 | ================= ==================================================================
44 | SPACECRAFT_ID     Band Names
45 | ================= ==================================================================
46 | LANDSAT_5         SR_B1, SR_B2, SR_B3, SR_B4, SR_B5, SR_B7, ST_B6, QA_PIXEL
47 | LANDSAT_7         SR_B1, SR_B2, SR_B3, SR_B4, SR_B5, SR_B7, ST_B6, QA_PIXEL
48 | LANDSAT_8         SR_B1, SR_B2, SR_B3, SR_B4, SR_B5, SR_B6, SR_B7, ST_B10, QA_PIXEL
49 | LANDSAT_9         SR_B1, SR_B2, SR_B3, SR_B4, SR_B5, SR_B6, SR_B7, ST_B10, QA_PIXEL
50 | ================= ==================================================================
51 |
52 | Model Output
53 | ------------
54 |
55 | The primary output of the SSEBop model is the fraction of reference ET (ETf). The actual ET (ETa) can then be computed by multiplying the Landsat-based ETf image by the reference ET (e.g. ETr from GRIDMET).
56 |
57 | *Example SSEBop ETa from Landsat:*
58 |
59 | .. image:: docs/ET_example.PNG
60 |
61 | Example
62 | -------
63 |
64 | .. code-block:: python
65 |
66 | import openet.ssebop as ssebop
67 |
68 | landsat_img = ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_044033_20170716')
69 | et_fraction = ssebop.Image.from_landsat_c2_sr(landsat_img).et_fraction
70 | et_reference = ee.Image('IDAHO_EPSCOR/GRIDMET/20170716').select('etr')
71 | et_actual = et_fraction.multiply(et_reference)
72 |
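A self-contained variant of the snippet above, assuming the ``earthengine-api`` is installed and authenticated (the sample point is illustrative):

.. code-block:: python

    import ee
    import openet.ssebop as ssebop

    ee.Initialize()

    landsat_img = ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_044033_20170716')
    et_fraction = ssebop.Image.from_landsat_c2_sr(landsat_img).et_fraction
    et_reference = ee.Image('IDAHO_EPSCOR/GRIDMET/20170716').select('etr')
    et_actual = et_fraction.multiply(et_reference)

    # Sample the daily ETa [mm] at a point inside the scene
    point = ee.Geometry.Point(-121.9, 38.8)
    print(et_actual.reduceRegion(
        reducer=ee.Reducer.mean(), geometry=point, scale=30).getInfo())
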
73 | Example Notebooks
74 | =================
75 |
76 | Detailed Jupyter Notebooks of the various approaches for calling the OpenET SSEBop model are provided in the "examples" folder.
77 |
78 | + `Computing daily ET for a single Landsat image `__
79 | + `Computing a daily ET image collection from a Landsat image collection `__
80 | + `Computing monthly ET from a collection `__
81 |
82 | Ancillary Datasets
83 | ==================
84 |
85 | Land Surface Temperature (LST)
86 | ------------------------------
87 | Land Surface Temperature is currently obtained in the SSEBop approach as follows:
88 |
89 | * Directly from the Landsat Collection 2 Level-2 (ST band) images. More information can be found at: `USGS Landsat Collection 2 Level-2 Science Products `__
90 |
91 | Temperature Difference (dT)
92 | ---------------------------
93 | The SSEBop ET model uses dT as a predefined temperature difference between Thot and Tcold for each pixel.
94 | In SSEBop formulation, hot and cold limits are defined on the same pixel; therefore, dT actually represents the vertical temperature difference between the surface temperature of a theoretical bare/dry condition of a given pixel and the air temperature at the canopy level of the same pixel as explained in Senay2018_. The input dT is calculated under "gray-sky" conditions and assumed not to change from year to year, but is unique for each day and location.
95 |
96 | Default Asset ID: *projects/usgs-ssebop/dt/daymet_median_v7*
97 |
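As a sketch only: the dT asset could be inspected for a given day of year, assuming it is an image collection whose images carry a ``doy`` property (the property name and filtering approach here are assumptions, not a documented API):

.. code-block:: python

    import ee

    ee.Initialize()

    # Hypothetical lookup of the dT image for July 16 (DOY 197)
    dt_coll = ee.ImageCollection('projects/usgs-ssebop/dt/daymet_median_v7')
    dt_img = ee.Image(dt_coll.filterMetadata('doy', 'equals', 197).first())
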
98 | Cold Boundary Temperature (Tcold)
99 | -----------------------------------
100 | In order to determine the theoretical LST corresponding to cold/wet limiting environmental conditions (Tcold), the
101 | SSEBop model uses a Forcing and Normalizing Operation (FANO) method, featuring a linear relation between a normalized
102 | land surface temperature difference and NDVI difference using the dT parameter and a proportionality constant.
103 |
104 | More information on parameter design and model improvements using the FANO method can be found in Senay2023_. Additional SSEBop model algorithm theoretical basis documentation can be found `here `__.
105 |
106 | .. code-block:: python
107 |
108 | model_obj = ssebop.Image.from_landsat_c2_sr(
109 | ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_044033_20170716'),
110 | )
111 |
112 | The FANO parameterization allows the establishment of the cold boundary condition regardless of vegetation cover density, improving the performance and operational implementation of the SSEBop ET model in sparsely vegetated landscapes, dynamic growing seasons, and varying locations around the world.
113 |
114 | Installation
115 | ============
116 |
117 | The OpenET SSEBop python module can be installed via pip:
118 |
119 | .. code-block:: console
120 |
121 | pip install openet-ssebop
122 |
123 | Dependencies
124 | ============
125 |
126 | * `earthengine-api <https://pypi.org/project/earthengine-api/>`__
127 | * `openet-core <https://pypi.org/project/openet-core/>`__
128 |
129 | OpenET Namespace Package
130 | ========================
131 |
132 | Each OpenET model is stored in the "openet" folder (namespace). The model can then be imported as a "dot" submodule of the main openet module.
133 |
134 | .. code-block:: python
135 |
136 | import openet.ssebop as ssebop
137 |
138 | Development and Testing
139 | =======================
140 |
141 | Please see `CONTRIBUTING.rst <CONTRIBUTING.rst>`__.
142 |
143 | References
144 | ==========
145 |
146 | .. _references:
147 |
148 | .. [Senay2013]
149 | | Senay, G., Bohms, S., Singh, R., Gowda, P., Velpuri, N., Alemu, H., Verdin, J. (2013). Operational Evapotranspiration Mapping Using Remote Sensing and Weather Datasets: A New Parameterization for the SSEB Approach. *Journal of the American Water Resources Association*, 49(3).
150 | | `https://doi.org/10.1111/jawr.12057 `__
151 | .. [Senay2016]
152 | | Senay, G., Friedrichs, M., Singh, R., Velpuri, N. (2016). Evaluating Landsat 8 evapotranspiration for water use mapping in the Colorado River Basin. *Remote Sensing of Environment*, 185.
153 | | `https://doi.org/10.1016/j.rse.2015.12.043 `__
154 | .. [Senay2017]
155 | | Senay, G., Schauer, M., Friedrichs, M., Manohar, V., Singh, R. (2017). Satellite-based water use dynamics using historical Landsat data (1984\-2014) in the southwestern United States. *Remote Sensing of Environment*, 202.
156 | | `https://doi.org/10.1016/j.rse.2017.05.005 `__
157 | .. [Senay2018]
158 | | Senay, G. (2018). Satellite Psychrometric Formulation of the Operational Simplified Surface Energy Balance (SSEBop) Model for Quantifying and Mapping Evapotranspiration. *Applied Engineering in Agriculture*, 34(3).
159 | | `https://doi.org/10.13031/aea.12614 `__
160 | .. [Senay2019]
161 | | Senay, G., Schauer, M., Velpuri, N.M., Singh, R.K., Kagone, S., Friedrichs, M., Litvak, M.E., Douglas-Mankin, K.R. (2019). Long-Term (1986–2015) Crop Water Use Characterization over the Upper Rio Grande Basin of United States and Mexico Using Landsat-Based Evapotranspiration. *Remote Sensing*, 11(13):1587.
162 | | `https://doi.org/10.3390/rs11131587 `__
163 | .. [Schauer2019]
164 | | Schauer, M., Senay, G. (2019). Characterizing Crop Water Use Dynamics in the Central Valley of California Using Landsat-Derived Evapotranspiration. *Remote Sensing*, 11(15):1782.
165 | | `https://doi.org/10.3390/rs11151782 `__
166 | .. [Senay2022]
167 | | Senay, G.B., Friedrichs, M., Morton, C., Parrish, G. E., Schauer, M., Khand, K., ... & Huntington, J. (2022). Mapping actual evapotranspiration using Landsat for the conterminous United States: Google Earth Engine implementation and assessment of the SSEBop model. *Remote Sensing of Environment*, 275, 113011.
168 | | `https://doi.org/10.1016/j.rse.2022.113011 `__
169 | .. [Senay2023]
170 | | Senay, G.B., Parrish, G. E., Schauer, M., Friedrichs, M., Khand, K., Boiko, O., Kagone, S., Dittmeier, R., Arab, S., Ji, L. (2023). Improving the Operational Simplified Surface Energy Balance evapotranspiration model using the Forcing and Normalizing Operation. *Remote Sensing*, 15(1):260.
171 | | `https://doi.org/10.3390/rs15010260 `__
172 |
173 | .. |build| image:: https://github.com/Open-ET/openet-ssebop/actions/workflows/tests.yml/badge.svg
174 | :alt: Build status
175 | :target: https://github.com/Open-ET/openet-ssebop
176 | .. |version| image:: https://badge.fury.io/py/openet-ssebop.svg
177 | :alt: Latest version on PyPI
178 | :target: https://badge.fury.io/py/openet-ssebop
179 |
--------------------------------------------------------------------------------
/assets/dt_cimis.ini:
--------------------------------------------------------------------------------
1 | # CIMIS dT Export Input File
2 |
3 | [INPUTS]
4 | # Date range
5 | # Start/end date will not be read in cron mode
6 | start_date = 2019-01-01
7 | end_date = 2019-06-30
8 |
9 | # start_date = 2003-10-01
10 | # end_date = 2018-12-31
11 |
12 |
13 | [EXPORT]
14 | # Export Destination (only ASSET is currently supported for dT images)
15 | export_dest = ASSET
16 |
17 | # Project folder for the dT image collection
18 | # The collection name is computed from the dt_source
19 | export_coll = projects/usgs-ssebop/dt
20 |
21 | # Image name format
22 | export_id_fmt = dt_{product}_{date}_{export}_{dest}
23 |
24 |
25 | [SSEBOP]
26 | # dT source choices: CIMIS, DAYMET, GRIDMET
27 | dt_source = CIMIS
28 |
29 | elev_source = SRTM
30 |
31 | dt_min = 1
32 | dt_max = 25
33 |
--------------------------------------------------------------------------------
/assets/dt_cleanup_daily_image.py:
--------------------------------------------------------------------------------
1 | #--------------------------------
2 | # Name: dt_cleanup_daily_image.py
3 | # Purpose: Remove earlier versions of daily dT images
4 | #--------------------------------
5 |
6 | import argparse
7 | from collections import defaultdict
8 | import configparser
9 | import datetime
10 | import logging
11 | import os
12 | import pprint
13 | import re
14 | import sys
15 |
16 | import ee
17 |
18 | import openet.core.utils as utils
19 |
20 |
21 | def main(ini_path=None):
22 | """Remove earlier versions of daily dT images
23 |
24 | Parameters
25 | ----------
26 | ini_path : str
27 | Input file path.
28 |
29 | """
30 | logging.info('\nRemove earlier versions of daily dT images')
31 |
32 | ini = read_ini(ini_path)
33 |
34 | model_name = 'SSEBOP'
35 | # model_name = ini['INPUTS']['et_model'].upper()
36 |
37 | start_dt = datetime.datetime.strptime(
38 | ini['INPUTS']['start_date'], '%Y-%m-%d')
39 | end_dt = datetime.datetime.strptime(
40 | ini['INPUTS']['end_date'], '%Y-%m-%d')
41 | logging.debug('Start Date: {}'.format(start_dt.strftime('%Y-%m-%d')))
42 | logging.debug('End Date: {}\n'.format(end_dt.strftime('%Y-%m-%d')))
43 |
44 | try:
45 | dt_source = str(ini[model_name]['dt_source'])
46 | logging.debug('\ndt_source:\n {}'.format(dt_source))
47 | except KeyError:
48 | logging.error(' dt_source: must be set in INI')
49 | sys.exit()
50 | if dt_source.upper() not in ['CIMIS', 'DAYMET', 'GRIDMET']:
51 | raise ValueError('dt_source must be CIMIS, DAYMET, or GRIDMET')
52 |
53 | # Output dT daily image collection
54 | dt_daily_coll_id = '{}/{}_daily'.format(
55 | ini['EXPORT']['export_coll'], ini[model_name]['dt_source'].lower())
56 | logging.debug(' {}'.format(dt_daily_coll_id))
57 |
58 |
59 | logging.info('\nInitializing Earth Engine')
60 | ee.Initialize()
61 | ee.Number(1).getInfo()
62 |
63 |
64 | # Get list of existing images/files
65 | logging.debug('\nGetting GEE asset list')
66 | asset_list = utils.get_ee_assets(dt_daily_coll_id)
67 | logging.debug('Displaying first 10 images in collection')
68 | logging.debug(asset_list[:10])
69 |
70 |
71 | # Filter asset list by INI start_date and end_date
72 | logging.debug('\nFiltering by INI start_date and end_date')
73 | asset_re = re.compile(r'[\w_]+/(\d{8})_\d{8}')
74 | asset_list = [
75 | asset_id for asset_id in asset_list
76 | if (start_dt <= datetime.datetime.strptime(asset_re.findall(asset_id)[0], '%Y%m%d') and
77 | datetime.datetime.strptime(asset_re.findall(asset_id)[0], '%Y%m%d') <= end_dt)]
78 | if not asset_list:
79 | logging.info('Empty asset ID list after filter by start/end date, '
80 | 'exiting')
81 | return True
82 | logging.debug('Displaying first 10 images in collection')
83 | logging.debug(asset_list[:10])
84 |
85 |
86 | # Group asset IDs by image date
87 | asset_id_dict = defaultdict(list)
88 | for asset_id in asset_list:
89 | asset_dt = datetime.datetime.strptime(
90 | asset_id.split('/')[-1].split('_')[0], '%Y%m%d')
91 | asset_id_dict[asset_dt.strftime('%Y-%m-%d')].append(asset_id)
92 | # pprint.pprint(asset_id_dict)
93 |
94 |
95 | # Remove all but the last image when sorted by export date
96 | logging.info('\nRemoving assets')
97 | for key, asset_list in asset_id_dict.items():
98 | # logging.debug('{}'.format(key))
99 | if len(asset_list) >= 2:
100 | for asset_id in sorted(asset_list)[:-1]:
101 | logging.info(' Delete: {}'.format(asset_id))
102 | try:
103 | ee.data.deleteAsset(asset_id)
104 | except Exception as e:
105 | logging.info(' Unhandled exception, skipping')
106 | logging.debug(e)
107 | continue
108 |
109 |
110 | def read_ini(ini_path):
111 | logging.debug('\nReading Input File')
112 | # Open config file
113 | config = configparser.ConfigParser()
114 | try:
115 | config.read(ini_path)
116 | except Exception as e:
117 | logging.error(
118 | '\nERROR: Input file could not be read, '
119 | 'is not an input file, or does not exist\n'
120 | ' ini_path={}\n\nException: {}'.format(ini_path, e))
121 | sys.exit()
122 |
123 | # Force conversion of unicode to strings
124 | ini = dict()
125 | for section in config.keys():
126 | ini[str(section)] = {}
127 | for k, v in config[section].items():
128 | ini[str(section)][str(k)] = v
129 | return ini
130 |
131 |
132 | def arg_parse():
133 | """"""
134 | parser = argparse.ArgumentParser(
135 | description='Remove earlier versions of daily dT images',
136 | formatter_class=argparse.ArgumentDefaultsHelpFormatter)
137 | parser.add_argument(
138 | '-i', '--ini', type=utils.arg_valid_file,
139 | help='Input file', metavar='FILE')
140 | parser.add_argument(
141 | '--debug', default=logging.INFO, const=logging.DEBUG,
142 | help='Debug level logging', action='store_const', dest='loglevel')
143 | args = parser.parse_args()
144 |
145 | return args
146 |
147 |
148 | if __name__ == "__main__":
149 | args = arg_parse()
150 | logging.basicConfig(level=args.loglevel, format='%(message)s')
151 |
152 | main(ini_path=args.ini)
153 |
--------------------------------------------------------------------------------
/assets/dt_export_daily_image.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from collections import defaultdict
3 | import configparser
4 | import datetime
5 | import logging
6 | import os
7 | import pprint
8 | import re
9 | import sys
10 |
11 | import ee
12 |
13 | import openet.ssebop as ssebop
14 | import openet.core.utils as utils
15 |
16 |
17 | def main(ini_path=None, overwrite_flag=False, delay=0, key=None,
18 | reverse_flag=False):
19 | """Compute daily dT images
20 |
21 | Parameters
22 | ----------
23 | ini_path : str
24 | Input file path.
25 | overwrite_flag : bool, optional
26 | If True, generate new images (but with different export dates) even if
27 | the dates already have images. If False, only generate images for
28 | dates that are missing. The default is False.
29 | delay : float, optional
30 | Delay time between each export task (the default is 0).
31 | key : str, optional
32 | File path to an Earth Engine json key file (the default is None).
33 | reverse_flag : bool, optional
34 | If True, process dates in reverse order.
35 |
36 | """
37 | logging.info('\nCompute daily dT images')
38 |
39 | ini = read_ini(ini_path)
40 |
41 | model_name = 'SSEBOP'
42 | # model_name = ini['INPUTS']['et_model'].upper()
43 |
44 | if ini[model_name]['dt_source'].upper() == 'CIMIS':
45 | daily_coll_id = 'projects/climate-engine/cimis/daily'
46 | elif ini[model_name]['dt_source'].upper() == 'DAYMET':
47 | daily_coll_id = 'NASA/ORNL/DAYMET_V3'
48 | elif ini[model_name]['dt_source'].upper() == 'GRIDMET':
49 | daily_coll_id = 'IDAHO_EPSCOR/GRIDMET'
50 | else:
51 | raise ValueError('dt_source must be CIMIS, DAYMET, or GRIDMET')
52 |
53 | # Check dates
54 | if (ini[model_name]['dt_source'].upper() == 'CIMIS' and
55 | ini['INPUTS']['end_date'] < '2003-10-01'):
56 | logging.error(
57 | '\nCIMIS is not currently available before 2003-10-01, exiting\n')
58 | sys.exit()
59 | elif (ini[model_name]['dt_source'].upper() == 'DAYMET' and
60 | ini['INPUTS']['end_date'] > '2017-12-31'):
61 | logging.warning(
62 | '\nDAYMET is not currently available past 2017-12-31, '
63 | 'using median Tmax values\n')
64 | # sys.exit()
65 | # elif (ini[model_name]['dt_source'].upper() == 'TOPOWX' and
66 | # ini['INPUTS']['end_date'] > '2017-12-31'):
67 | # logging.warning(
68 | # '\nDAYMET is not currently available past 2017-12-31, '
69 | # 'using median Tmax values\n')
70 | # # sys.exit()
71 |
72 | logging.info('\nInitializing Earth Engine')
73 | if key:
74 | logging.info(' Using service account key file: {}'.format(key))
75 | # The "EE_ACCOUNT" parameter is not used if the key file is valid
76 | ee.Initialize(ee.ServiceAccountCredentials('deadbeef', key_file=key))
77 | else:
78 | ee.Initialize()
79 |
80 | # Output dT daily image collection
81 | dt_daily_coll_id = '{}/{}_daily'.format(
82 | ini['EXPORT']['export_coll'], ini[model_name]['dt_source'].lower())
83 |
84 |
85 | # Get an input image to set the dT values to
86 | logging.debug('\nInput properties')
87 | dt_name = ini[model_name]['dt_source']
88 | dt_source = dt_name.split('_', 1)[0]
89 | # dt_version = dt_name.split('_', 1)[1]
90 | daily_coll = ee.ImageCollection(daily_coll_id)
91 | dt_img = ee.Image(daily_coll.first()).select([0])
92 | dt_mask = dt_img.multiply(0)
93 | logging.debug(' Collection: {}'.format(daily_coll_id))
94 | logging.debug(' Source: {}'.format(dt_source))
95 | # logging.debug(' Version: {}'.format(dt_version))
96 |
97 | logging.debug('\nExport properties')
98 | export_proj = dt_img.projection().getInfo()
99 | export_geo = export_proj['transform']
100 | if 'crs' in export_proj.keys():
101 | export_crs = export_proj['crs']
102 | elif 'wkt' in export_proj.keys():
103 | export_crs = re.sub(r',\s+', ',', export_proj['wkt'])
104 | export_shape = dt_img.getInfo()['bands'][0]['dimensions']
105 | export_extent = [
106 | export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
107 | export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]]
108 | logging.debug(' CRS: {}'.format(export_crs))
109 | logging.debug(' Extent: {}'.format(export_extent))
110 | logging.debug(' Geo: {}'.format(export_geo))
111 | logging.debug(' Shape: {}'.format(export_shape))
112 |
113 | # Get current asset list
114 | if ini['EXPORT']['export_dest'].upper() == 'ASSET':
115 | logging.debug('\nGetting asset list')
116 | # DEADBEEF - daily is hardcoded in the asset_id for now
117 | asset_list = utils.get_ee_assets(dt_daily_coll_id)
118 | else:
119 | raise ValueError('invalid export destination: {}'.format(
120 | ini['EXPORT']['export_dest']))
121 |
122 | # Get current running tasks
123 | tasks = utils.get_ee_tasks()
124 | if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
125 | utils.print_ee_tasks()
126 | input('ENTER')
127 |
128 | # Limit by year and month
129 | try:
130 | month_list = sorted(list(utils.parse_int_set(ini['INPUTS']['months'])))
131 | except (KeyError, ValueError):
132 | logging.info('\nINPUTS "months" parameter not set in the INI,'
133 | '\n Defaulting to all months (1-12)\n')
134 | month_list = list(range(1, 13))
135 | # try:
136 | # year_list = sorted(list(utils.parse_int_set(ini['INPUTS']['years'])))
137 | # except:
138 | # logging.info('\nINPUTS "years" parameter not set in the INI,'
139 | # '\n Defaulting to all available years\n')
140 | # year_list = []
141 |
142 |
143 | # Group asset IDs by image date
144 | asset_id_dict = defaultdict(list)
145 | for asset_id in asset_list:
146 | asset_dt = datetime.datetime.strptime(
147 | asset_id.split('/')[-1].split('_')[0], '%Y%m%d')
148 | asset_id_dict[asset_dt.strftime('%Y-%m-%d')].append(asset_id)
149 | # pprint.pprint(export_dt_dict)
150 |
151 |
152 | iter_start_dt = datetime.datetime.strptime(
153 | ini['INPUTS']['start_date'], '%Y-%m-%d')
154 | iter_end_dt = datetime.datetime.strptime(
155 | ini['INPUTS']['end_date'], '%Y-%m-%d')
156 | logging.debug('Start Date: {}'.format(iter_start_dt.strftime('%Y-%m-%d')))
157 | logging.debug('End Date: {}\n'.format(iter_end_dt.strftime('%Y-%m-%d')))
158 |
159 |
160 | for export_dt in sorted(utils.date_range(iter_start_dt, iter_end_dt),
161 | reverse=reverse_flag):
162 | export_date = export_dt.strftime('%Y-%m-%d')
163 |
164 | # if ((month_list and export_dt.month not in month_list) or
165 | # (year_list and export_dt.year not in year_list)):
166 | if month_list and export_dt.month not in month_list:
167 | logging.debug(f'Date: {export_date} - month not in INI - skipping')
168 | continue
169 | elif export_date >= datetime.datetime.today().strftime('%Y-%m-%d'):
170 | logging.debug(f'Date: {export_date} - unsupported date - skipping')
171 | continue
172 | logging.info(f'Date: {export_date}')
173 |
174 | export_id = ini['EXPORT']['export_id_fmt'] \
175 | .format(
176 | product=dt_name.lower(),
177 | date=export_dt.strftime('%Y%m%d'),
178 | export=datetime.datetime.today().strftime('%Y%m%d'),
179 | dest=ini['EXPORT']['export_dest'].lower())
180 | logging.debug(' Export ID: {}'.format(export_id))
181 |
182 | if ini['EXPORT']['export_dest'] == 'ASSET':
183 | asset_id = '{}/{}_{}'.format(
184 | dt_daily_coll_id, export_dt.strftime('%Y%m%d'),
185 | datetime.datetime.today().strftime('%Y%m%d'))
186 | logging.debug(' Asset ID: {}'.format(asset_id))
187 |
188 | if overwrite_flag:
189 | if export_id in tasks.keys():
190 | logging.debug(' Task already submitted, cancelling')
191 | ee.data.cancelTask(tasks[export_id])
192 | # This is intentionally not an "elif" so that a task can be
193 | # cancelled and an existing image/file/asset can be removed
194 | if (ini['EXPORT']['export_dest'].upper() == 'ASSET' and
195 | asset_id in asset_list):
196 | logging.debug(' Asset already exists, removing')
197 | ee.data.deleteAsset(asset_id)
198 | else:
199 | if export_id in tasks.keys():
200 | logging.debug(' Task already submitted, exiting')
201 | continue
202 | elif (ini['EXPORT']['export_dest'].upper() == 'ASSET' and
203 | asset_id in asset_list):
204 | logging.debug(' Asset with current export date already exists, '
205 | 'skipping')
206 | continue
207 | elif len(asset_id_dict[export_date]) > 0:
208 | logging.debug(' Asset with earlier export date already exists, '
209 | 'skipping')
210 | continue
211 |
212 | # Compute dT using a fake Landsat image
213 | # The system:time_start property is the only needed value
214 | model_obj = ssebop.Image(
215 | ee.Image.constant([0, 0]).rename(['ndvi', 'lst'])
216 | .set({
217 | 'system:time_start': utils.millis(export_dt),
218 | 'system:index': 'LC08_043033_20170716',
219 | 'system:id': 'LC08_043033_20170716'}),
220 | dt_source=dt_source.upper(),
221 | elev_source='SRTM',
222 | dt_min=ini['SSEBOP']['dt_min'],
223 | dt_max=ini['SSEBOP']['dt_max'],
224 | )
225 |
226 | # Cast to float and set properties
227 | dt_img = model_obj.dt.float() \
228 | .set({
229 | 'system:time_start': utils.millis(export_dt),
230 | 'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
231 | 'date': export_dt.strftime('%Y-%m-%d'),
232 | 'year': int(export_dt.year),
233 | 'month': int(export_dt.month),
234 | 'day': int(export_dt.day),
235 | 'doy': int(export_dt.strftime('%j')),
236 | 'model_name': model_name,
237 | 'model_version': ssebop.__version__,
238 | 'dt_source': dt_source.upper(),
239 | # 'dt_version': dt_version.upper(),
240 | })
241 |
242 | # Build export tasks
243 | if ini['EXPORT']['export_dest'] == 'ASSET':
244 | logging.debug(' Building export task')
245 | task = ee.batch.Export.image.toAsset(
246 | image=ee.Image(dt_img),
247 | description=export_id,
248 | assetId=asset_id,
249 | crs=export_crs,
250 | crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
251 | dimensions='{0}x{1}'.format(*export_shape),
252 | )
253 | logging.info(' Starting export task')
254 | utils.ee_task_start(task)
255 |
256 | # Pause before starting next task
257 | utils.delay_task(delay_time=delay)
258 | logging.debug('')
259 |
260 |
261 | def read_ini(ini_path):
262 | logging.debug('\nReading Input File')
263 | # Open config file
264 | config = configparser.ConfigParser()
265 | try:
266 | config.read(ini_path)
267 | except Exception as e:
268 | logging.error(
269 | '\nERROR: Input file could not be read, '
270 | 'is not an input file, or does not exist\n'
271 | ' ini_path={}\n\nException: {}'.format(ini_path, e))
272 | sys.exit()
273 |
274 | # Force conversion of unicode to strings
275 | ini = dict()
276 | for section in config.keys():
277 | ini[str(section)] = {}
278 | for k, v in config[section].items():
279 | ini[str(section)][str(k)] = v
280 | return ini
281 |
282 |
283 | def arg_parse():
284 | """"""
285 | parser = argparse.ArgumentParser(
286 | description='Compute/export daily dT images',
287 | formatter_class=argparse.ArgumentDefaultsHelpFormatter)
288 | parser.add_argument(
289 | '-i', '--ini', type=utils.arg_valid_file,
290 | help='Input file', metavar='FILE')
291 | parser.add_argument(
292 | '--delay', default=0, type=float,
293 | help='Delay (in seconds) between each export tasks')
294 | parser.add_argument(
295 | '--key', type=utils.arg_valid_file, metavar='FILE',
296 | help='JSON key file')
297 | parser.add_argument(
298 | '--reverse', default=False, action='store_true',
299 | help='Process dates in reverse order')
300 | parser.add_argument(
301 | '--overwrite', default=False, action='store_true',
302 | help='Force overwrite of existing files')
303 | parser.add_argument(
304 | '--debug', default=logging.INFO, const=logging.DEBUG,
305 | help='Debug level logging', action='store_const', dest='loglevel')
306 | args = parser.parse_args()
307 |
308 | return args
309 |
310 |
311 | if __name__ == "__main__":
312 | args = arg_parse()
313 | logging.basicConfig(level=args.loglevel, format='%(message)s')
314 |
315 | main(ini_path=args.ini, overwrite_flag=args.overwrite, delay=args.delay,
316 | key=args.key, reverse_flag=args.reverse)
317 |
--------------------------------------------------------------------------------
/assets/dt_gridmet.ini:
--------------------------------------------------------------------------------
1 | # GRIDMET dT Export Input File
2 |
3 | [INPUTS]
4 | # Date range
5 | # Start/end date will not be read in cron mode
6 | start_date = 2019-01-01
7 | end_date = 2019-06-30
8 |
9 | # start_date = 1980-01-01
10 | # end_date = 2018-12-31
11 |
12 |
13 | [EXPORT]
14 | # Export Destination (only ASSET is currently supported for dT images)
15 | export_dest = ASSET
16 |
17 | # Project folder for the dT image collection
18 | # The collection name is computed from the dt_source
19 | export_coll = projects/usgs-ssebop/dt
20 |
21 | # Image name format
22 | export_id_fmt = dt_{product}_{date}_{export}_{dest}
23 |
24 |
25 | [SSEBOP]
26 | # dT source choices: CIMIS, DAYMET, GRIDMET
27 | dt_source = GRIDMET
28 |
29 | elev_source = SRTM
30 |
31 | dt_min = 1
32 | dt_max = 25
33 |
--------------------------------------------------------------------------------
/assets/tmax_climo_ee_asset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import datetime
3 | import logging
4 | import os
5 | import pprint
6 |
7 | import ee
8 | import openet.core.utils as utils
9 | import openet.ssebop.model
10 |
11 |
12 | def main(tmax_source, statistic, year_start, year_end,
13 | doy_list=range(1, 367), gee_key_file=None, delay_time=0, max_ready=-1,
14 | overwrite_flag=False, elr_flag=False, reverse_flag=False):
15 | """Tmax Climatology Assets
16 |
17 | Parameters
18 | ----------
19 | tmax_source : {'CIMIS', 'DAYMET_V3', 'DAYMET_V4', 'GRIDMET'}
20 | Maximum air temperature source keyword.
21 | statistic : {'median', 'mean'}
22 | Climatology statistic.
23 | year_start : int
24 | Start year.
25 | year_end : int
26 | End year (inclusive).
27 | doy_list : list(int), optional
28 | Days of year to process (the default is 1-366).
29 | gee_key_file : str, None, optional
30 | File path to a service account json key file.
31 | delay_time : float, optional
32 | Delay time in seconds between starting export tasks (or checking the
33 | number of queued tasks, see "max_ready" parameter). The default is 0.
34 | max_ready : int, optional
35 | Maximum number of queued "READY" tasks. The default is -1, which
36 | implies no limit to the number of tasks that will be submitted.
37 | overwrite_flag : bool, optional
38 | If True, overwrite existing files (the default is False).
39 | elr_flag : bool, optional
40 | If True, apply Elevation Lapse Rate (ELR) adjustment
41 | (the default is False).
42 | reverse_flag : bool, optional
43 | If True, process days in reverse order (the default is False).
44 |
45 | Returns
46 | -------
47 | None
48 |
49 | Notes
50 | -----
51 | Collection is built/filtered using "day of year" based on the system:time_start
52 | The DOY 366 collection is built by selecting only the DOY 365 images
53 | (so the DOY 366 image should be a copy of the DOY 365 image)
54 |
55 | Daymet calendar definition
56 | https://daac.ornl.gov/DAYMET/guides/Daymet_Daily_V4.html
57 | The Daymet calendar is based on a standard calendar year.
58 | All Daymet years, including leap years, have 1–365 days.
59 | For leap years, the Daymet data include leap day (February 29) and
60 | December 31 is discarded from leap years to maintain a 365-day year.
61 |
62 | """
63 | logging.info(f'\nGenerating {tmax_source} {statistic} asset')
64 |
65 | tmax_folder = 'projects/earthengine-legacy/assets/projects/usgs-ssebop/tmax'
66 |
67 | # MF - Could eventually make the DEM source (keyword-based) as an input argument.
68 | elev_source_id = 'CGIAR/SRTM90_V4'
69 |
70 | # CGM - Intentionally not setting the time_start
71 | # time_start_year = 1980
72 |
73 | if statistic.lower() not in ['median', 'mean']:
74 | raise ValueError(f'unsupported statistic: {statistic}')
75 |
76 | logging.info('\nInitializing Earth Engine')
77 | if gee_key_file and os.path.isfile(gee_key_file):
78 | logging.info(' Using service account key file: {}'.format(gee_key_file))
79 | # The "EE_ACCOUNT" doesn't seem to be used if the key file is valid
80 | ee.Initialize(ee.ServiceAccountCredentials('', key_file=gee_key_file))
81 | else:
82 | ee.Initialize()
83 |
84 | # CGM - Should we set default start/end years if they are not set by the user?
85 | if tmax_source.upper() in ['DAYMET_V3', 'DAYMET_V4']:
86 | tmax_coll = ee.ImageCollection('NASA/ORNL/' + tmax_source.upper()) \
87 | .select(['tmax']).map(c_to_k)
88 | elif tmax_source.upper() == 'CIMIS':
89 | tmax_coll = ee.ImageCollection('projects/climate-engine/cimis/daily') \
90 | .select(['Tx'], ['tmax']).map(c_to_k)
91 | elif tmax_source.upper() == 'GRIDMET':
92 | tmax_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') \
93 | .select(['tmmx'], ['tmax'])
94 | # elif tmax_source.upper() == 'TOPOWX':
95 | # tmax_coll = ee.ImageCollection('TOPOWX') \
96 | # .select(['tmmx'], ['tmax'])
97 | else:
98 | logging.error('Unsupported tmax_source: {}'.format(tmax_source))
99 | return False
100 |
101 | output_coll_id = f'{tmax_folder}/' \
102 | f'{tmax_source.lower()}_{statistic}_{year_start}_{year_end}'
103 | if elr_flag:
104 | elevation_img = ee.Image(elev_source_id)
105 | output_coll_id = output_coll_id + '_elr'
106 | output_coll_id = output_coll_id + '_cgm'
107 |
108 | tmax_info = ee.Image(tmax_coll.first()).getInfo()
109 | tmax_projection = ee.Image(tmax_coll.first()).projection()
110 | tmax_proj_info = tmax_projection.getInfo()
111 | if 'wkt' in tmax_proj_info.keys():
112 | tmax_crs = tmax_proj_info['wkt'].replace(' ', '').replace('\n', '')
113 | else:
114 | # TODO: Add support for projections having a "crs" key instead of "wkt"
115 | raise Exception('unsupported projection type')
116 |
117 | if tmax_source.upper() in ['DAYMET_V3', 'DAYMET_V4']:
118 | # TODO: Check if the DAYMET_V4 grid is aligned to DAYMET_V3
119 | # Custom smaller extent for DAYMET focused on CONUS
120 | extent = [-1999750, -1890500, 2500250, 1109500]
121 | dimensions = [4500, 3000]
122 | transform = [1000, 0, -1999750, 0, -1000, 1109500]
123 | # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
124 | # extent = [-2099750, -3090500, 2900250, 1909500]
125 | # dimensions = [5000, 5000]
126 | # transform = [1000, 0, -2099750, 0, -1000, 1909500]
127 | else:
128 | transform = tmax_proj_info['transform']
129 | dimensions = tmax_info['bands'][0]['dimensions']
130 | logging.info(' CRS: {}'.format(tmax_crs))
131 | logging.info(' Transform: {}'.format(transform))
132 | logging.info(' Dimensions: {}\n'.format(dimensions))
133 |
134 | # Build the export collection if it doesn't exist
135 | if not ee.data.getInfo(output_coll_id):
136 | logging.info('\nImage collection does not exist and will be built'
137 | '\n {}'.format(output_coll_id))
138 | input('Press ENTER to continue')
139 | ee.data.createAsset({'type': 'ImageCollection'}, output_coll_id)
140 | # # Switch type string if use_cloud_api=True
141 | # ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, output_coll_id)
142 |
143 | # Get current running assets
144 | # CGM: This is currently returning the asset IDs without earthengine-legacy
145 | assets = utils.get_ee_assets(output_coll_id)
146 | # assets = [asset_id.replace('projects/earthengine-legacy/assets/', '')
147 | # for asset_id in assets]
148 |
149 | # Get current running tasks
150 | tasks = utils.get_ee_tasks()
151 | if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
152 | logging.debug(' Tasks: {}'.format(len(tasks)))
153 | input('ENTER')
154 |
155 |
156 | for doy in sorted(doy_list, reverse=reverse_flag):
157 | logging.info('DOY: {:03d}'.format(doy))
158 |
159 | # CGM - Intentionally not setting the time_start
160 | # What year should we use for the system:time_start?
161 | # time_start_dt = datetime.datetime.strptime(
162 | # '{}_{:03d}'.format(time_start_year, doy), '%Y_%j')
163 | # logging.debug(' Time Start Date: {}'.format(
164 | # time_start_dt.strftime('%Y-%m-%d')))
165 |
166 | asset_id = '{}/{:03d}'.format(output_coll_id, doy)
167 | asset_short_id = asset_id.replace('projects/earthengine-legacy/assets/', '')
168 | export_id = 'tmax_{}_{}_{}_{}_day{:03d}'.format(
169 | tmax_source.lower(), statistic, year_start, year_end, doy)
170 | if elr_flag:
171 | export_id = export_id + '_elr'
172 | logging.debug(' Asset ID: {}'.format(asset_id))
173 | logging.debug(' Export ID: {}'.format(export_id))
174 |
175 | if overwrite_flag:
176 | if export_id in tasks.keys():
177 | logging.info(' Task already submitted, cancelling')
178 | ee.data.cancelTask(tasks[export_id])
179 | if asset_short_id in assets or asset_id in assets:
180 | logging.info(' Asset already exists, removing')
181 | ee.data.deleteAsset(asset_id)
182 | else:
183 | if export_id in tasks.keys():
184 | logging.info(' Task already submitted, skipping')
185 | continue
186 | elif asset_short_id in assets:
187 | logging.info(' Asset already exists, skipping')
188 | continue
189 |
190 | # Filter the Tmax collection to the target day of year
191 | if doy < 366:
192 | tmax_doy_coll = tmax_coll \
193 | .filter(ee.Filter.calendarRange(doy, doy, 'day_of_year')) \
194 | .filter(ee.Filter.calendarRange(year_start, year_end, 'year'))
195 | else:
196 | # Compute DOY 366 as a copy of the DOY 365 values
197 | tmax_doy_coll = tmax_coll \
198 | .filter(ee.Filter.calendarRange(365, 365, 'day_of_year')) \
199 | .filter(ee.Filter.calendarRange(year_start, year_end, 'year'))
200 |
201 | # Compute the Tmax climo image
202 | if statistic.lower() == 'median':
203 | tmax_img = ee.Image(tmax_doy_coll.median())
204 | elif statistic.lower() == 'mean':
205 | tmax_img = ee.Image(tmax_doy_coll.mean())
206 |
207 | # Fill interior water holes with the mean of the surrounding cells
208 | # Use the filled image as the source to the where since tmax is nodata
209 | # CGM - Check if this is needed for DAYMET_V4
210 | if tmax_source.upper() in ['DAYMET_V3', 'DAYMET_V4']:
211 | filled_img = tmax_img.focal_mean(4000, 'circle', 'meters') \
212 | .reproject(tmax_crs, transform)
213 | tmax_img = filled_img.where(tmax_img.gt(0), tmax_img)
214 | # tmax_img = filled_img.where(tmax_img, tmax_img)
215 |
216 | if elr_flag:
217 | tmax_img = openet.ssebop.model.elr_adjust(
218 | temperature=tmax_img, elevation=elevation_img)
219 |
220 | tmax_img = tmax_img.set({
221 | 'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
222 | 'doy': int(doy),
223 | # 'doy': ee.String(ee.Number(doy).format('%03d')),
224 | 'elr_flag': elr_flag,
225 | 'year_start': year_start,
226 | 'year_end': year_end,
227 | 'years': tmax_doy_coll.size(),
228 | # CGM - Intentionally not setting the time_start
229 | # 'system:time_start': ee.Date(
230 | # time_start_dt.strftime('%Y-%m-%d')).millis()
231 | })
232 |
233 | # Build export tasks
234 | logging.debug(' Building export task')
235 | task = ee.batch.Export.image.toAsset(
236 | tmax_img,
237 | description=export_id,
238 | assetId=asset_id,
239 | dimensions='{0}x{1}'.format(*dimensions),
240 | crs=tmax_crs,
241 | crsTransform='[' + ','.join(map(str, transform)) + ']',
242 | maxPixels=int(1E10),
243 | )
244 | # task = ee.batch.Export.image.toCloudStorage(
245 | # tmax_img,
246 | # description=export_id,
247 | # bucket='tmax_',
248 | # fileNamePrefix=export_id,
249 | # dimensions='{0}x{1}'.format(*dimensions),
250 | # crs=tmax_crs,
251 | # crsTransform='[' + ','.join(map(str, transform)) + ']',
252 | # maxPixels=int(1E10),
253 | # fileFormat='GeoTIFF',
254 | # formatOptions={'cloudOptimized': True},
255 | # )
256 |
257 | logging.info(' Starting export task')
258 | utils.ee_task_start(task)
259 |
260 | # Pause before starting next task
261 | utils.delay_task(delay_time, max_ready)
262 |
263 |
264 | def c_to_k(image):
265 | """Convert temperature from C to K"""
266 | return image.add(273.15).copyProperties(image, ['system:time_start'])
267 |
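# Usage sketch (hedged; the collection ID is illustrative of a source stored
# in Celsius, e.g. DAYMET, and is not prescribed by this script):
# tmax_coll = ee.ImageCollection('NASA/ORNL/DAYMET_V4').select(['tmax']).map(c_to_k)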
268 |
269 | def arg_parse():
270 | """"""
271 | parser = argparse.ArgumentParser(
272 | description='Generate Tmax Climatology Assets',
273 | formatter_class=argparse.ArgumentDefaultsHelpFormatter)
274 | parser.add_argument(
275 | '--tmax', type=str, metavar='TMAX', required=True,
276 | choices=['CIMIS', 'DAYMET_V3', 'DAYMET_V4', 'GRIDMET'],
277 | help='Maximum air temperature source keyword')
278 | parser.add_argument(
279 | '--stat', choices=['median', 'mean'], required=True,
280 | help='Climatology statistic')
281 | parser.add_argument(
282 | '--start', type=int, metavar='YEAR', required=True, help='Start year')
283 | parser.add_argument(
284 | '--end', type=int, metavar='YEAR', required=True, help='End year')
285 | parser.add_argument(
286 | '--doy', default='1-366', metavar='DOY', type=utils.parse_int_set,
287 | help='Day of year (DOY) range to process')
288 | parser.add_argument(
289 | '--key', type=utils.arg_valid_file, metavar='FILE',
290 | help='Earth Engine service account JSON key file')
291 | parser.add_argument(
292 | '--delay', default=0, type=float,
293 |         help='Delay (in seconds) between each export task')
294 | parser.add_argument(
295 | '--ready', default=-1, type=int,
296 | help='Maximum number of queued READY tasks')
297 | parser.add_argument(
298 | '--reverse', default=False, action='store_true',
299 |         help='Process the DOY list in reverse order')
300 | parser.add_argument(
301 | '--elr', default=False, action='store_true',
302 | help='Apply Elevation Lapse Rate (ELR) adjustment')
303 | parser.add_argument(
304 | '-o', '--overwrite', default=False, action='store_true',
305 | help='Force overwrite of existing files')
306 | parser.add_argument(
307 | '-d', '--debug', default=logging.INFO, const=logging.DEBUG,
308 | help='Debug level logging', action='store_const', dest='loglevel')
309 | args = parser.parse_args()
310 | return args
311 |
312 |
313 | if __name__ == '__main__':
314 | args = arg_parse()
315 |
316 | logging.basicConfig(level=args.loglevel, format='%(message)s')
317 | logging.getLogger('googleapiclient').setLevel(logging.ERROR)
318 |
319 | main(tmax_source=args.tmax, statistic=args.stat,
320 | year_start=args.start, year_end=args.end,
321 | doy_list=args.doy, gee_key_file=args.key,
322 | delay_time=args.delay, max_ready=args.ready,
323 |          overwrite_flag=args.overwrite, elr_flag=args.elr,
324 | reverse_flag=args.reverse,
325 | )
326 |
--------------------------------------------------------------------------------
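A hedged invocation sketch for the script above, using only the flags defined
in arg_parse() (the year range, DOY window, and key file name are illustrative):

    python assets/tmax_climo_ee_asset.py --tmax DAYMET_V4 --stat median \
        --start 1981 --end 2020 --doy 150-250 --elr --key service_key.json
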
/docs/ET_example.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/ET_example.PNG
--------------------------------------------------------------------------------
/docs/SSEBopLogo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/SSEBopLogo.jpg
--------------------------------------------------------------------------------
/docs/SSEBopLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/SSEBopLogo.png
--------------------------------------------------------------------------------
/docs/SSEBopLogoBW.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/SSEBopLogoBW.PNG
--------------------------------------------------------------------------------
/docs/SSEBopLogoBW.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/SSEBopLogoBW.jpg
--------------------------------------------------------------------------------
/docs/SSEBop_GEE_diagram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Open-ET/openet-ssebop/68310d5ff709fdc4d7c2b61fd1cc95cb69a55001/docs/SSEBop_GEE_diagram.jpg
--------------------------------------------------------------------------------
/examples/test.py:
--------------------------------------------------------------------------------
1 | """A trivial change to master branch
2 | to make sure we can pull"""
--------------------------------------------------------------------------------
/openet/__init__.py:
--------------------------------------------------------------------------------
1 | __path__ = __import__('pkgutil').extend_path(__path__, __name__)
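# Hedged note: the pkgutil-style namespace hook above lets separate
# distributions (e.g. openet-core and openet-ssebop) install subpackages
# under the shared top-level "openet" package, so imports like
# "import openet.ssebop" and "import openet.core.interpolate" can coexist.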
2 |
--------------------------------------------------------------------------------
/openet/ssebop/__init__.py:
--------------------------------------------------------------------------------
1 | from .image import Image
2 | from .collection import Collection
3 | from . import interpolate
4 |
5 | MODEL_NAME = 'SSEBOP'
6 |
7 | # from importlib import metadata
8 | # # __version__ = metadata.version(__package__ or __name__)
9 | # __version__ = metadata.version(__package__.replace('.', '-') or __name__.replace('.', '-'))
10 | # # __version__ = metadata.version('openet-ssebop')
11 |
--------------------------------------------------------------------------------
/openet/ssebop/collection.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import datetime
3 | from importlib import metadata
4 | # import pprint
5 | import warnings
6 |
7 | from dateutil.relativedelta import relativedelta
8 | import ee
9 | import openet.core.interpolate
10 | # TODO: import utils from openet.core
11 | # import openet.core.utils as utils
12 |
13 | from . import utils
14 | from .image import Image
15 |
16 |
17 | def lazy_property(fn):
18 | """Decorator that makes a property lazy-evaluated
19 |
20 | https://stevenloria.com/lazy-properties/
21 | """
22 | attr_name = '_lazy_' + fn.__name__
23 |
24 | @property
25 | def _lazy_property(self):
26 | if not hasattr(self, attr_name):
27 | setattr(self, attr_name, fn(self))
28 | return getattr(self, attr_name)
29 | return _lazy_property
30 |
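# A hedged usage sketch for lazy_property (the Demo class is illustrative):
# the wrapped method runs once on first access and the result is cached on
# the instance, so later accesses skip the computation.
#
#     class Demo:
#         @lazy_property
#         def answer(self):
#             print('computing')
#             return 42
#
#     d = Demo()
#     d.answer  # prints 'computing' and returns 42
#     d.answer  # returns the cached 42 without recomputing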
31 |
32 | class Collection:
33 | """"""
34 |
35 | def __init__(
36 | self,
37 | collections,
38 | start_date,
39 | end_date,
40 | geometry,
41 | variables=None,
42 | cloud_cover_max=70,
43 | et_reference_source=None,
44 | et_reference_band=None,
45 | et_reference_factor=None,
46 | et_reference_resample=None,
47 | et_reference_date_type=None,
48 | filter_args=None,
49 | model_args=None,
50 | # **kwargs
51 | ):
52 | """Earth Engine based SSEBop Image Collection
53 |
54 | Parameters
55 | ----------
56 | collections : list, str
57 | GEE satellite image collection IDs.
58 | start_date : str
59 | ISO format inclusive start date (i.e. YYYY-MM-DD).
60 | end_date : str
61 | ISO format exclusive end date (i.e. YYYY-MM-DD).
62 | This date needs to be exclusive since it will be passed directly
63 | to the .filterDate() calls.
64 | geometry : ee.Geometry
65 | The geometry object will be used to filter the input collections
66 | using the ee.ImageCollection.filterBounds() method.
67 | variables : list, optional
68 | Output variables can also be specified in the method calls.
69 | cloud_cover_max : float, str
70 | Maximum cloud cover percentage (the default is 70%).
71 | - Landsat TOA: CLOUD_COVER_LAND
72 | - Landsat SR: CLOUD_COVER_LAND
73 | et_reference_source : str, float, optional
74 | Reference ET source (the default is None). Source must
75 | be set here or in model args to interpolate ET, ETf, or ETr.
76 | et_reference_band : str, optional
77 | Reference ET band name (the default is None). Band must
78 | be set here or in model args to interpolate ET, ETf, or ETr.
79 | et_reference_factor : float, None, optional
80 | Reference ET scaling factor. The default is None which is
81 | equivalent to 1.0 (or no scaling).
82 | et_reference_resample : {'nearest', 'bilinear', 'bicubic', None}, optional
83 | Reference ET resampling. The default is None which is equivalent
84 | to nearest neighbor resampling.
85 | filter_args : dict
86 | Image collection filters (the default is None).
87 | Organize filters as a dictionary with the key being
88 | the collection ID and the value an ee.Filter() object.
89 | model_args : dict
90 | Model Image initialization keyword arguments (the default is None).
91 | Dictionary will be passed through to model Image init.
92 |
93 | """
94 | self.collections = collections
95 | self.variables = variables
96 | self.start_date = start_date
97 | self.end_date = end_date
98 | self.geometry = geometry
99 | self.cloud_cover_max = cloud_cover_max
100 |
101 | # CGM - Should we check that model_args and filter_args are dict?
102 | if model_args is not None:
103 | self.model_args = model_args
104 | else:
105 | self.model_args = {}
106 |
107 | if filter_args is not None:
108 | self.filter_args = filter_args
109 | else:
110 | self.filter_args = {}
111 |
112 | # Reference ET parameters
113 | self.et_reference_source = et_reference_source
114 | self.et_reference_band = et_reference_band
115 | self.et_reference_factor = et_reference_factor
116 | self.et_reference_resample = et_reference_resample
117 | self.et_reference_date_type = et_reference_date_type
118 |
119 | # Check reference ET parameters
120 | if et_reference_factor and not utils.is_number(et_reference_factor):
121 | raise ValueError('et_reference_factor must be a number')
122 | if et_reference_factor and self.et_reference_factor < 0:
123 | raise ValueError('et_reference_factor must be greater than zero')
124 | et_reference_resample_methods = ['nearest', 'bilinear', 'bicubic']
125 | if (et_reference_resample and
126 | et_reference_resample.lower() not in et_reference_resample_methods):
127 | raise ValueError('unsupported et_reference_resample method')
128 | et_reference_date_type_methods = ['doy', 'daily']
129 | if (et_reference_date_type and
130 | et_reference_date_type.lower() not in et_reference_date_type_methods):
131 | raise ValueError('unsupported et_reference_date_type method')
132 |
133 | # Set/update the reference ET parameters in model_args if they were set in init()
134 | if self.et_reference_source:
135 | self.model_args['et_reference_source'] = self.et_reference_source
136 | if self.et_reference_band:
137 | self.model_args['et_reference_band'] = self.et_reference_band
138 | if self.et_reference_factor:
139 | self.model_args['et_reference_factor'] = self.et_reference_factor
140 | if self.et_reference_resample:
141 | self.model_args['et_reference_resample'] = self.et_reference_resample
142 | if self.et_reference_date_type:
143 | self.model_args['et_reference_date_type'] = self.et_reference_date_type
144 | # elif self.et_reference_date_type is None:
145 | # # Extra conditional needed since None is currently a valid date_type
146 | # # This should probably be changed so that "daily" is the default
147 | # self.model_args['et_reference_date_type'] = None
148 |
149 | # Model specific variables that can be interpolated to a daily timestep
150 | # CGM - Should this be specified in the interpolation method instead?
151 | self._interp_vars = ['ndvi', 'et_fraction']
152 |
153 | self._landsat_c2_sr_collections = [
154 | 'LANDSAT/LT04/C02/T1_L2',
155 | 'LANDSAT/LT05/C02/T1_L2',
156 | 'LANDSAT/LE07/C02/T1_L2',
157 | 'LANDSAT/LC08/C02/T1_L2',
158 | 'LANDSAT/LC09/C02/T1_L2',
159 | ]
160 |
161 | # If collections is a string, place in a list
162 | if type(self.collections) is str:
163 | self.collections = [self.collections]
164 |
165 | # Check that collection IDs are supported
166 | for coll_id in self.collections:
167 | if coll_id not in self._landsat_c2_sr_collections:
168 | raise ValueError(f'unsupported collection: {coll_id}')
169 |
170 | # Check that collections don't have "duplicates"
171 | # (i.e TOA and SR or TOA and TOA_RT for same Landsat)
172 | def duplicates(x):
173 | return len(x) != len(set(x))
174 | if duplicates([c.split('/')[1] for c in self.collections]):
175 | raise ValueError('duplicate landsat types in collection list')
176 |
177 | # Check start/end date
178 | if not utils.valid_date(self.start_date):
179 |             raise ValueError('start_date is not valid')
180 | elif not utils.valid_date(self.end_date):
181 | raise ValueError('end_date is not valid')
182 | elif not self.start_date < self.end_date:
183 | raise ValueError('end_date must be after start_date')
184 |
185 | # Check cloud_cover_max
186 | if (not type(self.cloud_cover_max) is int and
187 | not type(self.cloud_cover_max) is float and
188 | not utils.is_number(self.cloud_cover_max)):
189 | raise TypeError('cloud_cover_max must be a number')
190 | if type(self.cloud_cover_max) is str and utils.is_number(self.cloud_cover_max):
191 | self.cloud_cover_max = float(self.cloud_cover_max)
192 | if self.cloud_cover_max < 0 or self.cloud_cover_max > 100:
193 | raise ValueError('cloud_cover_max must be in the range 0 to 100')
194 |
195 | # # Attempt to cast string geometries to ee.Geometry
196 | # # This should work for geojson
197 | # if isinstance(self.geometry, ee.computedobject.ComputedObject):
198 | # pass
199 | # elif type(self.geometry) is str:
200 | # self.geometry = ee.Geometry(self.geometry())
201 |
202 | # Filter collection list based on start/end dates
203 | # if self.end_date <= '1982-01-01':
204 | # self.collections = [c for c in self.collections if 'LT04' not in c]
205 | # if self.start_date >= '1994-01-01':
206 | # self.collections = [c for c in self.collections if 'LT04' not in c]
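        # Drop collections that cannot have any images in the requested date
        # range, based on each sensor's approximate operational period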
207 | if self.end_date <= '1984-01-01':
208 | self.collections = [c for c in self.collections if 'LT05' not in c]
209 | if self.start_date >= '2012-01-01':
210 | self.collections = [c for c in self.collections if 'LT05' not in c]
211 | if self.end_date <= '1999-01-01':
212 | self.collections = [c for c in self.collections if 'LE07' not in c]
213 | if self.start_date >= '2022-01-01':
214 | self.collections = [c for c in self.collections if 'LE07' not in c]
215 | if self.end_date <= '2013-01-01':
216 | self.collections = [c for c in self.collections if 'LC08' not in c]
217 | if self.end_date <= '2022-01-01':
218 | self.collections = [c for c in self.collections if 'LC09' not in c]
219 |
220 | def _build(self, variables=None, start_date=None, end_date=None):
221 | """Build a merged model variable image collection
222 |
223 | Parameters
224 | ----------
225 | variables : list
226 | Set a variable list that is different than the class variable list.
227 | start_date : str, optional
228 | Set a start_date that is different than the class start_date.
229 | This is needed when defining the scene collection to have extra
230 | images for interpolation.
231 | end_date : str, optional
232 | Set an exclusive end_date that is different than the class end_date.
233 |
234 | Returns
235 | -------
236 | ee.ImageCollection
237 |
238 | Raises
239 | ------
240 | ValueError if collection IDs are invalid.
241 | ValueError if variables is not set here and in class init.
242 |
243 | """
244 | # Override the class parameters if necessary
245 | # Distinguish between variables defaulting to None, and variables being
246 | # set to an empty list, in which case the merged landsat collection
247 | # should be returned.
248 | if variables is None:
249 | if self.variables:
250 | variables = self.variables
251 | else:
252 | raise ValueError('variables parameter must be set')
253 | elif not variables:
254 | pass
255 | if not start_date:
256 | start_date = self.start_date
257 | if not end_date:
258 | end_date = self.end_date
259 |
260 | # Build the variable image collection
261 | variable_coll = ee.ImageCollection([])
262 | for coll_id in self.collections:
263 | # TODO: Move to separate methods/functions for each collection type
264 | if coll_id in self._landsat_c2_sr_collections:
265 | input_coll = (
266 | ee.ImageCollection(coll_id)
267 | .filterDate(start_date, end_date)
268 | .filterBounds(self.geometry)
269 | .filterMetadata('CLOUD_COVER_LAND', 'less_than', self.cloud_cover_max)
270 | .filterMetadata('CLOUD_COVER_LAND', 'greater_than', -0.5)
271 | )
272 |
273 | # TODO: Check if PROCESSING_LEVEL needs to be filtered on
274 | # .filterMetadata('PROCESSING_LEVEL', 'equals', 'L2SP')
275 |
276 | # TODO: Move this to a separate function (maybe in utils.py?)
277 | # since it is identical for all the supported collections
278 | if (self.filter_args is None or
279 | not isinstance(self.filter_args, dict) or
280 | coll_id not in self.filter_args.keys()):
281 | pass
282 | elif isinstance(self.filter_args[coll_id], ee.ComputedObject):
283 | input_coll = input_coll.filter(self.filter_args[coll_id])
284 | elif isinstance(self.filter_args[coll_id], list):
285 | # TODO: This generic dictionary based filtering should
286 | # probably be removed since only the "equals" filter
287 | # has been implemented and the functionality is better
288 | # handled with the other two options.
289 | for f in copy.deepcopy(self.filter_args[coll_id]):
290 | try:
291 | filter_type = f.pop('type')
292 | except KeyError:
293 | continue
294 | if filter_type.lower() == 'equals':
295 | input_coll = input_coll.filter(ee.Filter.equals(**f))
296 | else:
297 | raise ValueError('Unsupported filter_arg parameter')
298 |
299 | # TODO: Check if these bad images are in collection 2
300 | # Time filters are to remove bad (L5) and pre-op (L8) images
301 | if 'LT05' in coll_id:
302 | input_coll = input_coll.filter(
303 | ee.Filter.lt('system:time_start', ee.Date('2011-12-31').millis())
304 | )
305 | elif 'LE07' in coll_id:
306 | input_coll = input_coll.filter(
307 | ee.Filter.lt('system:time_start', ee.Date('2022-01-01').millis())
308 | )
309 | elif 'LC08' in coll_id:
310 | input_coll = input_coll.filter(
311 | ee.Filter.gt('system:time_start', ee.Date('2013-04-01').millis())
312 | )
313 | elif 'LC09' in coll_id:
314 | input_coll = input_coll.filter(
315 | ee.Filter.gt('system:time_start', ee.Date('2022-01-01').millis())
316 | )
317 |
318 | def compute_vars(image):
319 | model_obj = Image.from_landsat_c2_sr(
320 | sr_image=ee.Image(image), **self.model_args
321 | )
322 | return model_obj.calculate(variables)
323 |
324 | # Skip going into image class if variables is not set so raw
325 | # landsat collection can be returned for getting image_id_list
326 | if variables:
327 | input_coll = ee.ImageCollection(input_coll.map(compute_vars))
328 |
329 | variable_coll = variable_coll.merge(input_coll)
330 |
331 | return variable_coll
332 |
333 | def overpass(self, variables=None):
334 | """Return a collection of computed values for the overpass images
335 |
336 | Parameters
337 | ----------
338 | variables : list, optional
339 | List of variables that will be returned in the Image Collection.
340 | If variables is not set here it must be specified in the class
341 | instantiation call.
342 |
343 | Returns
344 | -------
345 | ee.ImageCollection
346 |
347 | Raises
348 | ------
349 | ValueError
350 |
351 | """
352 | # Does it make sense to use the class variable list if not set?
353 | if not variables:
354 | if self.variables:
355 | variables = self.variables
356 | else:
357 | raise ValueError('variables parameter must be set')
358 |
359 | return self._build(variables=variables)
360 |
361 | def interpolate(
362 | self,
363 | variables=None,
364 | t_interval='custom',
365 | interp_method='linear',
366 | interp_days=32,
367 | use_joins=False,
368 | **kwargs
369 | ):
370 | """
371 |
372 | Parameters
373 | ----------
374 | variables : list, optional
375 | List of variables that will be returned in the Image Collection.
376 | If variables is not set here it must be specified in the class
377 | instantiation call.
378 | t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
379 | Time interval over which to interpolate and aggregate values
380 | The default 'custom' interval will aggregate all days within the
381 | start/end dates and return an image collection with a single image.
382 |         interp_method : {'linear'}, optional
383 | Interpolation method (the default is 'linear').
384 | interp_days : int, str, optional
385 | Number of extra days before the start date and after the end date
386 | to include in the interpolation calculation. (the default is 32).
387 | use_joins : bool, optional
388 | If True, use joins to link the target and source collections.
389 | If False, the source collection will be filtered for each target image.
390 | This parameter is passed through to interpolate.daily().
391 | kwargs : dict, optional
392 |
393 | Returns
394 | -------
395 | ee.ImageCollection
396 |
397 | Raises
398 | ------
399 | ValueError for unsupported input parameters
400 | ValueError for negative interp_days values
401 | TypeError for non-integer interp_days
402 |
403 | Notes
404 | -----
405 | Not all variables can be interpolated to new time steps.
406 | Variables like reference ET are simply summed whereas ET fraction is
407 | computed from the interpolated/aggregated values.
408 |
409 | """
410 | # Check that the input parameters are valid
411 | if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
412 | raise ValueError(f'unsupported t_interval: {t_interval}')
413 | elif interp_method.lower() not in ['linear']:
414 | raise ValueError(f'unsupported interp_method: {interp_method}')
415 |
416 | if type(interp_days) is str and utils.is_number(interp_days):
417 | interp_days = int(interp_days)
418 | elif not type(interp_days) is int:
419 | raise TypeError('interp_days must be an integer')
420 | elif interp_days <= 0:
421 | raise ValueError('interp_days must be a positive integer')
422 |
423 | # Does it make sense to use the class variable list if not set?
424 | if not variables:
425 | if self.variables:
426 | variables = self.variables
427 | else:
428 | raise ValueError('variables parameter must be set')
429 |
430 | # Adjust start/end dates based on t_interval
431 | # Increase the date range to fully include the time interval
432 | start_dt = datetime.datetime.strptime(self.start_date, '%Y-%m-%d')
433 | end_dt = datetime.datetime.strptime(self.end_date, '%Y-%m-%d')
434 | if t_interval.lower() == 'annual':
435 | start_dt = datetime.datetime(start_dt.year, 1, 1)
436 |             # Convert end date to inclusive, flatten to beginning of year,
437 | # then add a year which will make it exclusive
438 | end_dt -= relativedelta(days=+1)
439 | end_dt = datetime.datetime(end_dt.year, 1, 1)
440 | end_dt += relativedelta(years=+1)
441 | elif t_interval.lower() == 'monthly':
442 | start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
443 | end_dt -= relativedelta(days=+1)
444 | end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
445 | end_dt += relativedelta(months=+1)
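            # e.g. start='2017-01-15', end='2017-02-14' with t_interval='monthly'
            #   expands to 2017-01-01 through 2017-03-01 (exclusive), covering both months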
446 | start_date = start_dt.strftime('%Y-%m-%d')
447 | end_date = end_dt.strftime('%Y-%m-%d')
448 |
449 | # The start/end date for the interpolation include more days
450 | # (+/- interp_days) than are included in the reference ET collection
451 | interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
452 | interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
453 | interp_start_date = interp_start_dt.date().isoformat()
454 | interp_end_date = interp_end_dt.date().isoformat()
455 |
456 | # Update model_args if reference ET parameters were passed to interpolate
457 | # Intentionally using model_args (instead of self.et_reference_source, etc.) in
458 | # this function since model_args is passed to Image class in _build()
459 | # if 'et' in variables or 'et_reference' in variables:
460 | et_reference_keywords = [
461 | 'et_reference_source', 'et_reference_band', 'et_reference_factor',
462 | 'et_reference_resample', 'et_reference_date_type'
463 | ]
464 | for k in et_reference_keywords:
465 | if k in kwargs.keys() and kwargs[k] is not None:
466 | self.model_args[k] = kwargs[k]
467 |
468 | # Check that all reference ET parameters were set
469 | for et_reference_param in ['et_reference_source', 'et_reference_band',
470 | 'et_reference_factor']:
471 | if et_reference_param not in self.model_args.keys():
472 | raise ValueError(f'{et_reference_param} was not set')
473 | elif not self.model_args[et_reference_param]:
474 | raise ValueError(f'{et_reference_param} was not set')
475 |
476 | if type(self.model_args['et_reference_source']) is str:
477 | # Assume a string source is a single image collection ID
478 | # not a list of collection IDs or ee.ImageCollection
479 | if ('et_reference_date_type' not in self.model_args.keys() or
480 | self.model_args['et_reference_date_type'] is None or
481 | self.model_args['et_reference_date_type'].lower() == 'daily'):
482 | daily_et_ref_coll = (
483 | ee.ImageCollection(self.model_args['et_reference_source'])
484 | .filterDate(start_date, end_date)
485 | .select([self.model_args['et_reference_band']], ['et_reference'])
486 | )
487 | elif self.model_args['et_reference_date_type'].lower() == 'doy':
488 | # Assume the image collection is a climatology with a "DOY" property
489 | def doy_image(input_img):
490 | """Return the doy-based reference et with daily time properties from GRIDMET"""
491 | image_date = ee.Algorithms.Date(input_img.get('system:time_start'))
492 | image_doy = ee.Number(image_date.getRelative('day', 'year')).add(1).int()
493 | doy_coll = (
494 | ee.ImageCollection(self.model_args['et_reference_source'])
495 | .filterMetadata('DOY', 'equals', image_doy)
496 | .select([self.model_args['et_reference_band']], ['et_reference'])
497 | )
498 | # CGM - Was there a reason to use rangeContains if limiting to one DOY?
499 | # .filter(ee.Filter.rangeContains('DOY', image_doy, image_doy))\
500 | return ee.Image(doy_coll.first())\
501 | .set({'system:index': input_img.get('system:index'),
502 | 'system:time_start': input_img.get('system:time_start')})
503 |                 # Note, the collection and band that are used are not important as
504 | # long as they are daily and available for the time period
505 | daily_et_ref_coll = (
506 | ee.ImageCollection('IDAHO_EPSCOR/GRIDMET')
507 | .filterDate(start_date, end_date).select(['eto'])
508 | .map(doy_image)
509 | )
510 | # elif isinstance(self.model_args['et_reference_source'], computedobject.ComputedObject):
511 | # # Interpret computed objects as image collections
512 | # daily_et_ref_coll = self.model_args['et_reference_source']\
513 | # .filterDate(self.start_date, self.end_date)\
514 | # .select([self.model_args['et_reference_band']])
515 | else:
516 | raise ValueError(
517 | f'unsupported et_reference_source: {self.model_args["et_reference_source"]}'
518 | )
519 |
520 | # Scale reference ET images (if necessary)
521 | # CGM - Resampling is not working correctly so not including for now
522 | if (self.model_args['et_reference_factor'] and
523 | self.model_args['et_reference_factor'] != 1):
524 | def et_reference_adjust(input_img):
525 | return (
526 | input_img
527 | .multiply(self.model_args['et_reference_factor'])
528 | .copyProperties(input_img)
529 | .set({'system:time_start': input_img.get('system:time_start')})
530 | )
531 |
532 | daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
533 |
534 | # Initialize variable list to only variables that can be interpolated
535 | interp_vars = list(set(self._interp_vars) & set(variables))
536 |
537 | # To return ET, the ET fraction must be interpolated
538 | if 'et' in variables and 'et_fraction' not in interp_vars:
539 | interp_vars.append('et_fraction')
540 |
541 | # With the current interpolate.daily() function,
542 | # something has to be interpolated in order to return et_reference
543 | if 'et_reference' in variables and 'et_fraction' not in interp_vars:
544 | interp_vars.append('et_fraction')
545 |
546 | # The time band is always needed for interpolation
547 | interp_vars.append('time')
548 |
549 | # Count will be determined using the aggregate_coll image masks
550 | if 'count' in variables:
551 | interp_vars.append('mask')
552 | # interp_vars.remove('count')
553 |
554 | # Build initial scene image collection
555 | scene_coll = self._build(
556 | variables=interp_vars, start_date=interp_start_date, end_date=interp_end_date,
557 | )
558 |
559 | # For count, compute the composite/mosaic image for the mask band only
560 | if 'count' in variables:
561 | aggregate_coll = openet.core.interpolate.aggregate_to_daily(
562 | image_coll=scene_coll.select(['mask']),
563 | start_date=start_date,
564 | end_date=end_date,
565 | )
566 |
567 | # The following is needed because the aggregate collection can be
568 | # empty if there are no scenes in the target date range but there
569 | # are scenes in the interpolation date range.
570 | # Without this the count image will not be built but the other
571 |             # bands will be, which causes a non-homogeneous image collection.
572 | aggregate_coll = aggregate_coll.merge(
573 | ee.Image.constant(0).rename(['mask'])
574 | .set({'system:time_start': ee.Date(start_date).millis()})
575 | )
576 |
577 | # Including count/mask causes problems in interpolate.daily() function.
578 | # Issues with mask being an int but the values need to be double.
579 | # Casting the mask band to a double would fix this problem also.
580 | if 'mask' in interp_vars:
581 | interp_vars.remove('mask')
582 |
583 | # Interpolate to a daily time step
584 | # NOTE: the daily function is not computing ET (ETf x ETr)
585 | # but is returning the target (ETr) band
586 | daily_coll = openet.core.interpolate.daily(
587 | target_coll=daily_et_ref_coll,
588 | source_coll=scene_coll.select(interp_vars),
589 | interp_method=interp_method,
590 | interp_days=interp_days,
591 | use_joins=use_joins,
592 | compute_product=False,
593 | # resample_method=et_reference_resample,
594 | )
595 |
596 | # Compute ET from ET fraction and reference ET (if necessary)
597 | # CGM - The conditional is needed if only interpolating NDVI
598 | if ('et' in variables) or ('et_fraction' in variables):
599 | def compute_et(img):
600 | """This function assumes reference ET and ET fraction are present"""
601 | # Apply any resampling to the reference ET image before computing ET
602 | et_ref_img = img.select(['et_reference'])
603 | if (self.model_args['et_reference_resample'] and
604 | (self.model_args['et_reference_resample'] in ['bilinear', 'bicubic'])):
605 | et_ref_img = et_ref_img.resample(self.model_args['et_reference_resample'])
606 |
607 | et_img = img.select(['et_fraction']).multiply(et_ref_img)
608 |
609 | return img.addBands(et_img.rename('et'))
610 |
611 | daily_coll = daily_coll.map(compute_et)
612 |
613 | interp_properties = {
614 | 'cloud_cover_max': self.cloud_cover_max,
615 | 'collections': ', '.join(self.collections),
616 | 'interp_days': interp_days,
617 | 'interp_method': interp_method,
618 | 'model_name': metadata.metadata('openet-ssebop')['Name'],
619 | 'model_version': metadata.metadata('openet-ssebop')['Version'],
620 | # 'model_name': openet.ssebop.MODEL_NAME,
621 | # 'model_version': openet.ssebop.__version__,
622 | }
623 | interp_properties.update(self.model_args)
624 |
625 | def aggregate_image(agg_start_date, agg_end_date, date_format):
626 | """Aggregate the daily images within the target date range
627 |
628 | Parameters
629 | ----------
630 | agg_start_date: str
631 | Start date (inclusive).
632 | agg_end_date : str
633 | End date (exclusive).
634 | date_format : str
635 |                 Date format for system:index (uses EE Joda-Time format).
636 |
637 | Returns
638 | -------
639 | ee.Image
640 |
641 | Notes
642 | -----
643 |             Since this function takes multiple inputs, it is called for each
644 |             time interval by separate mappable functions
645 |
646 | """
647 | if ('et' in variables) or ('et_fraction' in variables):
648 | et_img = (
649 | daily_coll.filterDate(agg_start_date, agg_end_date)
650 | .select(['et']).sum()
651 | )
652 |
653 | if ('et_reference' in variables) or ('et_fraction' in variables):
654 | et_reference_img = (
655 | daily_coll.filterDate(agg_start_date, agg_end_date)
656 | .select(['et_reference']).sum()
657 | )
658 | if (self.model_args['et_reference_resample'] and
659 | (self.model_args['et_reference_resample'] in ['bilinear', 'bicubic'])):
660 | et_reference_img = (
661 | et_reference_img
662 | .setDefaultProjection(daily_et_ref_coll.first().projection())
663 | .resample(self.model_args['et_reference_resample'])
664 | )
665 |
666 | image_list = []
667 | if 'et' in variables:
668 | image_list.append(et_img.float())
669 | if 'et_reference' in variables:
670 | image_list.append(et_reference_img.float())
671 | if 'et_fraction' in variables:
672 | # Compute average et fraction over the aggregation period
673 | image_list.append(
674 | et_img.divide(et_reference_img).rename(['et_fraction']).float()
675 | )
676 | if 'ndvi' in variables:
677 | # Compute average ndvi over the aggregation period
678 | ndvi_img = (
679 | daily_coll.filterDate(agg_start_date, agg_end_date)
680 | .mean().select(['ndvi']).float()
681 | )
682 | image_list.append(ndvi_img)
683 | if 'count' in variables:
684 | count_img = (
685 | aggregate_coll.filterDate(agg_start_date, agg_end_date)
686 | .select(['mask']).reduce(ee.Reducer.count()).rename('count').uint8()
687 | )
688 | image_list.append(count_img)
689 |
690 | return (
691 | ee.Image(image_list)
692 | .set(interp_properties)
693 | .set({
694 | 'system:index': ee.Date(agg_start_date).format(date_format),
695 | 'system:time_start': ee.Date(agg_start_date).millis(),
696 | })
697 | )
698 |
699 | # Combine input, interpolated, and derived values
700 | if t_interval.lower() == 'daily':
701 | def aggregate_daily(daily_img):
702 | # CGM - Double check that this time_start is a 0 UTC time.
703 | # It should be since it is coming from the interpolate source
704 | # collection, but what if source is GRIDMET (+6 UTC)?
705 | agg_start_date = ee.Date(daily_img.get('system:time_start'))
706 | # CGM - This calls .sum() on collections with only one image
707 | return aggregate_image(
708 | agg_start_date=agg_start_date,
709 | agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
710 | date_format='YYYYMMdd',
711 | )
712 |
713 | return ee.ImageCollection(daily_coll.map(aggregate_daily))
714 |
715 | elif t_interval.lower() == 'monthly':
716 | def month_gen(iter_start_dt, iter_end_dt):
717 | iter_dt = iter_start_dt
718 | # Conditional is "less than" because end date is exclusive
719 | while iter_dt < iter_end_dt:
720 | yield iter_dt.strftime('%Y-%m-%d')
721 | iter_dt += relativedelta(months=+1)
722 | month_list = ee.List(list(month_gen(start_dt, end_dt)))
723 |
724 | def aggregate_monthly(agg_start_date):
725 | return aggregate_image(
726 | agg_start_date=agg_start_date,
727 | agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
728 | date_format='YYYYMM',
729 | )
730 |
731 | return ee.ImageCollection(month_list.map(aggregate_monthly))
732 |
733 | elif t_interval.lower() == 'annual':
734 | def year_gen(iter_start_dt, iter_end_dt):
735 | iter_dt = iter_start_dt
736 | while iter_dt < iter_end_dt:
737 | yield iter_dt.strftime('%Y-%m-%d')
738 | iter_dt += relativedelta(years=+1)
739 | year_list = ee.List(list(year_gen(start_dt, end_dt)))
740 |
741 | def aggregate_annual(agg_start_date):
742 | return aggregate_image(
743 | agg_start_date=agg_start_date,
744 | agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
745 | date_format='YYYY',
746 | )
747 |
748 | return ee.ImageCollection(year_list.map(aggregate_annual))
749 |
750 | elif t_interval.lower() == 'custom':
751 | # Returning an ImageCollection to be consistent
752 | return ee.ImageCollection(aggregate_image(
753 | agg_start_date=start_date, agg_end_date=end_date, date_format='YYYYMMdd',
754 | ))
755 |
756 | def get_image_ids(self):
757 | """Return image IDs of the input images
758 |
759 | Returns
760 | -------
761 | list
762 |
763 | Notes
764 | -----
765 | This image list is based on the collection start and end dates and may
766 | not include all of the images used for interpolation.
767 |
768 | """
769 | # CGM - Setting variables to None bypasses the Image class, so image_id
770 | # is not set and merge indices must be removed from the system:index
771 | return list(utils.getinfo(self._build(variables=[]).aggregate_array('system:id')))
772 | # return list(utils.getinfo(self._build(variables=['ndvi']).aggregate_array('image_id')))
773 |
--------------------------------------------------------------------------------
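A minimal, hedged usage sketch for the Collection class above (the dates,
point geometry, and reference ET settings are illustrative assumptions, not
values prescribed by the module):

    import ee
    import openet.ssebop

    ee.Initialize()

    coll = openet.ssebop.Collection(
        collections=['LANDSAT/LC08/C02/T1_L2'],
        start_date='2020-06-01',
        end_date='2020-09-01',
        geometry=ee.Geometry.Point(-119.7, 36.8),
        cloud_cover_max=70,
        et_reference_source='IDAHO_EPSCOR/GRIDMET',
        et_reference_band='eto',
        et_reference_factor=1.0,
        et_reference_resample='nearest',
    )

    # Scene-date values for the overpass images
    overpass_coll = coll.overpass(variables=['ndvi', 'et_fraction'])

    # Interpolated/aggregated monthly values
    monthly_coll = coll.interpolate(
        variables=['et', 'et_reference', 'et_fraction', 'count'],
        t_interval='monthly',
    )
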
/openet/ssebop/interpolate.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | import logging
3 |
4 | from dateutil.relativedelta import relativedelta
5 | import ee
6 | import openet.core.interpolate
7 | # TODO: import utils from openet.core
8 | # import openet.core.utils as utils
9 |
10 | from . import utils
11 |
12 | RESAMPLE_METHODS = ['nearest', 'bilinear', 'bicubic']
13 |
14 | def from_scene_et_fraction(
15 | scene_coll,
16 | start_date,
17 | end_date,
18 | variables,
19 | interp_args,
20 | model_args,
21 | t_interval,
22 | _interp_vars=['et_fraction', 'ndvi'],
23 | ):
24 | """Interpolate from a precomputed collection of Landsat ET fraction scenes
25 |
26 | Parameters
27 | ----------
28 | scene_coll : ee.ImageCollection
29 | Non-daily 'et_fraction' images that will be interpolated.
30 | start_date : str
31 | ISO format start date.
32 | end_date : str
33 | ISO format end date (exclusive, passed directly to .filterDate()).
34 | variables : list
35 | List of variables that will be returned in the Image Collection.
36 | interp_args : dict
37 | Parameters from the INTERPOLATE section of the INI file.
38 | # TODO: Look into a better format for showing the options
39 |         interp_method : {'linear'}, optional
40 | Interpolation method. The default is 'linear'.
41 | interp_days : int, str, optional
42 | Number of extra days before the start date and after the end date
43 | to include in the interpolation calculation. The default is 32.
44 | et_reference_source : str
45 | Reference ET collection ID.
46 | et_reference_band : str
47 | Reference ET band name.
48 | et_reference_factor : float, None, optional
49 | Reference ET scaling factor. The default is 1.0 which is
50 | equivalent to no scaling.
51 | et_reference_resample : {'nearest', 'bilinear', 'bicubic', None}, optional
52 | Reference ET resampling. The default is 'nearest'.
53 | mask_partial_aggregations : bool, optional
54 | If True, pixels with an aggregation count less than the number of
55 | days in the aggregation time period will be masked. The default is True.
56 | use_joins : bool, optional
57 | If True, use joins to link the target and source collections.
58 | If False, the source collection will be filtered for each target image.
59 | This parameter is passed through to interpolate.daily().
60 | model_args : dict
61 | Parameters from the MODEL section of the INI file.
62 | t_interval : {'daily', 'monthly', 'custom'}
63 | Time interval over which to interpolate and aggregate values
64 | The 'custom' interval will aggregate all days within the start and end
65 | dates into an image collection with a single image.
66 | _interp_vars : list, optional
67 | The variables that can be interpolated to daily timesteps.
68 | The default is to interpolate the 'et_fraction' and 'ndvi' bands.
69 |
70 | Returns
71 | -------
72 | ee.ImageCollection
73 |
74 | Raises
75 | ------
76 | ValueError
77 |
78 | Notes
79 | -----
80 | This function assumes that "mask" and "time" bands are not in the scene collection.
81 |
82 | """
83 | # Get interp_method
84 | if 'interp_method' in interp_args.keys():
85 | interp_method = interp_args['interp_method']
86 | else:
87 | interp_method = 'linear'
88 | logging.debug('interp_method was not set in interp_args, default to "linear"')
89 |
90 | # Get interp_days
91 | if 'interp_days' in interp_args.keys():
92 | interp_days = interp_args['interp_days']
93 | else:
94 | interp_days = 32
95 | logging.debug('interp_days was not set in interp_args, default to 32')
96 |
97 | # Get mask_partial_aggregations
98 | if 'mask_partial_aggregations' in interp_args.keys():
99 | mask_partial_aggregations = interp_args['mask_partial_aggregations']
100 | else:
101 | mask_partial_aggregations = True
102 | logging.debug('mask_partial_aggregations was not set in interp_args, default to True')
103 |
104 | # Get use_joins
105 | if 'use_joins' in interp_args.keys():
106 | use_joins = interp_args['use_joins']
107 | else:
108 | use_joins = True
109 | logging.debug('use_joins was not set in interp_args, default to True')
110 |
111 | # Check that the input parameters are valid
112 | if t_interval.lower() not in ['daily', 'monthly', 'custom']:
113 | raise ValueError(f'unsupported t_interval: {t_interval}')
114 | elif interp_method.lower() not in ['linear']:
115 | raise ValueError(f'unsupported interp_method: {interp_method}')
116 |
117 | if (((type(interp_days) is str) or (type(interp_days) is float)) and
118 | utils.is_number(interp_days)):
119 | interp_days = int(interp_days)
120 | elif not type(interp_days) is int:
121 | raise TypeError('interp_days must be an integer')
122 | elif interp_days <= 0:
123 | raise ValueError('interp_days must be a positive integer')
124 |
125 | if not variables:
126 | raise ValueError('variables parameter must be set')
127 |
128 | # Adjust start/end dates based on t_interval
129 | # Increase the date range to fully include the time interval
130 | start_dt = datetime.strptime(start_date, '%Y-%m-%d')
131 | end_dt = datetime.strptime(end_date, '%Y-%m-%d')
132 | if t_interval.lower() == 'monthly':
133 | start_dt = datetime(start_dt.year, start_dt.month, 1)
134 | end_dt -= relativedelta(days=+1)
135 | end_dt = datetime(end_dt.year, end_dt.month, 1)
136 | end_dt += relativedelta(months=+1)
137 | start_date = start_dt.strftime('%Y-%m-%d')
138 | end_date = end_dt.strftime('%Y-%m-%d')
139 |
140 | # The start/end date for the interpolation include more days
141 | # (+/- interp_days) than are included in the ETr collection
142 | interp_start_dt = start_dt - timedelta(days=interp_days)
143 | interp_end_dt = end_dt + timedelta(days=interp_days)
144 | interp_start_date = interp_start_dt.date().isoformat()
145 | interp_end_date = interp_end_dt.date().isoformat()
146 |
147 | # Get reference ET parameters
148 |     # Support reading the parameters from both the interp_args and model_args dictionaries
149 | # Check interp_args then model_args, and eventually drop support for reading from model_args
150 | # Assume that if source and band are present, factor and resample should also be read
151 | if ('et_reference_source' in interp_args.keys()) and ('et_reference_band' in interp_args.keys()):
152 | et_reference_source = interp_args['et_reference_source']
153 | et_reference_band = interp_args['et_reference_band']
154 | if not et_reference_source or not et_reference_band:
155 | raise ValueError('et_reference_source or et_reference_band were not set')
156 |
157 | if 'et_reference_factor' in interp_args.keys():
158 | et_reference_factor = interp_args['et_reference_factor']
159 | else:
160 | et_reference_factor = 1.0
161 | logging.debug('et_reference_factor was not set, default to 1.0')
162 |
163 | if 'et_reference_resample' in interp_args.keys():
164 | et_reference_resample = interp_args['et_reference_resample'].lower()
165 | if not et_reference_resample:
166 | et_reference_resample = 'nearest'
167 | logging.debug('et_reference_resample was not set, default to nearest')
168 | elif et_reference_resample not in RESAMPLE_METHODS:
169 | raise ValueError(f'unsupported et_reference_resample method: '
170 | f'{et_reference_resample}')
171 | else:
172 | et_reference_resample = 'nearest'
173 | logging.debug('et_reference_resample was not set, default to nearest')
174 |
175 | elif ('et_reference_source' in model_args.keys()) and ('et_reference_band' in model_args.keys()):
176 | et_reference_source = model_args['et_reference_source']
177 | et_reference_band = model_args['et_reference_band']
178 | if not et_reference_source or not et_reference_band:
179 | raise ValueError('et_reference_source or et_reference_band were not set')
180 |
181 | if 'et_reference_factor' in model_args.keys():
182 | et_reference_factor = model_args['et_reference_factor']
183 | else:
184 | et_reference_factor = 1.0
185 | logging.debug('et_reference_factor was not set, default to 1.0')
186 |
187 | if 'et_reference_resample' in model_args.keys():
188 | et_reference_resample = model_args['et_reference_resample'].lower()
189 | if not et_reference_resample:
190 | et_reference_resample = 'nearest'
191 | logging.debug('et_reference_resample was not set, default to nearest')
192 | elif et_reference_resample not in RESAMPLE_METHODS:
193 | raise ValueError(f'unsupported et_reference_resample method: '
194 | f'{et_reference_resample}')
195 | else:
196 | et_reference_resample = 'nearest'
197 | logging.debug('et_reference_resample was not set, default to nearest')
198 |
199 | else:
200 | raise ValueError('et_reference_source or et_reference_band were not set')
201 |
202 |
203 | if 'et_reference_date_type' in model_args.keys():
204 | et_reference_date_type = model_args['et_reference_date_type']
205 | else:
206 | et_reference_date_type = None
207 | # logging.debug('et_reference_date_type was not set, default to "daily"')
208 | # et_reference_date_type = 'daily'
209 |
210 |
211 | if type(et_reference_source) is str:
212 | # Assume a string source is a single image collection ID
213 | # not a list of collection IDs or ee.ImageCollection
214 | if (et_reference_date_type is None) or (et_reference_date_type.lower() == 'daily'):
215 | daily_et_ref_coll = (
216 | ee.ImageCollection(et_reference_source)
217 | .filterDate(start_date, end_date)
218 | .select([et_reference_band], ['et_reference'])
219 | )
220 | elif et_reference_date_type.lower() == 'doy':
221 | # Assume the image collection is a climatology with a "DOY" property
222 | def doy_image(input_img):
223 | """Return the doy-based reference et with daily time properties from GRIDMET"""
224 | image_date = ee.Algorithms.Date(input_img.get('system:time_start'))
225 | image_doy = ee.Number(image_date.getRelative('day', 'year')).add(1).int()
226 | doy_coll = (
227 | ee.ImageCollection(et_reference_source)
228 | .filterMetadata('DOY', 'equals', image_doy)
229 | .select([et_reference_band], ['et_reference'])
230 | )
231 | # CGM - Was there a reason to use rangeContains if limiting to one DOY?
232 |             #     .filter(ee.Filter.rangeContains('DOY', image_doy, image_doy))\
233 | return (
234 | ee.Image(doy_coll.first())
235 | .set({'system:index': input_img.get('system:index'),
236 | 'system:time_start': input_img.get('system:time_start')})
237 | )
238 | # Note, the collection and band that are used are not important as
239 | # long as they are daily and available for the time period
240 | daily_et_ref_coll = (
241 | ee.ImageCollection('IDAHO_EPSCOR/GRIDMET')
242 | .filterDate(start_date, end_date)
243 | .select(['eto'])
244 | .map(doy_image)
245 | )
246 | else:
247 | raise ValueError(f'unsupported et_reference_date_type: {et_reference_date_type}')
248 | # elif isinstance(et_reference_source, computedobject.ComputedObject):
249 | # # Interpret computed objects as image collections
250 | # daily_et_ref_coll = (
251 | # et_reference_source
252 | # .filterDate(start_date, end_date)
253 | # .select([et_reference_band], ['et_reference'])
254 | # )
255 | else:
256 | raise ValueError(f'unsupported et_reference_source: {et_reference_source}')
257 |
258 | # Scale reference ET images (if necessary)
259 | if et_reference_factor and (et_reference_factor != 1):
260 | def et_reference_adjust(input_img):
261 | return (
262 | input_img.multiply(et_reference_factor)
263 | .copyProperties(input_img)
264 | .set({'system:time_start': input_img.get('system:time_start')})
265 | )
266 | daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
267 |
268 | # Initialize variable list to only variables that can be interpolated
269 | interp_vars = list(set(_interp_vars) & set(variables))
270 |
271 | # To return ET, the ETf must be interpolated
272 | if ('et' in variables) and ('et_fraction' not in interp_vars):
273 | interp_vars = interp_vars + ['et_fraction']
274 |
275 | # With the current interpolate.daily() function,
276 | # something has to be interpolated in order to return et_reference
277 | if ('et_reference' in variables) and ('et_fraction' not in interp_vars):
278 | interp_vars = interp_vars + ['et_fraction']
279 |
280 | # To compute the daily count, the ETf must be interpolated
281 | # We may want to add support for computing daily_count when interpolating NDVI
282 | if ('daily_count' in variables) and ('et_fraction' not in interp_vars):
283 | interp_vars = interp_vars + ['et_fraction']
284 |
285 | # TODO: Look into implementing et_fraction clamping here
286 | # (similar to et_actual below)
287 |
288 | def interpolate_prep(img):
289 | """Prep WRS2 scene images for interpolation
290 |
291 | "Unscale" the images using the "scale_factor_et_fraction" property
292 | and convert to double.
293 | Add a mask and time band to each image in the scene_coll since
294 | interpolator is assuming time and mask bands exist.
295 | The interpolation could be modified to get the mask from the
296 | time band instead of setting it here.
297 | The time image must be the 0 UTC time
298 |
299 | """
300 | mask_img = (
301 | img.select(['et_fraction']).multiply(0).add(1).updateMask(1).uint8().rename(['mask'])
302 | )
303 | time_img = (
304 | img.select(['et_fraction']).double().multiply(0)
305 | .add(utils.date_0utc(ee.Date(img.get('system:time_start'))).millis())
306 | .rename(['time'])
307 | )
308 |
309 | # Set the default scale factor to 1 if the image does not have the property
310 | scale_factor = (
311 | ee.Dictionary({'scale_factor': img.get('scale_factor_et_fraction')})
312 | .combine({'scale_factor': 1.0}, overwrite=False)
313 | )
314 |
315 | return (
316 | img.select(interp_vars)
317 | .double().multiply(ee.Number(scale_factor.get('scale_factor')))
318 | .addBands([mask_img, time_img])
319 | .set({
320 | 'system:time_start': ee.Number(img.get('system:time_start')),
321 | 'system:index': ee.String(img.get('system:index')),
322 | })
323 | )
324 |
325 | # Filter scene collection to the interpolation range
326 | # This probably isn't needed since scene_coll was built to this range
327 | # Then add the time and mask bands needed for interpolation
328 | scene_coll = ee.ImageCollection(
329 | scene_coll.filterDate(interp_start_date, interp_end_date)
330 | .map(interpolate_prep)
331 | )
332 |
333 | # For scene count, compute the composite/mosaic image for the mask band only
334 | if ('scene_count' in variables) or ('count' in variables):
335 | aggregate_coll = openet.core.interpolate.aggregate_to_daily(
336 | image_coll=scene_coll.select(['mask']),
337 | start_date=start_date,
338 | end_date=end_date,
339 | )
340 |
341 | # The following is needed because the aggregate collection can be
342 | # empty if there are no scenes in the target date range but there
343 | # are scenes in the interpolation date range.
344 | # Without this the count image will not be built but the other
345 | # bands will be which causes a non-homogeneous image collection.
346 | aggregate_coll = aggregate_coll.merge(
347 | ee.Image.constant(0).rename(['mask'])
348 | .set({'system:time_start': ee.Date(start_date).millis()})
349 | )
350 |
351 | # Interpolate to a daily time step
352 | # The time band is needed for interpolation
353 | daily_coll = openet.core.interpolate.daily(
354 | target_coll=daily_et_ref_coll,
355 | source_coll=scene_coll.select(interp_vars + ['time']),
356 | interp_method=interp_method,
357 | interp_days=interp_days,
358 | use_joins=use_joins,
359 | compute_product=False,
360 | )
361 |
362 | # The interpolate.daily() function can/will return the product of
363 | # the source and target image named as "{source_band}_1".
364 | # The problem with this approach is that it will drop any other bands
365 | # that are being interpolated (such as the ndvi).
366 | # daily_coll = daily_coll.select(['et_fraction_1'], ['et'])
367 |
368 | # Compute ET from ETf and ETr (if necessary)
369 | # This isn't needed if compute_product=True in daily() and band is renamed
370 | # The check for et_fraction is needed since it is back computed from ET and ETr
371 | # if 'et' in variables or 'et_fraction' in variables:
372 | def compute_et(img):
373 | """This function assumes ETf and ETr bands are present in the image"""
374 | # Apply any resampling to the reference ET image before computing ET
375 | et_reference_img = img.select(['et_reference'])
376 | if et_reference_resample and (et_reference_resample in ['bilinear', 'bicubic']):
377 | et_reference_img = et_reference_img.resample(et_reference_resample)
378 |
379 | et_img = img.select(['et_fraction']).multiply(et_reference_img)
380 |
381 | return img.addBands(et_img.double().rename('et'))
382 |
383 | daily_coll = daily_coll.map(compute_et)
384 |
385 | # This function is being declared here to avoid passing in all the common parameters
386 | # such as: daily_coll, daily_et_ref_coll, interp_properties, variables, etc.
387 | # Long term it should probably be declared outside of this function
388 | # so it can be called directly and tested separately, or read from openet-core
389 | def aggregate_image(agg_start_date, agg_end_date, date_format):
390 | """Aggregate the daily images within the target date range
391 |
392 | Parameters
393 | ----------
394 | agg_start_date: ee.Date, str
395 | Start date (inclusive).
396 | agg_end_date : ee.Date, str
397 | End date (exclusive).
398 | date_format : str
399 |             Date format for system:index (uses EE Joda-Time format).
400 |
401 | Returns
402 | -------
403 | ee.Image
404 |
405 | """
406 | et_img = None
407 | eto_img = None
408 |
409 | if ('et' in variables) or ('et_fraction' in variables):
410 | et_img = daily_coll.filterDate(agg_start_date, agg_end_date).select(['et']).sum()
411 |
412 | if ('et_reference' in variables) or ('et_fraction' in variables):
413 | eto_img = (
414 | daily_et_ref_coll.filterDate(agg_start_date, agg_end_date)
415 | .select(['et_reference']).sum()
416 | )
417 | if et_reference_resample and (et_reference_resample in ['bilinear', 'bicubic']):
418 | eto_img = (
419 | eto_img.setDefaultProjection(daily_et_ref_coll.first().projection())
420 | .resample(et_reference_resample)
421 | )
422 |
423 | # Count the number of interpolated/aggregated values
424 | # Mask pixels that do not have a full aggregation count for the start/end
425 | # Use "et" band so that count is a function of ET and reference ET
426 | if ('et' in variables) or ('et_fraction' in variables) or ('et_reference' in variables):
427 | aggregation_band = 'et'
428 | elif 'ndvi' in variables:
429 | aggregation_band = 'ndvi'
430 | else:
431 | raise ValueError('no supported aggregation band')
432 | aggregation_count_img = (
433 | daily_coll.filterDate(agg_start_date, agg_end_date)
434 | .select([aggregation_band]).reduce(ee.Reducer.count())
435 | )
436 |
437 | image_list = []
438 | if 'et' in variables:
439 | image_list.append(et_img.float())
440 | if 'et_reference' in variables:
441 | image_list.append(eto_img.float())
442 | if 'et_fraction' in variables:
443 | # Compute average et fraction over the aggregation period
444 | image_list.append(et_img.divide(eto_img).rename(['et_fraction']).float())
445 | if 'ndvi' in variables:
446 | # Compute average NDVI over the aggregation period
447 | ndvi_img = (
448 | daily_coll.filterDate(agg_start_date, agg_end_date)
449 | .select(['ndvi']).mean().float()
450 | )
451 | image_list.append(ndvi_img)
452 | if ('scene_count' in variables) or ('count' in variables):
453 | scene_count_img = (
454 | aggregate_coll.filterDate(agg_start_date, agg_end_date)
455 | .select(['mask']).reduce(ee.Reducer.sum()).rename('count')
456 | .uint8()
457 | )
458 | image_list.append(scene_count_img)
459 | if 'daily_count' in variables:
460 | image_list.append(aggregation_count_img.rename('daily_count').uint8())
461 |
462 | output_img = ee.Image(image_list)
463 |
464 | if mask_partial_aggregations:
465 | aggregation_days = ee.Date(agg_end_date).difference(ee.Date(agg_start_date), 'day')
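            # Note: gte(days - 1) tolerates a single missing daily value, so a
            # pixel is masked only when two or more days in the window are missing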
466 | aggregation_count_mask = aggregation_count_img.gte(aggregation_days.subtract(1))
467 | output_img = output_img.updateMask(aggregation_count_mask)
468 |
469 | return (
470 | output_img
471 | .set({
472 | 'system:index': ee.Date(agg_start_date).format(date_format),
473 | 'system:time_start': ee.Date(agg_start_date).millis(),
474 | })
475 | )
476 |
477 | # Combine input, interpolated, and derived values
478 | if t_interval.lower() == 'custom':
479 | # Return an ImageCollection to be consistent with the other t_interval options
480 | return ee.ImageCollection(aggregate_image(
481 | agg_start_date=start_date,
482 | agg_end_date=end_date,
483 | date_format='YYYYMMdd',
484 | ))
485 | elif t_interval.lower() == 'daily':
486 | def agg_daily(daily_img):
487 | # CGM - Double check that this time_start is a 0 UTC time.
488 | # It should be since it is coming from the interpolate source
489 | # collection, but what if source is GRIDMET (+6 UTC)?
490 | agg_start_date = ee.Date(daily_img.get('system:time_start'))
491 | # This calls .sum() on collections with only one image
492 | return aggregate_image(
493 | agg_start_date=agg_start_date,
494 | agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
495 | date_format='YYYYMMdd',
496 | )
497 | return ee.ImageCollection(daily_coll.map(agg_daily))
498 | elif t_interval.lower() == 'monthly':
499 | def month_gen(iter_start_dt, iter_end_dt):
500 | iter_dt = iter_start_dt
501 | # Conditional is "less than" because end date is exclusive
502 | while iter_dt < iter_end_dt:
503 | yield iter_dt.strftime('%Y-%m-%d')
504 | iter_dt += relativedelta(months=+1)
505 | def agg_monthly(agg_start_date):
506 | return aggregate_image(
507 | agg_start_date=agg_start_date,
508 | agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
509 | date_format='YYYYMM',
510 | )
511 | return ee.ImageCollection(ee.List(list(month_gen(start_dt, end_dt))).map(agg_monthly))
512 | else:
513 | raise ValueError(f'unsupported t_interval: {t_interval}')
514 |
--------------------------------------------------------------------------------
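
A minimal usage sketch for the aggregation logic above, which the tests in
openet/ssebop/tests/test_d_collection.py exercise through Collection(...).interpolate().
The collection ID, dates, point, and constant Tcorr are example values taken from
those tests, not recommended defaults.

    import ee
    import openet.ssebop as ssebop

    ee.Initialize()

    coll_obj = ssebop.Collection(
        collections=['LANDSAT/LC08/C02/T1_L2'],
        geometry=ee.Geometry.Point(-121.9, 39.0),
        start_date='2017-07-01',
        end_date='2017-08-01',  # exclusive
        variables=['et', 'et_reference', 'et_fraction'],
        cloud_cover_max=70,
        et_reference_source='IDAHO_EPSCOR/GRIDMET',
        et_reference_band='etr',
        et_reference_factor=0.85,
        et_reference_resample='nearest',
        model_args={'tcorr_source': 0.99},  # constant Tcorr, as in the tests
    )

    # 'monthly' returns one aggregated image per month in the date range,
    # 'daily' returns one image per day, and 'custom' returns a single image
    # spanning the full start/end date range
    monthly_coll = coll_obj.interpolate(t_interval='monthly')
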
/openet/ssebop/ipytest.py:
--------------------------------------------------------------------------------
1 | # ===============================================================================
2 | # Copyright 2019 Gabriel Parrish
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ===============================================================================
15 | import os
16 |
17 | def print_where():
18 | print('this is definitely the branch you want')
--------------------------------------------------------------------------------
/openet/ssebop/landsat.py:
--------------------------------------------------------------------------------
1 | import ee
2 |
3 |
4 | def emissivity(landsat_image):
5 | """Emissivity as a function of NDVI
6 |
7 | Parameters
8 | ----------
9 | landsat_image : ee.Image
10 | "Prepped" Landsat image with standardized band names.
11 |
12 | Returns
13 | -------
14 | ee.Image
15 |
16 | References
17 | ----------
18 | .. [Sobrino2004] Sobrino, J., J. Jiménez-Muñoz, & L. Paolini (2004).
19 | Land surface temperature retrieval from LANDSAT TM 5.
20 | Remote Sensing of Environment, 90(4), 434-440.
21 | https://doi.org/10.1016/j.rse.2004.02.003
22 |
23 | """
24 | ndvi_img = ndvi(landsat_image)
25 | pv = ndvi_img.expression('((ndvi - 0.2) / 0.3) ** 2', {'ndvi': ndvi_img})
26 |
27 |     # Assuming a typical soil emissivity of 0.97, a vegetation emissivity of 0.99,
28 |     # and a shape factor mean value of 0.55
29 | de = pv.expression('(1 - 0.97) * (1 - Pv) * (0.55 * 0.99)', {'Pv': pv})
30 | range_emis = de.expression('(0.99 * Pv) + (0.97 * (1 - Pv)) + dE', {'Pv': pv, 'dE': de})
31 |
32 | return (
33 | ndvi_img
34 | .where(ndvi_img.lt(0), 0.985)
35 | .where(ndvi_img.gte(0).And(ndvi_img.lt(0.2)), 0.977)
36 | .where(ndvi_img.gt(0.5), 0.99)
37 | .where(ndvi_img.gte(0.2).And(ndvi_img.lte(0.5)), range_emis)
38 | .clamp(0.977, 0.99)
39 | .rename(['emissivity'])
40 | )
41 |
42 |
43 | def lst(landsat_image):
44 |     """Emissivity corrected land surface temperature (LST) from brightness temperature (Ts).
45 |
46 | Parameters
47 | ----------
48 | landsat_image : ee.Image
49 | "Prepped" Landsat image with standardized band names.
50 | Image must also have 'k1_constant' and 'k2_constant' properties.
51 |
52 | Returns
53 | -------
54 | ee.Image
55 |
56 |     Notes
57 |     -----
58 |     The corrected radiation coefficients were derived from a small number
59 |     of scenes in southern Idaho [Allen2007] and may not be appropriate for
60 |     other areas.
61 |
62 |     The correction uses the following coefficient values
63 |     (see [Allen2007] for details):
64 |         tnb = 0.866   # narrow band transmissivity of air
65 |         rp = 0.91     # path radiance
66 |         rsky = 1.32   # narrow band clear sky downward thermal radiation
67 |
68 |     References
69 |     ----------
70 |     .. [Allen2007] R. Allen, M. Tasumi, R. Trezza (2007),
71 |         Satellite-Based Energy Balance for Mapping Evapotranspiration with
72 |         Internalized Calibration (METRIC) Model,
73 |         Journal of Irrigation and Drainage Engineering, Vol 133(4),
74 |         http://dx.doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
75 |
76 |     """
77 | # Get properties from image
78 | k1 = ee.Number(ee.Image(landsat_image).get('k1_constant'))
79 | k2 = ee.Number(ee.Image(landsat_image).get('k2_constant'))
80 |
81 | ts_brightness = ee.Image(landsat_image).select(['tir'])
82 | emissivity_img = emissivity(landsat_image)
83 |
84 | # First back out radiance from brightness temperature
85 | # Then recalculate emissivity corrected Ts
86 | thermal_rad_toa = ts_brightness.expression(
87 | 'k1 / (exp(k2 / ts_brightness) - 1)',
88 | {'ts_brightness': ts_brightness, 'k1': k1, 'k2': k2}
89 | )
90 |
91 | rc = thermal_rad_toa.expression(
92 | '((thermal_rad_toa - rp) / tnb) - ((1 - emiss) * rsky)',
93 | {
94 | 'thermal_rad_toa': thermal_rad_toa,
95 | 'emiss': emissivity_img,
96 | 'rp': 0.91, 'tnb': 0.866, 'rsky': 1.32,
97 | }
98 | )
99 | lst = rc.expression(
100 | 'k2 / log(emiss * k1 / rc + 1)',
101 | {'emiss': emissivity_img, 'rc': rc, 'k1': k1, 'k2': k2}
102 | )
103 |
104 | return lst.rename(['lst'])
105 |
106 |
107 | def ndvi(landsat_image, gsw_extent_flag=True):
108 | """Normalized difference vegetation index
109 |
110 | Parameters
111 | ----------
112 | landsat_image : ee.Image
113 | "Prepped" Landsat image with standardized band names.
114 | gsw_extent_flag : boolean
115 |         If True, apply the global surface water extent mask to the QA_PIXEL water mask.
116 | The default is True.
117 |
118 | Returns
119 | -------
120 | ee.Image
121 |
122 | """
123 |     # Force the input values to be greater than or equal to zero
124 | # since C02 surface reflectance values can be negative
125 | # but the normalizedDifference function will return nodata
126 | ndvi_img = landsat_image.max(0).normalizedDifference(['nir', 'red'])
127 |
128 | b1 = landsat_image.select(['nir'])
129 | b2 = landsat_image.select(['red'])
130 |
131 | # Assume that very high reflectance values are unreliable for computing the index
132 | # and set the output value to 0
133 | # Threshold value could be set lower, but for now only trying to catch saturated pixels
134 | ndvi_img = ndvi_img.where(b1.gte(1).Or(b2.gte(1)), 0)
135 |
136 | # Including the global surface water maximum extent to help remove shadows that
137 | # are misclassified as water
138 | # The flag is needed so that the image can be bypassed during testing with constant images
139 | qa_water_mask = landsat_c2_qa_water_mask(landsat_image)
140 | if gsw_extent_flag:
141 | gsw_mask = ee.Image('JRC/GSW1_4/GlobalSurfaceWater').select(['max_extent']).gte(1)
142 | qa_water_mask = qa_water_mask.And(gsw_mask)
143 |
144 | # Assume that low reflectance values are unreliable for computing the index
145 | # If both reflectance values are below the threshold,
146 | # and if the pixel is flagged as water, set the output to -0.1 (should this be -1?)
147 | # otherwise set the output to 0
148 | ndvi_img = ndvi_img.where(b1.lt(0.01).And(b2.lt(0.01)), 0)
149 | ndvi_img = ndvi_img.where(b1.lt(0.01).And(b2.lt(0.01)).And(qa_water_mask), -0.1)
150 | # Should there be an additional check for if either value was negative?
151 | # ndvi_img = ndvi_img.where(b1.lt(0).Or(b2.lt(0)), 0)
152 |
153 | return ndvi_img.clamp(-1.0, 1.0).rename(['ndvi'])
154 |
155 |
156 | def ndwi(landsat_image):
157 | """Normalized difference water index
158 |
159 | Parameters
160 | ----------
161 | landsat_image : ee.Image
162 | "Prepped" Landsat image with standardized band names.
163 |
164 | Returns
165 | -------
166 | ee.Image
167 |
168 | """
169 |     # Force the input values to be greater than or equal to zero
170 | # since C02 surface reflectance values can be negative
171 | # but the normalizedDifference function will return nodata
172 | ndwi_img = landsat_image.max(0).normalizedDifference(['swir1', 'green'])
173 |
174 | b1 = landsat_image.select(['swir1'])
175 | b2 = landsat_image.select(['green'])
176 |
177 | # Assume that very high reflectance values are unreliable for computing the index
178 | # and set the output value to 0
179 | # Threshold value could be set lower, but for now only trying to catch saturated pixels
180 | ndwi_img = ndwi_img.where(b1.gte(1).Or(b2.gte(1)), 0)
181 |
182 | # Assume that low reflectance values are unreliable for computing the index
183 | # If both reflectance values are below the threshold set the output to 0
184 | # May want to check the QA water mask here also, similar to the NDVI calculation
185 | ndwi_img = ndwi_img.where(b1.lt(0.01).And(b2.lt(0.01)), 0)
186 |
187 | return ndwi_img.clamp(-1.0, 1.0).rename(['ndwi'])
188 |
189 |
190 | def landsat_c2_qa_water_mask(landsat_image):
191 | """Extract water mask from the Landsat Collection 2 SR QA_PIXEL band.
192 |
193 | Parameters
194 | ----------
195 | landsat_image : ee.Image
196 | Landsat C02 image with a QA_PIXEL band.
197 |
198 | Returns
199 | -------
200 | ee.Image
201 |
202 | """
203 | return (
204 | ee.Image(landsat_image)
205 | .select(['QA_PIXEL'])
206 | .rightShift(7).bitwiseAnd(1).neq(0)
207 | .rename(['qa_water'])
208 | )
209 |
--------------------------------------------------------------------------------
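
A sketch of driving the landsat.py helpers above with a synthetic "prepped" image,
following the constant-image pattern used in openet/ssebop/tests/test_b_landsat.py.
The band values and the Landsat 8 k1/k2 constants are taken from that test file.

    import ee
    import openet.ssebop.landsat as landsat

    ee.Initialize()

    prepped = (
        ee.Image.constant([0.1, 0.1, 0.1, 0.9, 0.1, 0.1, 305, 1])
        .rename(['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'tir', 'QA_PIXEL'])
        .set({'k1_constant': 607.76, 'k2_constant': 1260.56})
    )

    ndvi_img = landsat.ndvi(prepped)  # 'ndvi' band, clamped to [-1.0, 1.0]
    ndwi_img = landsat.ndwi(prepped)  # 'ndwi' band, clamped to [-1.0, 1.0]
    lst_img = landsat.lst(prepped)    # emissivity corrected LST [K]
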
/openet/ssebop/model.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import ee
4 |
5 | import openet.refetgee
6 |
7 |
8 | def et_fraction(lst, tcold, dt):
9 | """SSEBop fraction of reference ET (ETf)
10 |
11 | Parameters
12 | ----------
13 | lst : ee.Image
14 |         Land surface temperature [K].
15 | tcold : ee.Image
16 | Cold temperature [K].
17 | dt : ee.Image, ee.Number
18 | Temperature difference [K].
19 |
20 | Returns
21 | -------
22 | ee.Image
23 |
24 |     Notes
25 |     -----
26 |     The clamping assumes this is an alfalfa reference ET fraction.
27 |     Values are clamped to the range [0, 1.0], and pixels with an
28 |     unclamped ET fraction greater than 2.0 are masked out (set to
29 |     nodata) instead of being clamped to 1.0 (see the updateMask
30 |     call below).
31 |
32 |     """
33 |
34 | etf = lst.expression(
35 | '(lst * (-1) + tcold + dt) / dt',
36 | {'lst': lst, 'tcold': tcold, 'dt': dt}
37 | )
38 |
39 | return etf.updateMask(etf.lte(2.0)).clamp(0, 1.0).rename(['et_fraction'])
40 |
41 |
42 | def dt(tmax, tmin, elev, doy, lat=None, rs=None, ea=None):
43 | """Temperature difference between hot/dry ground and cold/wet canopy
44 |
45 | Parameters
46 | ----------
47 | tmax : ee.Image, ee.Number
48 | Maximum daily air temperature [K].
49 | tmin : ee.Image, ee.Number
50 |         Minimum daily air temperature [K].
51 | elev : ee.Image, ee.Number
52 | Elevation [m].
53 | doy : ee.Number, int
54 | Day of year.
55 | lat : ee.Image, ee.Number, optional
56 | Latitude [deg]. If not set, use GEE pixelLonLat() method.
57 | rs : ee.Image, ee.Number, optional
58 |         Incoming solar radiation [MJ m-2 d-1]. If not set, the theoretical
59 |         clear sky solar radiation (Rso) will be used for Rs.
60 | ea : ee.Image, ee.Number, optional
61 | Actual vapor pressure [kPa]. If not set, vapor pressure will be
62 | computed from Tmin.
63 |
64 | Returns
65 | -------
66 | ee.Image
67 |
68 | Raises
69 | ------
70 | ValueError if doy is not set.
71 |
72 | References
73 | ----------
74 | .. [FAO56] Allen, R., Pereira, L., Raes, D., & Smith, M. (1998).
75 | Crop evapotranspiration: Guidelines for computing crop water
76 | requirements. FAO Irrigation and Drainage Paper (Vol. 56).
77 | .. [Senay2018] Senay, G. (2018). Satellite psychrometric formulation of
78 | the operational simplified surface energy balance (SSEBop) model for
79 | quantifying and mapping evapotranspiration.
80 | Applied Engineering in Agriculture, Vol 34(3).
81 |
82 | """
83 | if lat is None:
84 | lat = ee.Image.pixelLonLat().select(['latitude'])
85 | if doy is None:
86 | # TODO: attempt to read time_start from one of the images
87 | raise ValueError('doy must be set')
88 |
89 | # Convert latitude to radians
90 | phi = lat.multiply(math.pi / 180)
91 |
92 | # Make a DOY image from the DOY number
93 | doy = tmax.multiply(0).add(doy)
94 |
95 | # Extraterrestrial radiation (Ra) (FAO56 Eqns 24, 25, 23, 21)
96 | delta = doy.multiply(2 * math.pi / 365).subtract(1.39).sin().multiply(0.409)
97 | ws = phi.tan().multiply(-1).multiply(delta.tan()).acos()
98 | dr = doy.multiply(2 * math.pi / 365).cos().multiply(0.033).add(1)
99 | ra = (
100 | ws.multiply(phi.sin()).multiply(delta.sin())
101 | .add(phi.cos().multiply(delta.cos()).multiply(ws.sin()))
102 | .multiply(dr).multiply((1367.0 / math.pi) * 0.0820)
103 | )
104 |
105 | # Simplified clear sky solar formulation (Rso) [MJ m-2 d-1] (Eqn 37)
106 | rso = elev.multiply(2E-5).add(0.75).multiply(ra)
107 |
108 | # Derive cloudiness fraction from Rs and Rso (see FAO56 Eqn 39)
109 | # Use Rso for Rs if not set
110 | if rs is None:
111 | rs = rso.multiply(1)
112 | fcd = 1
113 | else:
114 | fcd = rs.divide(rso).max(0.3).min(1.0).multiply(1.35).subtract(0.35)
115 |
116 | # Net shortwave radiation [MJ m-2 d-1] (FAO56 Eqn 38)
117 | rns = rs.multiply(1 - 0.23)
118 |
119 | # Actual vapor pressure [kPa] (FAO56 Eqn 14)
120 | if ea is None:
121 | ea = (
122 | tmin.subtract(273.15).multiply(17.27)
123 | .divide(tmin.subtract(273.15).add(237.3))
124 | .exp().multiply(0.6108)
125 | )
126 |
127 | # Net longwave radiation [MJ m-2 d-1] (FAO56 Eqn 39)
128 | rnl = (
129 | tmax.pow(4).add(tmin.pow(4))
130 | .multiply(ea.sqrt().multiply(-0.14).add(0.34))
131 | .multiply(4.901E-9 * 0.5).multiply(fcd)
132 | )
133 |
134 | # Net radiation [MJ m-2 d-1] (FAO56 Eqn 40)
135 | rn = rns.subtract(rnl)
136 |
137 | # Air pressure [kPa] (FAO56 Eqn 7)
138 | pair = elev.multiply(-0.0065).add(293.0).divide(293.0).pow(5.26).multiply(101.3)
139 |
140 | # Air density [Kg m-3] (Senay2018 A.11 & A.13)
141 | den = tmax.add(tmin).multiply(0.5).pow(-1).multiply(pair).multiply(3.486 / 1.01)
142 |
143 | # Temperature difference [K] (Senay2018 A.5)
144 | return rn.divide(den).multiply(110.0 / ((1.013 / 1000) * 86400))
145 |
146 |
147 | # TODO: Decide if using the interpolated instantaneous is the right/best approach
148 | # We could use the closest hour in time, an average of a few hours
149 | # or just switch to using the raw daily or bias corrected assets
150 | def etf_grass_type_adjust(etf, src_coll_id, time_start, resample_method='bilinear'):
151 |     """Convert ET fraction from an alfalfa reference to a grass reference
152 |
153 | Parameters
154 | ----------
155 | etf : ee.Image
156 | ET fraction (alfalfa reference).
157 | src_coll_id : str
158 | Hourly meteorology collection ID for computing reference ET.
159 | time_start : int, ee.Number
160 | Image system time start [millis].
161 | resample_method : {'nearest', 'bilinear', 'bicubic'}
162 | Resample method for hourly meteorology collection.
163 |
164 | Returns
165 | -------
166 | ee.Image
167 |
168 | """
169 | hourly_et_reference_sources = [
170 | 'NASA/NLDAS/FORA0125_H002',
171 | 'ECMWF/ERA5_LAND/HOURLY',
172 | ]
173 |     if not src_coll_id:
174 |         raise ValueError('hourly ET reference source not set')
175 |     elif src_coll_id not in hourly_et_reference_sources:
176 |         raise ValueError(f'unsupported hourly ET reference source: {src_coll_id}')
177 |     else:
178 |         src_coll = ee.ImageCollection(src_coll_id)
179 |
180 | # Interpolating hourly NLDAS to the Landsat scene time
181 | # CGM - The 2 hour window is useful in case an image is missing
182 | # I think EEMETRIC is using a 4 hour window
183 | # CGM - Need to check if the NLDAS images are instantaneous
184 | # or some sort of average of the previous or next hour
185 | time_start = ee.Number(time_start)
186 | prev_img = ee.Image(
187 | src_coll
188 | .filterDate(time_start.subtract(2 * 60 * 60 * 1000), time_start)
189 | .limit(1, 'system:time_start', False)
190 | .first()
191 | )
192 | next_img = ee.Image(
193 | src_coll.filterDate(time_start, time_start.add(2 * 60 * 60 * 1000)).first()
194 | )
195 | prev_time = ee.Number(prev_img.get('system:time_start'))
196 | next_time = ee.Number(next_img.get('system:time_start'))
197 | time_ratio = time_start.subtract(prev_time).divide(next_time.subtract(prev_time))
198 | interp_img = (
199 | next_img.subtract(prev_img).multiply(time_ratio).add(prev_img)
200 | .set({'system:time_start': time_start})
201 | )
202 |
203 | if src_coll_id.upper() == 'NASA/NLDAS/FORA0125_H002':
204 | ratio = (
205 | openet.refetgee.Hourly.nldas(interp_img).etr
206 | .divide(openet.refetgee.Hourly.nldas(interp_img).eto)
207 | )
208 | if resample_method and (resample_method.lower() in ['bilinear', 'bicubic']):
209 | ratio = ratio.resample(resample_method)
210 | etf_grass = etf.multiply(ratio)
211 | elif src_coll_id.upper() == 'ECMWF/ERA5_LAND/HOURLY':
212 | ratio = (
213 | openet.refetgee.Hourly.era5_land(interp_img).etr
214 | .divide(openet.refetgee.Hourly.era5_land(interp_img).eto)
215 | )
216 | if resample_method and (resample_method.lower() in ['bilinear', 'bicubic']):
217 | ratio = ratio.resample(resample_method)
218 | etf_grass = etf.multiply(ratio)
219 |
220 | return etf_grass
221 |
--------------------------------------------------------------------------------
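
A sketch pairing model.dt() and model.et_fraction() above, using input values taken
from openet/ssebop/tests/test_b_model.py (the 2015-07-13 GRIDMET row and a made-up
Tcorr of 0.98). The commented outputs are the test expectations for these inputs.

    import ee
    import openet.ssebop.model as model
    import openet.ssebop.utils as utils

    ee.Initialize()

    # Temperature difference from daily Tmax/Tmin, elevation, DOY, and latitude
    dt_value = model.dt(
        tmax=ee.Number(307.3597), tmin=ee.Number(291.8105),
        elev=ee.Number(68.4937), doy=ee.Number(194), lat=ee.Number(36.0405),
    )
    print(utils.getinfo(dt_value))  # ~18.6148 [K]

    # ET fraction from LST, a cold reference temperature (Tcorr * Tmax), and dT
    etf_img = model.et_fraction(lst=ee.Image.constant(305), tcold=0.98 * 310, dt=15)
    print(utils.constant_image_value(etf_img))  # {'et_fraction': ~0.92}
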
/openet/ssebop/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import logging
3 | import os
4 |
5 | import ee
6 | import pytest
7 |
8 |
9 | @pytest.fixture(scope="session", autouse=True)
10 | def test_init():
11 | logging.basicConfig(level=logging.DEBUG, format='%(message)s')
12 | logging.getLogger('googleapiclient').setLevel(logging.ERROR)
13 | logging.debug('Test Setup')
14 |
15 | # For GitHub Actions authenticate using private key environment variable
16 | if 'EE_PRIVATE_KEY_B64' in os.environ:
17 |         print('Writing privatekey.json from environment variable ...')
18 | content = base64.b64decode(os.environ['EE_PRIVATE_KEY_B64']).decode('ascii')
19 | EE_KEY_FILE = 'privatekey.json'
20 | with open(EE_KEY_FILE, 'w') as f:
21 | f.write(content)
22 | ee.Initialize(ee.ServiceAccountCredentials('', key_file=EE_KEY_FILE))
23 | else:
24 | ee.Initialize()
25 |
--------------------------------------------------------------------------------
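
The EE_PRIVATE_KEY_B64 value consumed by the fixture above is a base64 encoded
service account JSON key. A one-time sketch for generating it locally, assuming
the key was exported to privatekey.json (store the output as a repository secret):

    import base64

    with open('privatekey.json', 'rb') as f:
        print(base64.b64encode(f.read()).decode('ascii'))
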
/openet/ssebop/tests/test_a_utils.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | import ee
4 | import pytest
5 |
6 | import openet.ssebop.utils as utils
7 |
8 |
9 | def test_getinfo():
10 | assert utils.getinfo(ee.Number(1)) == 1
11 |
12 |
13 | def test_getinfo_exception():
14 | with pytest.raises(Exception):
15 | utils.getinfo('deadbeef')
16 |
17 |
18 | # # CGM - Not sure how to trigger an EEException to test that the output is None
19 | # # This fails before it is sent to the getinfo function
20 | # def test_getinfo_eeexception():
21 | # assert utils.getinfo(ee.Number('deadbeef')) is None
22 |
23 |
24 | def test_constant_image_value(expected=10.123456789, tol=0.000001):
25 | output = utils.constant_image_value(ee.Image.constant(expected))
26 | assert abs(output['constant'] - expected) <= tol
27 |
28 |
29 | @pytest.mark.parametrize(
30 | 'image_id, xy, scale, expected, tol',
31 | [
32 | ['USGS/3DEP/10m', [-106.03249, 37.17777], 30, 2364.169, 0.001],
33 | ['USGS/3DEP/10m', [-106.03249, 37.17777], 10, 2364.138, 0.001],
34 | ['USGS/3DEP/10m', [-106.03249, 37.17777], 1, 2364.138, 0.001],
35 | ['NASA/NASADEM_HGT/001', [-106.03249, 37.17777], 30, 2361, 0.001],
36 | ]
37 | )
38 | def test_point_image_value(image_id, xy, scale, expected, tol):
39 | output = utils.point_image_value(
40 | ee.Image(image_id).select(['elevation'], ['output']), xy, scale
41 | )
42 | assert abs(output['output'] - expected) <= tol
43 |
44 |
45 | @pytest.mark.parametrize(
46 | 'image_id, image_date, xy, scale, expected, tol',
47 | [
48 | ['USGS/3DEP/10m', '2012-04-04', [-106.03249, 37.17777], 30, 2364.169, 0.001],
49 | ['USGS/3DEP/10m', '2012-04-04', [-106.03249, 37.17777], 10, 2364.097, 0.001],
50 | ['USGS/3DEP/10m', '2012-04-04', [-106.03249, 37.17777], 1, 2364.138, 0.001],
51 | ['NASA/NASADEM_HGT/001', '2012-04-04', [-106.03249, 37.17777], 30, 2361, 0.001],
52 | ]
53 | )
54 | def test_point_coll_value(image_id, image_date, xy, scale, expected, tol):
55 | # The image must have a system:time_start for this function to work correctly
56 | input_img = (
57 | ee.Image(image_id).select(['elevation'], ['output'])
58 | .set({'system:time_start': ee.Date(image_date).millis()})
59 | )
60 | output = utils.point_coll_value(ee.ImageCollection([input_img]), xy, scale)
61 | assert abs(output['output'][image_date] - expected) <= tol
62 |
63 |
64 | def test_c_to_k(c=20, k=293.15, tol=0.000001):
65 | output = utils.constant_image_value(utils.c_to_k(ee.Image.constant(c)))
66 | assert abs(output['constant'] - k) <= tol
67 |
68 |
69 | @pytest.mark.parametrize(
70 | 'input, expected',
71 | [
72 | ['2015-07-13T18:33:39', 1436745600000],
73 | ['2015-07-13T00:00:00', 1436745600000],
74 | ]
75 | )
76 | def test_date_0utc(input, expected):
77 | assert utils.getinfo(utils.date_0utc(ee.Date(input)).millis()) == expected
78 |
79 |
80 | @pytest.mark.parametrize(
81 | # Note: These are made up values
82 | 'input, expected',
83 | [
84 | [300, True],
85 | ['300', True],
86 | [300.25, True],
87 | ['300.25', True],
88 | ['a', False],
89 | ]
90 | )
91 | def test_is_number(input, expected):
92 | assert utils.is_number(input) == expected
93 |
94 |
95 | def test_millis():
96 | assert utils.millis(datetime.datetime(2015, 7, 13)) == 1436745600000
97 |
98 |
99 | def test_valid_date():
100 | assert utils.valid_date('2015-07-13') is True
101 | assert utils.valid_date('2015-02-30') is False
102 | assert utils.valid_date('20150713') is False
103 | assert utils.valid_date('07/13/2015') is False
104 | assert utils.valid_date('07-13-2015', '%m-%d-%Y') is True
105 |
--------------------------------------------------------------------------------
/openet/ssebop/tests/test_b_landsat.py:
--------------------------------------------------------------------------------
1 | # import datetime
2 | # import pprint
3 |
4 | import ee
5 | import pytest
6 |
7 | import openet.ssebop.landsat as landsat
8 | import openet.ssebop.utils as utils
9 | # TODO: import utils from openet.core
10 | # import openet.core.utils as utils
11 |
12 |
13 | def sr_image(blue=0.1, green=0.1, red=0.1, nir=0.9, swir1=0.1, swir2=0.1, bt=305, qa=1):
14 | """Construct a fake Landsat 8 SR image with renamed bands"""
15 | return (
16 | ee.Image.constant([blue, green, red, nir, swir1, swir2, bt, qa])
17 | .rename(['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'tir', 'QA_PIXEL'])
18 | .set({
19 | # 'system:time_start': ee.Date(SCENE_DATE).millis(),
20 | 'k1_constant': ee.Number(607.76),
21 | 'k2_constant': ee.Number(1260.56),
22 | })
23 | )
24 |
25 |
26 | def test_ndvi_band_name():
27 | output = utils.getinfo(landsat.ndvi(sr_image()))
28 | assert output['bands'][0]['id'] == 'ndvi'
29 |
30 |
31 | @pytest.mark.parametrize(
32 | 'red, nir, expected',
33 | [
34 | [0.02, 0.9 / 55, -0.1],
35 | [0.02, 0.02, 0.0],
36 | [0.01, 0.11 / 9, 0.1],
37 | [0.02, 0.03, 0.2],
38 | [0.01, 0.13 / 7, 0.3],
39 | [0.03, 0.07, 0.4],
40 | [0.02, 0.06, 0.5],
41 | [0.02, 0.08, 0.6],
42 | [0.01, 0.17 / 3, 0.7],
43 | [0.01, 0.09, 0.8],
44 | ]
45 | )
46 | def test_ndvi_calculation(red, nir, expected, tol=0.000001):
47 | output = utils.constant_image_value(landsat.ndvi(sr_image(red=red, nir=nir)))
48 | assert abs(output['ndvi'] - expected) <= tol
49 |
50 |
51 | @pytest.mark.parametrize(
52 | 'red, nir, expected',
53 | [
54 | [1.0, 0.4, 0.0],
55 | [0.4, 1.0, 0.0],
56 | [1.0, 1.0, 0.0],
57 | ]
58 | )
59 | def test_ndvi_saturated_reflectance(red, nir, expected, tol=0.000001):
60 | # Check that saturated reflectance values return 0
61 | output = utils.constant_image_value(landsat.ndvi(sr_image(red=red, nir=nir)))
62 | assert abs(output['ndvi'] - expected) <= tol
63 |
64 |
65 | @pytest.mark.parametrize(
66 | 'red, nir, expected',
67 | [
68 | [-0.1, -0.1, 0.0],
69 | [0.0, 0.0, 0.0],
70 | [0.009, 0.009, 0.0],
71 | [0.009, -0.01, 0.0],
72 | [-0.01, 0.009, 0.0],
73 | # Check that calculation works correctly if one value is above threshold
74 | [-0.01, 0.1, 1.0],
75 | [0.1, -0.01, -1.0],
76 | ]
77 | )
78 | def test_ndvi_negative_non_water(red, nir, expected, tol=0.000001):
79 | # Check that non-water pixels with very low or negative reflectance values are set to 0.0
80 | output = utils.constant_image_value(landsat.ndvi(sr_image(red=red, nir=nir, qa=1)))
81 | assert abs(output['ndvi'] - expected) <= tol
82 |
83 |
84 | @pytest.mark.parametrize(
85 | 'red, nir, expected',
86 | [
87 | [-0.1, -0.1, -0.1],
88 | [0.0, 0.0, -0.1],
89 | [0.009, 0.009, -0.1],
90 | [0.009, -0.01, -0.1],
91 | [-0.01, 0.009, -0.1],
92 | ]
93 | )
94 | def test_ndvi_negative_water(red, nir, expected, tol=0.000001):
95 | # Check that water pixels with very low or negative reflectance values are set to -0.1
96 | output = utils.constant_image_value(landsat.ndvi(
97 | sr_image(red=red, nir=nir, qa=128), gsw_extent_flag=False
98 | ))
99 | assert abs(output['ndvi'] - expected) <= tol
100 |
101 |
102 | def test_ndwi_band_name():
103 | output = utils.getinfo(landsat.ndwi(sr_image()))
104 | assert output['bands'][0]['id'] == 'ndwi'
105 |
106 |
107 | @pytest.mark.parametrize(
108 | 'green, swir1, expected',
109 | [
110 | [0.01, 0.07, 0.75],
111 | [0.01, 0.03, 0.5],
112 | [0.9 / 55, 0.02, 0.1],
113 | [0.2, 0.2, 0.0],
114 | [0.11 / 9, 0.01, -0.1],
115 | [0.07, 0.03, -0.4],
116 | [0.09, 0.01, -0.8],
117 | ]
118 | )
119 | def test_ndwi_calculation(green, swir1, expected, tol=0.000001):
120 | output = utils.constant_image_value(landsat.ndwi(sr_image(green=green, swir1=swir1)))
121 | assert abs(output['ndwi'] - expected) <= tol
122 |
123 |
124 | @pytest.mark.parametrize(
125 | 'green, swir1, expected',
126 | [
127 | [-0.1, -0.1, 0.0],
128 | [0.0, 0.0, 0.0],
129 | [0.009, 0.009, 0.0],
130 | [0.009, -0.01, 0.0],
131 | [-0.01, 0.009, 0.0],
132 | # Check that calculation works correctly if one value is above threshold
133 | [-0.01, 0.1, 1.0],
134 | [0.1, -0.01, -1.0],
135 | ]
136 | )
137 | def test_ndwi_negative_reflectance(green, swir1, expected, tol=0.000001):
138 | # Check that very low or negative reflectance values are set to 0
139 | output = utils.constant_image_value(landsat.ndwi(sr_image(green=green, swir1=swir1)))
140 | assert abs(output['ndwi'] - expected) <= tol
141 |
142 |
143 | @pytest.mark.parametrize(
144 | 'green, swir1, expected',
145 | [
146 | [1.0, 0.4, 0.0],
147 | [0.4, 1.0, 0.0],
148 | [1.0, 1.0, 0.0],
149 | ]
150 | )
151 | def test_ndwi_saturated_reflectance(green, swir1, expected, tol=0.000001):
152 | # Check that saturated reflectance values return 0
153 | output = utils.constant_image_value(landsat.ndwi(sr_image(green=green, swir1=swir1)))
154 | assert abs(output['ndwi'] - expected) <= tol
155 |
156 |
157 | def test_emissivity_band_name():
158 | output = utils.getinfo(landsat.emissivity(sr_image()))
159 | assert output['bands'][0]['id'] == 'emissivity'
160 |
161 |
162 | @pytest.mark.parametrize(
163 | 'red, nir, expected',
164 | [
165 | [0.02, 0.9 / 55, 0.985], # -0.1
166 | [0.02, 0.02, 0.977], # 0.0
167 | [0.01, 0.11 / 9, 0.977], # 0.1
168 |         [0.02, 0.02999, 0.977],  # 0.2- (0.02999 NIR is just below an NDVI of 0.2)
169 |         [0.02, 0.03001, 0.986335],  # 0.2+
170 | [0.01, 0.13 / 7, 0.986742], # 0.3
171 | [0.03, 0.07, 0.987964], # 0.4
172 | [0.02, 0.06, 0.99], # 0.5
173 | [0.02, 0.08, 0.99], # 0.6
174 | [0.01, 0.17 / 3, 0.99], # 0.7
175 | ]
176 | )
177 | def test_emissivity_calculation(red, nir, expected, tol=0.000001):
178 | output = utils.constant_image_value(landsat.emissivity(sr_image(red=red, nir=nir)))
179 | assert abs(output['emissivity'] - expected) <= tol
180 |
181 |
182 | def test_lst_band_name():
183 | output = utils.getinfo(landsat.lst(sr_image()))
184 | assert output['bands'][0]['id'] == 'lst'
185 |
186 |
187 | @pytest.mark.parametrize(
188 | 'red, nir, bt, expected',
189 | [
190 | [0.02, 0.07, 300, 303.471031],
191 | ]
192 | )
193 | def test_lst_calculation(red, nir, bt, expected, tol=0.000001):
194 | output = utils.constant_image_value(landsat.lst(sr_image(red=red, nir=nir, bt=bt)))
195 | assert abs(output['lst'] - expected) <= tol
196 |
--------------------------------------------------------------------------------
/openet/ssebop/tests/test_b_model.py:
--------------------------------------------------------------------------------
1 | import ee
2 | import pytest
3 |
4 | import openet.ssebop.model as model
5 | import openet.ssebop.utils as utils
6 |
7 | COLL_ID = 'LANDSAT/LC08/C02/T1_L2'
8 | SCENE_ID = 'LC08_042035_20150713'
9 | SCENE_TIME = 1436812419150
10 | SCENE_POINT = (-119.5, 36.0)
11 | TEST_POINT = (-119.44252382373145, 36.04047742246546)
12 |
13 |
14 | @pytest.mark.parametrize(
15 | # Note: These are made up values
16 | 'lst, ndvi, dt, tcold, expected',
17 | [
18 | # Basic ETf test
19 | [308, 0.50, 10, 0.98 * 310, 0.58],
20 | # Test ETf clamp conditions
21 | [300, 0.80, 15, 0.98 * 310, 1.0], # Clamped to 1.0
22 | [319, 0.80, 15, 0.98 * 310, 0.0],
23 | # Test dT high, max/min, and low clamp values
24 | # CGM: dT clamping currently happens when dT source is read
25 | # [305, 0.80, 26, 0.98, 310, 0.952],
26 | [305, 0.80, 25, 0.98 * 310, 0.952],
27 | [305, 0.80, 6, 0.98 * 310, 0.8],
28 | # [305, 0.80, 5, 0.98 * 310, 0.8],
29 | # High and low test values (made up numbers)
30 | [305, 0.80, 15, 0.98 * 310, 0.9200],
31 | [315, 0.10, 15, 0.98 * 310, 0.2533],
32 | # Test changing Tcorr
33 | [305, 0.80, 15, 0.983 * 310, 0.9820],
34 | [315, 0.10, 15, 0.985 * 310, 0.3566],
35 | # Central Valley test values
36 | [302, 0.80, 17, 0.985 * 308, 1.0], # Clamped to 1.0
37 | [327, 0.08, 17, 0.985 * 308, 0.0],
38 | ]
39 | )
40 | def test_Model_et_fraction_values(lst, ndvi, dt, tcold, expected, tol=0.0001):
41 | output = utils.constant_image_value(model.et_fraction(
42 | lst=ee.Image.constant(lst), tcold=tcold, dt=dt))
43 | assert abs(output['et_fraction'] - expected) <= tol
44 |
45 |
46 | @pytest.mark.parametrize(
47 | 'lst, dt, tcold, expected',
48 | [
49 | # The ETf mask limit was changed from 1.5 to 2.0 for gridded Tcorr
50 | [304, 10, 0.98 * 310, 0.98], # 0.98 ETf will not be clamped
51 | [303, 10, 0.98 * 310, 1.00], # 1.08 ETf will be clamped to 1.0
52 | [293, 10, 0.98 * 310, None], # 2.08 ETf should be set to None (>2.0)
53 | # The ETf mask limit was changed from 1.3 to 1.5 for gridded Tcorr
54 | # [302, 10, 0.98, 310, 1.05], # 1.18 ETf should be clamped to 1.05
55 | # [300, 10, 0.98, 310, 1.05], # 1.38 ETf should be clamped to 1.05
56 | # [298, 10, 0.98, 310, None], # 1.58 ETf should be set to None (>1.5)
57 | ]
58 | )
59 | def test_Model_et_fraction_clamp_nodata(lst, dt, tcold, expected):
60 | """Test that ETf is set to nodata for ETf > 2.0"""
61 | output_img = model.et_fraction(lst=ee.Image.constant(lst), tcold=tcold, dt=dt)
62 | output = utils.constant_image_value(ee.Image(output_img))
63 | if expected is None:
64 | assert output['et_fraction'] is None
65 | else:
66 | assert abs(output['et_fraction'] - expected) <= 0.000001
67 |
68 |
69 | @pytest.mark.parametrize(
70 | 'tmax, tmin, elev, doy, lat, expected',
71 | [
72 | # Test values are slightly different than in this old playground script
73 | # https://code.earthengine.google.com/8316e79baf5c2e3332913e5ec3224e92
74 | # 2015-07-13
75 | [309.1128, 292.6634, 68.4937, 194, 36.0405, 18.8347], # CIMIS
76 | [307.6500, 291.6500, 68.4937, 194, 36.0405, 18.5681], # DAYMET
77 | [307.3597, 291.8105, 68.4937, 194, 36.0405, 18.6148], # GRIDMET
78 |
79 | # 2017-07-16
80 | [313.5187, 292.2343, 18, 197, 39.1968, 18.3925], # CIMIS
81 | [313.1500, 293.6500, 18, 197, 39.1968, 18.8163], # DAYMET
82 | [312.3927, 293.2107, 18, 197, 39.1968, 18.6917], # GRIDMET
83 |
84 | ]
85 | )
86 | def test_Model_dt_calc_rso_no_ea(tmax, tmin, elev, doy, lat, expected, tol=0.0001):
87 | """Test dt calculation using Rso and Ea from Tmin"""
88 | dt = utils.getinfo(model.dt(
89 | tmax=ee.Number(tmax), tmin=ee.Number(tmin),
90 | elev=ee.Number(elev), rs=None, doy=ee.Number(doy), lat=ee.Number(lat)))
91 | assert abs(float(dt) - expected) <= tol
92 |
93 |
94 | @pytest.mark.parametrize(
95 | 'tmax, tmin, elev, doy, lat, rs, expected',
96 | [
97 | # Test values are slightly different than in this old playground script
98 | # https://code.earthengine.google.com/8316e79baf5c2e3332913e5ec3224e92
99 | # 2017-07-16
100 | [313.5187, 292.2343, 18, 197, 39.1968, 29.1144, 18.4785], # CIMIS
101 | [313.1500, 293.6500, 18, 197, 39.1968, 25.3831, 16.7078], # DAYMET
102 | [312.3927, 293.2107, 18, 197, 39.1968, 30.2915, 19.7663], # GRIDMET
103 | ]
104 | )
105 | def test_Model_dt_calc_rs_no_ea(tmax, tmin, elev, doy, lat, rs, expected, tol=0.0001):
106 | """Test dt calculation using measured Rs and Ea from Tmin"""
107 | dt = utils.getinfo(model.dt(
108 | tmax=ee.Number(tmax), tmin=ee.Number(tmin), elev=ee.Number(elev),
109 | rs=ee.Number(rs), doy=ee.Number(doy), lat=ee.Number(lat)))
110 | assert abs(float(dt) - expected) <= tol
111 |
112 |
113 | @pytest.mark.parametrize(
114 | 'tmax, tmin, elev, doy, lat, ea, expected',
115 | [
116 | # Test values are slightly different than in this old playground script
117 | # https://code.earthengine.google.com/8316e79baf5c2e3332913e5ec3224e92
118 | # 2017-07-16
119 | [313.5187, 292.2343, 18, 197, 39.1968, 1.6110, 17.0153], # CIMIS
120 | [313.1500, 293.6500, 18, 197, 39.1968, 0.9200, 15.0200], # DAYMET
121 | [312.3927, 293.2107, 18, 197, 39.1968, 1.6384, 17.0965], # GRIDMET
122 | ]
123 | )
124 | def test_Model_dt_calc_rso_ea(tmax, tmin, elev, doy, lat, ea, expected, tol=0.0001):
125 | """Test dt calculation using 'measured' Ea (from Tdew, sph, vp) and Rso"""
126 | dt = utils.getinfo(model.dt(
127 | tmax=ee.Number(tmax), tmin=ee.Number(tmin), elev=ee.Number(elev),
128 | ea=ee.Number(ea), doy=ee.Number(doy), lat=ee.Number(lat)))
129 | assert abs(float(dt) - expected) <= tol
130 |
131 |
132 | @pytest.mark.parametrize(
133 | 'tmax, tmin, elev, doy, lat, rs, ea, expected',
134 | [
135 | # Test values are slightly different than in this old playground script
136 | # https://code.earthengine.google.com/8316e79baf5c2e3332913e5ec3224e92
137 | # 2017-07-16
138 | [313.5187, 292.2343, 18, 197, 39.1968, 29.1144, 1.6110, 17.1013], # CIMIS
139 | [313.1500, 293.6500, 18, 197, 39.1968, 25.3831, 0.9200, 13.5525], # DAYMET
140 | [312.3927, 293.2107, 18, 197, 39.1968, 30.2915, 1.6384, 18.1711], # GRIDMET
141 | ]
142 | )
143 | def test_Model_dt_calc_rs_ea(tmax, tmin, elev, doy, lat, rs, ea, expected, tol=0.0001):
144 | """Test dt calculation using 'measured' Rs and Ea (from Tdew, sph, vp)"""
145 | dt = utils.getinfo(model.dt(
146 | tmax=ee.Number(tmax), tmin=ee.Number(tmin), elev=ee.Number(elev),
147 | rs=ee.Number(rs), ea=ee.Number(ea), doy=ee.Number(doy),
148 | lat=ee.Number(lat)))
149 | assert abs(float(dt) - expected) <= tol
150 |
151 |
152 | def test_Model_dt_doy_exception():
153 | with pytest.raises(ValueError):
154 | utils.getinfo(model.dt(tmax=313.15, tmin=293.65, elev=21.83, doy=None))
155 |
156 |
157 | def test_Model_etf_grass_type_adjust_parameters():
158 | """Check that the function parameter names and order don't change"""
159 | etf_img = (
160 | ee.Image(f'{COLL_ID}/{SCENE_ID}').select([0]).multiply(0).add(1.0)
161 | .rename(['et_fraction']).set('system:time_start', SCENE_TIME)
162 | )
163 | output = model.etf_grass_type_adjust(
164 | etf=etf_img, src_coll_id='NASA/NLDAS/FORA0125_H002',
165 | time_start=SCENE_TIME, resample_method='bilinear',
166 | )
167 | assert utils.point_image_value(output, SCENE_POINT, scale=100)['et_fraction'] > 1
168 |
169 | output = model.etf_grass_type_adjust(
170 | etf_img, 'NASA/NLDAS/FORA0125_H002', SCENE_TIME, 'bilinear'
171 | )
172 | assert utils.point_image_value(output, SCENE_POINT, scale=100)['et_fraction'] > 1
173 |
174 |
175 | @pytest.mark.parametrize(
176 | 'src_coll_id, resample_method, expected',
177 | [
178 | ['NASA/NLDAS/FORA0125_H002', 'nearest', 1.228],
179 | ['NASA/NLDAS/FORA0125_H002', 'bilinear', 1.232],
180 | ['ECMWF/ERA5_LAND/HOURLY', 'nearest', 1.156],
181 | ['ECMWF/ERA5_LAND/HOURLY', 'bilinear', 1.156],
182 | ]
183 | )
184 | def test_Model_etf_grass_type_adjust(src_coll_id, resample_method, expected, tol=0.001):
185 | """Check alfalfa to grass reference adjustment factor"""
186 | etf_img = (
187 | ee.Image(f'{COLL_ID}/{SCENE_ID}').select([0]).multiply(0).add(1.0)
188 | .rename(['et_fraction']).set('system:time_start', SCENE_TIME)
189 | )
190 | output = model.etf_grass_type_adjust(
191 | etf=etf_img, src_coll_id=src_coll_id, time_start=SCENE_TIME,
192 | resample_method=resample_method
193 | )
194 | output = utils.point_image_value(output, SCENE_POINT, scale=100)
195 | assert abs(output['et_fraction'] - expected) <= tol
196 |
197 |
198 | def test_Model_etf_grass_type_adjust_src_coll_id_exception():
199 | """Function should raise an exception for unsupported src_coll_id values"""
200 | with pytest.raises(ValueError):
201 | utils.getinfo(model.etf_grass_type_adjust(
202 | etf=ee.Image.constant(1), src_coll_id='DEADBEEF', time_start=SCENE_TIME
203 | ))
204 |
205 |
206 |
--------------------------------------------------------------------------------
/openet/ssebop/tests/test_d_collection.py:
--------------------------------------------------------------------------------
1 | # import pprint
2 |
3 | import ee
4 | import pytest
5 |
6 | import openet.ssebop as ssebop
7 | import openet.ssebop.utils as utils
8 | # TODO: import utils from openet.core
9 | # import openet.core.utils as utils
10 |
11 | C02_COLLECTIONS = ['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2']
12 | # Image LE07_044033_20170724 is not (yet?) in LANDSAT/LE07/C02/T1_L2
13 | C02_SCENE_ID_LIST = ['LC08_044033_20170716', 'LE07_044033_20170708']
14 | START_DATE = '2017-07-01'
15 | END_DATE = '2017-08-01'
16 | SCENE_GEOM = (-121.91, 38.99, -121.89, 39.01)
17 | SCENE_POINT = (-121.9, 39)
18 | VARIABLES = {'et', 'et_fraction', 'et_reference'}
19 | TEST_POINT = (-121.5265, 38.7399)
20 |
21 |
22 | def default_coll_args():
23 | return {
24 | 'collections': C02_COLLECTIONS,
25 | 'geometry': ee.Geometry.Point(SCENE_POINT),
26 | 'start_date': START_DATE,
27 | 'end_date': END_DATE,
28 | 'variables': list(VARIABLES),
29 | 'cloud_cover_max': 70,
30 | 'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
31 | 'et_reference_band': 'etr',
32 | 'et_reference_factor': 0.85,
33 | 'et_reference_resample': 'nearest',
34 | 'et_reference_date_type': None,
35 | # 'et_reference_date_type': 'daily',
36 | 'model_args': {
37 | 'tcorr_source': 0.99,
38 | 'cloudmask_args': {'cloud_score_flag': False, 'filter_flag': False},
39 | },
40 | 'filter_args': {},
41 | }
42 |
43 |
44 | def default_coll_obj(**kwargs):
45 | args = default_coll_args().copy()
46 | args.update(kwargs)
47 | return ssebop.Collection(**args)
48 |
49 |
50 | def parse_scene_id(output_info):
51 | output = [x['properties']['system:index'] for x in output_info['features']]
52 |     # Strip merge indices (this works for Landsat image IDs)
53 | return sorted(['_'.join(x.split('_')[-3:]) for x in output])
54 |
55 |
56 | def test_Collection_init_default_parameters():
57 | """Test if init sets default parameters"""
58 | args = default_coll_args().copy()
59 | # These values are being set above but have defaults that need to be checked
60 | del args['et_reference_source']
61 | del args['et_reference_band']
62 | del args['et_reference_factor']
63 | del args['et_reference_resample']
64 | del args['et_reference_date_type']
65 | del args['variables']
66 |
67 | m = ssebop.Collection(**args)
68 | assert m.variables is None
69 | assert m.et_reference_source is None
70 | assert m.et_reference_band is None
71 | assert m.et_reference_factor is None
72 | assert m.et_reference_resample is None
73 | assert m.et_reference_date_type is None
74 | assert m.cloud_cover_max == 70
75 | # assert m.model_args == {'tcorr_source': 0.99}
76 | assert m.filter_args == {}
77 | assert set(m._interp_vars) == {'ndvi', 'et_fraction'}
78 |
79 |
80 | def test_Collection_init_collection_str(coll_id='LANDSAT/LC08/C02/T1_L2'):
81 | """Test if a single coll_id str is converted to a single item list"""
82 | assert default_coll_obj(collections=coll_id).collections == [coll_id]
83 |
84 |
85 | def test_Collection_init_cloud_cover_max_str():
86 | """Test if cloud_cover_max strings are converted to float"""
87 | assert default_coll_obj(cloud_cover_max='70').cloud_cover_max == 70
88 |
89 |
90 | @pytest.mark.parametrize(
91 | 'coll_id, start_date, end_date',
92 | [
93 | # ['LANDSAT/LT04/C02/T1_L2', '1981-01-01', '1982-01-01'],
94 | # ['LANDSAT/LT04/C02/T1_L2', '1994-01-01', '1995-01-01'],
95 | ['LANDSAT/LT05/C02/T1_L2', '1983-01-01', '1984-01-01'],
96 | ['LANDSAT/LT05/C02/T1_L2', '2012-01-01', '2013-01-01'],
97 | ['LANDSAT/LE07/C02/T1_L2', '1998-01-01', '1999-01-01'],
98 | ['LANDSAT/LE07/C02/T1_L2', '2022-01-01', '2023-01-01'],
99 | ['LANDSAT/LC08/C02/T1_L2', '2012-01-01', '2013-01-01'],
100 | ['LANDSAT/LC09/C02/T1_L2', '2021-01-01', '2022-01-01'],
101 | ]
102 | )
103 | def test_Collection_init_collection_filter(coll_id, start_date, end_date):
104 | """Test that collection IDs are filtered based on start/end dates"""
105 |     # The target collection ID should be removed from the collections list
106 | assert default_coll_obj(collections=coll_id, start_date=start_date,
107 | end_date=end_date).collections == []
108 |
109 |
110 | def test_Collection_init_startdate_exception():
111 | """Test if Exception is raised for invalid start date formats"""
112 | with pytest.raises(ValueError):
113 | default_coll_obj(start_date='1/1/2000', end_date='2000-01-02')
114 |
115 |
116 | def test_Collection_init_enddate_exception():
117 | """Test if Exception is raised for invalid end date formats"""
118 | with pytest.raises(ValueError):
119 | default_coll_obj(start_date='2000-01-01', end_date='1/2/2000')
120 |
121 |
122 | def test_Collection_init_swapped_date_exception():
123 | """Test if Exception is raised when start_date == end_date"""
124 | with pytest.raises(ValueError):
125 | default_coll_obj(start_date='2017-01-01', end_date='2017-01-01')
126 |
127 |
128 | def test_Collection_init_invalid_collections_exception():
129 | """Test if Exception is raised for an invalid collection ID"""
130 | with pytest.raises(ValueError):
131 | default_coll_obj(collections=['FOO'])
132 |
133 |
134 | # DEADBEEF - Not needed if not supporting collection 1
135 | # def test_Collection_init_duplicate_collections_exception():
136 | # """Test if Exception is raised for duplicate Landsat types"""
137 | # with pytest.raises(ValueError):
138 | # default_coll_obj(collections=['LANDSAT/LC08/C01/T1_RT_TOA',
139 | # 'LANDSAT/LC08/C01/T1_TOA'])
140 | # with pytest.raises(ValueError):
141 | # default_coll_obj(collections=['LANDSAT/LC08/C01/T1_SR',
142 | # 'LANDSAT/LC08/C01/T1_TOA'])
143 |
144 |
145 | def test_Collection_init_cloud_cover_exception():
146 | """Test if Exception is raised for an invalid cloud_cover_max"""
147 | with pytest.raises(TypeError):
148 | default_coll_obj(cloud_cover_max='A')
149 | with pytest.raises(ValueError):
150 | default_coll_obj(cloud_cover_max=-1)
151 | with pytest.raises(ValueError):
152 | default_coll_obj(cloud_cover_max=101)
153 |
154 |
155 | # # TODO: Test for Error if geometry is not ee.Geometry
156 | # def test_Collection_init_geometry_exception():
157 | # """Test if Exception is raised for an invalid geometry"""
158 | # args = default_coll_args()
159 | # args['geometry'] = 'DEADBEEF'
160 | # s = ssebop.Collection(**args)
161 | # assert utils.getinfo(s.geometry) ==
162 |
163 |
164 | # TODO: Test if a geojson string can be passed for the geometry
165 | # def test_Collection_init_geometry_geojson():
166 | # assert False
167 |
168 |
169 | def test_Collection_build_default():
170 | output = utils.getinfo(default_coll_obj()._build())
171 | assert output['type'] == 'ImageCollection'
172 | assert parse_scene_id(output) == C02_SCENE_ID_LIST
173 | # Check that the variables being set in the default collection object are returned
174 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
175 |
176 |
177 | def test_Collection_build_variables_custom(variable='ndvi'):
178 | # Check that setting the build variables overrides the collection variables
179 | output = utils.getinfo(default_coll_obj()._build(variables=[variable])
180 | .first().bandNames())
181 | assert set(output) == {variable}
182 |
183 |
184 | def test_Collection_build_variables_none():
185 | """Test for exception if variables is set to None in method call"""
186 | with pytest.raises(ValueError):
187 | utils.getinfo(default_coll_obj(variables=None)._build(variables=None))
188 |
189 |
190 | def test_Collection_build_variables_not_set():
191 | """Test for exception if variables is not set in method since default is None"""
192 | with pytest.raises(ValueError):
193 | utils.getinfo(default_coll_obj(variables=None)._build())
194 |
195 |
196 | def test_Collection_build_variables_empty_list():
197 | # Setting variables to an empty list should return the merged Landsat collection
198 | output = utils.getinfo(
199 | default_coll_obj(collections=C02_COLLECTIONS, variables=None)
200 | ._build(variables=[]).first().bandNames()
201 | )
202 | assert 'SR_B3' in output
203 |
204 |
205 | def test_Collection_build_invalid_variable_exception():
206 | """Test if Exception is raised for an invalid variable"""
207 | with pytest.raises(ValueError):
208 | utils.getinfo(default_coll_obj()._build(variables=['FOO']))
209 |
210 |
211 | def test_Collection_build_dates():
212 | """Check that dates passed to build function override Class dates"""
213 | coll_obj = default_coll_obj(start_date='2017-08-01', end_date='2017-09-01')
214 | output = utils.getinfo(coll_obj._build(
215 | start_date='2017-07-16', end_date='2017-07-17'))
216 | assert parse_scene_id(output) == ['LC08_044033_20170716']
217 |
218 |
219 | def test_Collection_build_landsat_c2_sr():
220 | """Test if the Landsat SR collections can be built"""
221 | coll_obj = default_coll_obj(
222 | collections=['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2'])
223 | output = utils.getinfo(coll_obj._build())
224 | assert parse_scene_id(output) == C02_SCENE_ID_LIST
225 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
226 |
227 |
228 | def test_Collection_build_exclusive_enddate():
229 | """Test if the end_date is exclusive"""
230 | output = utils.getinfo(default_coll_obj(end_date='2017-07-24')._build())
231 | assert [x for x in parse_scene_id(output) if int(x[-8:]) >= 20170724] == []
232 |
233 |
234 | def test_Collection_build_cloud_cover():
235 | """Test if the cloud cover max parameter is being applied"""
236 | # CGM - The filtered images should probably be looked up programmatically
237 | output = utils.getinfo(default_coll_obj(cloud_cover_max=0.5)._build(
238 | variables=['et']))
239 | assert 'LE07_044033_20170724' not in parse_scene_id(output)
240 |
241 |
242 | @pytest.mark.parametrize(
243 | 'collection, start_date, end_date',
244 | [
245 | ['LANDSAT/LT05/C02/T1_L2', '2012-01-01', '2013-01-01'],
246 | ]
247 | )
248 | def test_Collection_build_filter_dates_lt05(collection, start_date, end_date):
249 | """Test that bad Landsat 5 images are filtered"""
250 | output = utils.getinfo(default_coll_obj(
251 | collections=[collection], start_date=start_date, end_date=end_date,
252 | geometry=ee.Geometry.Rectangle(-125, 25, -65, 50))._build(variables=['et']))
253 | assert parse_scene_id(output) == []
254 |
255 |
256 | @pytest.mark.parametrize(
257 | 'collection, start_date, end_date',
258 | [
259 | ['LANDSAT/LE07/C02/T1_L2', '2022-01-01', '2023-01-01'],
260 | ]
261 | )
262 | def test_Collection_build_filter_dates_le07(collection, start_date, end_date):
263 | """Test that Landsat 7 images after 2021 are filtered"""
264 | output = utils.getinfo(default_coll_obj(
265 | collections=[collection], start_date=start_date, end_date=end_date,
266 | geometry=ee.Geometry.Rectangle(-125, 25, -65, 50))._build(variables=['et']))
267 | assert parse_scene_id(output) == []
268 |
269 |
270 | @pytest.mark.parametrize(
271 | 'collection, start_date, end_date',
272 | [
273 | ['LANDSAT/LC08/C02/T1_L2', '2013-01-01', '2013-04-01'],
274 | ]
275 | )
276 | def test_Collection_build_filter_dates_lc08(collection, start_date, end_date):
277 | """Test that pre-op Landsat 8 images before 2013-04-01 are filtered"""
278 | output = utils.getinfo(default_coll_obj(
279 | collections=[collection], start_date=start_date, end_date=end_date,
280 | geometry=ee.Geometry.Rectangle(-125, 25, -65, 50))._build(variables=['et']))
281 | assert not [x for x in parse_scene_id(output)
282 | if x.split('_')[-1] < end_date.replace('-', '')]
283 | assert parse_scene_id(output) == []
284 |
285 |
286 | @pytest.mark.parametrize(
287 | 'collection, start_date, end_date',
288 | [
289 | ['LANDSAT/LC09/C02/T1_L2', '2021-11-01', '2022-01-01'],
290 | ]
291 | )
292 | def test_Collection_build_filter_dates_lc09(collection, start_date, end_date):
293 | """Test that Landsat 9 images before 2022-01-01 are filtered"""
294 | output = utils.getinfo(default_coll_obj(
295 | collections=[collection], start_date=start_date, end_date=end_date,
296 | geometry=ee.Geometry.Rectangle(-125, 25, -65, 50))._build(variables=['et']))
297 | assert not [x for x in parse_scene_id(output)
298 | if x.split('_')[-1] < end_date.replace('-', '')]
299 | assert parse_scene_id(output) == []
300 |
301 |
302 | def test_Collection_build_filter_args_keyword():
303 | # Need to test with two collections to catch bug when deepcopy isn't used
304 | collections = ['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2']
305 | wrs2_filter = [
306 | {'type': 'equals', 'leftField': 'WRS_PATH', 'rightValue': 44},
307 | {'type': 'equals', 'leftField': 'WRS_ROW', 'rightValue': 33},
308 | ]
309 | coll_obj = default_coll_obj(
310 | collections=collections,
311 | geometry=ee.Geometry.Rectangle(-125, 35, -120, 40),
312 | filter_args={c: wrs2_filter for c in collections})
313 | output = utils.getinfo(coll_obj._build(variables=['et']))
314 | assert {x[5:11] for x in parse_scene_id(output)} == {'044033'}
315 |
316 |
317 | def test_Collection_build_filter_args_eeobject():
318 | # Need to test with two collections to catch bug when deepcopy isn't used
319 | collections = ['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2']
320 | wrs2_filter = ee.Filter.And(ee.Filter.equals('WRS_PATH', 44),
321 | ee.Filter.equals('WRS_ROW', 33))
322 | coll_obj = default_coll_obj(
323 | collections=collections,
324 | geometry=ee.Geometry.Rectangle(-125, 35, -120, 40),
325 | filter_args={c: wrs2_filter for c in collections})
326 | output = utils.getinfo(coll_obj._build(variables=['et']))
327 | assert {x[5:11] for x in parse_scene_id(output)} == {'044033'}
328 |
329 |
330 | # CGM - This argument is hard to differentiate from the generic keyword based
331 | # filter args (see test above) and is not that different than just building
332 | # a single composite filter, so it is not being supported for now.
333 | # def test_Collection_build_filter_args_list():
334 | # # Need to test with two collections to catch bug when deepcopy isn't used
335 | # collections = ['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2']
336 | # wrs2_filter = [ee.Filter.equals('WRS_PATH', 44),
337 | # ee.Filter.equals('WRS_ROW', 33)]
338 | # coll_obj = default_coll_obj(
339 | # collections=collections,
340 | # geometry=ee.Geometry.Rectangle(-125, 35, -120, 40),
341 | # filter_args={c: wrs2_filter for c in collections})
342 | # output = utils.getinfo(coll_obj._build(variables=['et']))
343 | # assert {x[5:11] for x in parse_scene_id(output)} == {'044033'}
344 |
345 |
346 | def test_Collection_overpass_default():
347 | """Test overpass method with default values (variables from Class init)"""
348 | output = utils.getinfo(default_coll_obj().overpass())
349 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
350 | assert parse_scene_id(output) == C02_SCENE_ID_LIST
351 |
352 |
353 | def test_Collection_overpass_class_variables():
354 | """Test that custom class variables are passed through to build function"""
355 | output = utils.getinfo(default_coll_obj(variables=['et']).overpass())
356 | assert {y['id'] for x in output['features'] for y in x['bands']} == {'et'}
357 |
358 |
359 | def test_Collection_overpass_method_variables():
360 | """Test that custom method variables are passed through to build function"""
361 | output = utils.getinfo(default_coll_obj().overpass(variables=['et']))
362 | assert {y['id'] for x in output['features'] for y in x['bands']} == {'et'}
363 |
364 |
365 | def test_Collection_overpass_no_variables_exception():
366 | """Test if Exception is raised if variables is not set in init or method"""
367 | with pytest.raises(ValueError):
368 | utils.getinfo(default_coll_obj(variables=[]).overpass())
369 |
370 |
371 | def test_Collection_interpolate_default():
372 | """Default t_interval should be custom"""
373 | output = utils.getinfo(default_coll_obj().interpolate())
374 | assert output['type'] == 'ImageCollection'
375 | assert parse_scene_id(output) == ['20170701']
376 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
377 |
378 |
379 | @pytest.mark.parametrize('use_joins', [True, False])
380 | def test_Collection_interpolate_use_joins(use_joins):
381 | """Only checking if the parameter is accepted and runs for now"""
382 | output = utils.getinfo(default_coll_obj().interpolate(use_joins=use_joins))
383 | assert output['type'] == 'ImageCollection'
384 | assert parse_scene_id(output) == ['20170701']
385 |
386 |
387 | def test_Collection_interpolate_variables_custom():
388 | output = utils.getinfo(default_coll_obj().interpolate(variables=['et']))
389 | assert [y['id'] for x in output['features'] for y in x['bands']] == ['et']
390 |
391 |
392 | def test_Collection_interpolate_t_interval_daily():
393 | """Test if the daily time interval parameter works
394 |
395 |     Since end_date is exclusive, the last image date will be one day earlier
396 | """
397 | coll_obj = default_coll_obj(start_date='2017-07-01', end_date='2017-07-05')
398 | output = utils.getinfo(coll_obj.interpolate(t_interval='daily'))
399 | assert output['type'] == 'ImageCollection'
400 | assert parse_scene_id(output)[0] == '20170701'
401 | assert parse_scene_id(output)[-1] == '20170704'
402 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
403 |
404 |
405 | def test_Collection_interpolate_t_interval_monthly():
406 | """Test if the monthly time interval parameter works"""
407 | output = utils.getinfo(default_coll_obj().interpolate(t_interval='monthly'))
408 | assert output['type'] == 'ImageCollection'
409 | assert parse_scene_id(output) == ['201707']
410 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
411 |
412 |
413 | # CGM - Commenting out since it takes a really long time to run
414 | # This function could probably be tested for a shorter time period
415 | # def test_Collection_interpolate_t_interval_annual():
416 | # """Test if the annual time interval parameter works"""
417 | # coll_obj = default_coll_obj(start_date='2017-01-01', end_date='2018-01-01')
418 | # output = utils.getinfo(coll_obj.interpolate(t_interval='annual'))
419 | # assert output['type'] == 'ImageCollection'
420 | # assert parse_scene_id(output) == ['2017']
421 | # assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
422 |
423 |
424 | def test_Collection_interpolate_t_interval_custom():
425 | """Test if the custom time interval parameter works"""
426 | output = utils.getinfo(default_coll_obj().interpolate(t_interval='custom'))
427 | assert output['type'] == 'ImageCollection'
428 | assert parse_scene_id(output) == ['20170701']
429 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
430 |
431 |
432 | # TODO: Write test for annual interpolation with a date range that is too short
433 |
434 |
435 | # def test_Collection_interpolate_interp_days():
436 | # """Test if the interpolate interp_days parameter works"""
437 | # # Is there any way to test this without pulling values at a point?
438 |
439 |
440 | # NOTE: For the following tests the collection class is not being
441 | # re-instantiated for each test, so it is necessary to clear the model_args
442 | def test_Collection_interpolate_et_reference_source_not_set():
443 | """Test if Exception is raised if et_reference_source is not set"""
444 | with pytest.raises(ValueError):
445 | utils.getinfo(default_coll_obj(
446 | et_reference_source=None, model_args={}).interpolate())
447 |
448 |
449 | def test_Collection_interpolate_et_reference_band_not_set():
450 | """Test if Exception is raised if et_reference_band is not set"""
451 | with pytest.raises(ValueError):
452 | utils.getinfo(default_coll_obj(
453 | et_reference_band=None, model_args={}).interpolate())
454 |
455 |
456 | def test_Collection_interpolate_et_reference_factor_not_set():
457 | """Test if Exception is raised if et_reference_factor is not set"""
458 | with pytest.raises(ValueError):
459 | utils.getinfo(default_coll_obj(
460 | et_reference_factor=None, model_args={}).interpolate())
461 |
462 |
463 | def test_Collection_interpolate_et_reference_factor_exception():
464 |     """Test if Exception is raised if et_reference_factor is negative or not a number"""
465 | with pytest.raises(ValueError):
466 | utils.getinfo(default_coll_obj(
467 | et_reference_factor=-1, model_args={}).interpolate())
468 |
469 |
470 | # CGM - Resample is not working so commenting out for now
471 | # def test_Collection_interpolate_et_reference_resample_not_set():
472 | # """Test if Exception is raised if et_reference_resample is not set"""
473 | # with pytest.raises(ValueError):
474 | # utils.getinfo(default_coll_obj(
475 | # et_reference_resample=None, model_args={}).interpolate())
476 |
477 |
478 | def test_Collection_interpolate_et_reference_resample_exception():
479 |     """Test if Exception is raised if et_reference_resample is not a supported method"""
480 | with pytest.raises(ValueError):
481 | utils.getinfo(default_coll_obj(
482 | et_reference_resample='deadbeef', model_args={}).interpolate())
483 |
484 |
485 | def test_Collection_interpolate_et_reference_date_type_exception():
486 |     """Test if Exception is raised if et_reference_date_type is not a supported value"""
487 | with pytest.raises(ValueError):
488 | utils.getinfo(default_coll_obj(
489 | et_reference_date_type='deadbeef', model_args={}).interpolate())
490 |
491 |
492 | def test_Collection_interpolate_et_reference_params_kwargs():
493 | """Test setting et_reference parameters in the Collection init args"""
494 | output = utils.getinfo(default_coll_obj(
495 | # collections=['LANDSAT/LC08/C02/T1_L2'],
496 | et_reference_source='IDAHO_EPSCOR/GRIDMET', et_reference_band='etr',
497 | et_reference_factor=0.5, et_reference_resample='bilinear',
498 | model_args={}).interpolate(use_joins=True))
499 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
500 | assert output['features'][0]['properties']['et_reference_factor'] == 0.5
501 | assert output['features'][0]['properties']['et_reference_resample'] == 'bilinear'
502 |
503 |
504 | def test_Collection_interpolate_et_reference_params_model_args():
505 | """Test setting et_reference parameters in the model_args"""
506 | output = utils.getinfo(default_coll_obj(
507 | et_reference_source=None, et_reference_band=None,
508 | et_reference_factor=None, et_reference_resample=None,
509 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
510 | 'et_reference_band': 'etr', 'et_reference_factor': 0.5,
511 | 'et_reference_resample': 'bilinear'}).interpolate(use_joins=True))
512 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
513 | assert output['features'][0]['properties']['et_reference_factor'] == 0.5
514 | assert output['features'][0]['properties']['et_reference_resample'] == 'bilinear'
515 |
516 |
517 | def test_Collection_interpolate_et_reference_params_interpolate_args():
518 | """Test setting et_reference parameters in the interpolate call"""
519 | et_reference_args = {'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
520 | 'et_reference_band': 'etr', 'et_reference_factor': 0.5,
521 | 'et_reference_resample': 'bilinear'}
522 | output = utils.getinfo(default_coll_obj(
523 | et_reference_source=None, et_reference_band=None,
524 | et_reference_factor=None, et_reference_resample=None,
525 | model_args={}).interpolate(use_joins=True, **et_reference_args))
526 | assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
527 | assert output['features'][0]['properties']['et_reference_factor'] == 0.5
528 | assert output['features'][0]['properties']['et_reference_resample'] == 'bilinear'
529 |
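# Editor's note: the three tests above exercise the three places the
# et_reference_* parameters can be supplied: the Collection init args,
# the model_args dictionary, and keyword arguments to interpolate().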
530 |
531 | def test_Collection_interpolate_t_interval_exception():
532 | """Test if Exception is raised for an invalid t_interval parameter"""
533 | with pytest.raises(ValueError):
534 | utils.getinfo(default_coll_obj().interpolate(t_interval='DEADBEEF'))
535 |
536 |
537 | def test_Collection_interpolate_interp_method_exception():
538 | """Test if Exception is raised for an invalid interp_method parameter"""
539 | with pytest.raises(ValueError):
540 | utils.getinfo(default_coll_obj().interpolate(interp_method='DEADBEEF'))
541 |
542 |
543 | def test_Collection_interpolate_interp_days_exception():
544 | """Test if Exception is raised for an invalid interp_days parameter"""
545 | with pytest.raises(ValueError):
546 | utils.getinfo(default_coll_obj().interpolate(interp_days=0))
547 |
548 |
549 | def test_Collection_interpolate_no_variables_exception():
550 | """Test if Exception is raised if variables is not set in init or method"""
551 | with pytest.raises(ValueError):
552 | utils.getinfo(default_coll_obj(variables=[]).interpolate())
553 |
554 |
555 | def test_Collection_interpolate_output_type_default():
556 | """Test if output_type parameter is defaulting to float"""
557 | test_vars = ['et', 'et_reference', 'et_fraction', 'ndvi', 'count']
558 | output = utils.getinfo(default_coll_obj(variables=test_vars).interpolate())
559 | output = output['features'][0]['bands']
560 | bands = {info['id']: i for i, info in enumerate(output)}
561 | assert output[bands['et']]['data_type']['precision'] == 'float'
562 | assert output[bands['et_reference']]['data_type']['precision'] == 'float'
563 | assert output[bands['et_fraction']]['data_type']['precision'] == 'float'
564 | assert output[bands['ndvi']]['data_type']['precision'] == 'float'
565 | assert output[bands['count']]['data_type']['precision'] == 'int'
566 |
567 |
568 | def test_Collection_interpolate_only_interpolate_images():
569 | """Test if count band is returned if no images in the date range"""
570 | variables = {'et', 'count'}
571 | output = utils.getinfo(default_coll_obj(
572 | collections=['LANDSAT/LC08/C02/T1_L2'],
573 | geometry=ee.Geometry.Point(-123.623, 44.745),
574 | start_date='2017-04-01', end_date='2017-04-30',
575 | variables=list(variables), cloud_cover_max=70).interpolate())
576 | assert {y['id'] for x in output['features'] for y in x['bands']} == variables
577 |
578 |
579 | def test_Collection_interpolate_daily_et_reference_date_type_doy(tol=0.01):
580 | """Test interpolating a daily collection using a reference ET climatology"""
581 | output_coll = default_coll_obj(
582 | collections=['LANDSAT/LC08/C02/T1_L2'],
583 | geometry=ee.Geometry.Point(TEST_POINT),
584 | # CGM - Testing the full month returns a memory error
585 | start_date='2017-07-01', end_date='2017-07-10',
586 | # start_date=START_DATE, end_date=END_DATE,
587 | variables=['et_reference'],
588 | et_reference_source='projects/usgs-ssebop/pet/gridmet_median_v1',
589 | et_reference_band='etr', et_reference_factor=1.0,
590 | et_reference_resample='nearest', et_reference_date_type='doy',
591 | ).interpolate(t_interval='daily')
592 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=10)
593 | assert abs(output['et_reference']['2017-07-01'] - 8.75) <= tol
594 | # assert abs(output['et_reference'][START_DATE] - 8.75) <= tol
595 |
596 |
597 | def test_Collection_interpolate_monthly_et_reference_date_type_doy(tol=0.01):
598 | """Test interpolating a monthly collection using a reference ET climatology"""
599 | output_coll = default_coll_obj(
600 | collections=['LANDSAT/LC08/C02/T1_L2'],
601 | geometry=ee.Geometry.Point(TEST_POINT),
602 | start_date=START_DATE, end_date=END_DATE,
603 | variables=['et_reference'],
604 | et_reference_source='projects/usgs-ssebop/pet/gridmet_median_v1',
605 | et_reference_band='etr', et_reference_factor=1.0,
606 | et_reference_resample='nearest', et_reference_date_type='doy',
607 | ).interpolate(t_interval='monthly')
608 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=10)
609 | assert abs(output['et_reference'][START_DATE] - 291.56) <= tol
610 |
611 |
612 | @pytest.mark.parametrize(
613 | 'collections, scene_id_list',
614 | [
615 | [['LANDSAT/LC08/C02/T1_L2', 'LANDSAT/LE07/C02/T1_L2'], C02_SCENE_ID_LIST],
616 | ]
617 | )
618 | def test_Collection_get_image_ids(collections, scene_id_list):
619 | # get_image_ids method makes a getInfo call internally
620 | output = default_coll_obj(collections=collections, variables=None).get_image_ids()
621 | assert type(output) is list
622 | assert set(x.split('/')[-1] for x in output) == set(scene_id_list)
623 |
--------------------------------------------------------------------------------
/openet/ssebop/tests/test_d_interpolate.py:
--------------------------------------------------------------------------------
1 | # import pprint
2 |
3 | import ee
4 | import pytest
5 |
6 | import openet.ssebop.interpolate as interpolate
7 | import openet.ssebop.utils as utils
8 |
9 |
10 | def scene_coll(variables, etf=[0.4, 0.4, 0.4], et=[5, 5, 5], ndvi=[0.6, 0.6, 0.6]):
11 | """Return a generic scene collection to test scene interpolation functions
12 |
13 | Parameters
14 | ----------
15 | variables : list
16 | The variables to return in the collection
17 |     etf : list
18 | et : list
19 | ndvi : list
20 |
21 | Returns
22 | -------
23 | ee.ImageCollection
24 |
25 | """
26 | img = (
27 | ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_044033_20170716')
28 | .select(['SR_B3']).double().multiply(0)
29 | )
30 |
31 | # The "time" is advanced to match the typical Landsat overpass time
32 | time1 = ee.Number(ee.Date.fromYMD(2017, 7, 8).advance(18, 'hours').millis())
33 | time2 = ee.Number(ee.Date.fromYMD(2017, 7, 16).advance(18, 'hours').millis())
34 | time3 = ee.Number(ee.Date.fromYMD(2017, 7, 24).advance(18, 'hours').millis())
35 |
36 | # TODO: Add code to convert et, et_fraction, and ndvi to lists if they
37 | # are set as a single value
38 |
39 | # Don't add mask or time band to scene collection
40 | # since they are now added in the interpolation calls
41 | scene_coll = ee.ImageCollection.fromImages([
42 | ee.Image([img.add(etf[0]), img.add(et[0]), img.add(ndvi[0])])
43 | .rename(['et_fraction', 'et', 'ndvi'])
44 | .set({'system:index': 'LE07_044033_20170708', 'system:time_start': time1}),
45 | ee.Image([img.add(etf[1]), img.add(et[1]), img.add(ndvi[1])])
46 | .rename(['et_fraction', 'et', 'ndvi'])
47 | .set({'system:index': 'LC08_044033_20170716', 'system:time_start': time2}),
48 | ee.Image([img.add(etf[2]), img.add(et[2]), img.add(ndvi[2])])
49 | .rename(['et_fraction', 'et', 'ndvi'])
50 | .set({'system:index': 'LE07_044033_20170724', 'system:time_start': time3}),
51 | ])
52 |
53 | return scene_coll.select(variables)
54 |
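# Editor's sketch for the TODO in scene_coll above: a hypothetical helper
# (not in the source) that would let etf, et, and ndvi be passed as scalars.
# def _as_list(value, n=3):
#     """Broadcast a scalar to an n item list, pass lists/tuples through"""
#     return list(value) if isinstance(value, (list, tuple)) else [value] * n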
55 |
56 | def test_from_scene_et_fraction_t_interval_daily_values(tol=0.0001):
57 | output_coll = interpolate.from_scene_et_fraction(
58 | scene_coll(['et_fraction', 'ndvi'], ndvi=[0.6, 0.6, 0.6]),
59 | start_date='2017-07-01',
60 | end_date='2017-08-01',
61 | variables=['et', 'et_reference', 'et_fraction', 'ndvi'],
62 | interp_args={'interp_method': 'linear', 'interp_days': 32},
63 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
64 | 'et_reference_band': 'etr',
65 | 'et_reference_resample': 'nearest'},
66 | t_interval='daily',
67 | )
68 |
69 | TEST_POINT = (-121.5265, 38.7399)
70 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
71 | assert abs(output['ndvi']['2017-07-10'] - 0.6) <= tol
72 | assert abs(output['et_fraction']['2017-07-10'] - 0.4) <= tol
73 | assert abs(output['et_reference']['2017-07-10'] - 10.5) <= tol
74 | assert abs(output['et']['2017-07-10'] - (10.5 * 0.4)) <= tol
75 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
76 | assert abs(output['et_fraction']['2017-07-31'] - 0.4) <= tol
77 |     assert '2017-08-01' not in output['et_fraction']
78 | # assert output['count']['2017-07-01'] == 3
79 |
80 |
81 | def test_from_scene_et_fraction_t_interval_monthly_values(tol=0.0001):
82 | output_coll = interpolate.from_scene_et_fraction(
83 | scene_coll(['et_fraction', 'ndvi']),
84 | start_date='2017-07-01',
85 | end_date='2017-08-01',
86 | variables=['et', 'et_reference', 'et_fraction', 'ndvi', 'count'],
87 | interp_args={'interp_method': 'linear', 'interp_days': 32},
88 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
89 | 'et_reference_band': 'etr',
90 | 'et_reference_resample': 'nearest'},
91 | t_interval='monthly',
92 | )
93 |
94 | TEST_POINT = (-121.5265, 38.7399)
95 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
96 | assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol
97 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
98 | assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
99 | assert abs(output['et']['2017-07-01'] - (310.3 * 0.4)) <= tol
100 | assert output['count']['2017-07-01'] == 3
101 |
102 |
103 | def test_from_scene_et_fraction_t_interval_custom_values(tol=0.0001):
104 | output_coll = interpolate.from_scene_et_fraction(
105 | scene_coll(['et_fraction', 'ndvi']),
106 | start_date='2017-07-01',
107 | end_date='2017-08-01',
108 | variables=['et', 'et_reference', 'et_fraction', 'ndvi', 'count'],
109 | interp_args={'interp_method': 'linear', 'interp_days': 32},
110 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
111 | 'et_reference_band': 'etr',
112 | 'et_reference_resample': 'nearest'},
113 | t_interval='custom',
114 | )
115 |
116 | TEST_POINT = (-121.5265, 38.7399)
117 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
118 | assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol
119 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
120 | assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
121 | assert abs(output['et']['2017-07-01'] - (310.3 * 0.4)) <= tol
122 | assert output['count']['2017-07-01'] == 3
123 |
124 |
125 | def test_from_scene_et_fraction_t_interval_custom_daily_count(tol=0.0001):
126 | output_coll = interpolate.from_scene_et_fraction(
127 | scene_coll(['et_fraction', 'ndvi']),
128 | start_date='2017-07-01',
129 | end_date='2017-08-01',
130 | variables=['et_fraction', 'daily_count'],
131 | interp_args={'interp_method': 'linear', 'interp_days': 32},
132 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
133 | 'et_reference_band': 'etr',
134 | 'et_reference_resample': 'nearest'},
135 | t_interval='custom',
136 | )
137 |
138 | TEST_POINT = (-121.5265, 38.7399)
139 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
140 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
141 | assert output['daily_count']['2017-07-01'] == 31
142 |
143 |
144 | def test_from_scene_et_fraction_t_interval_monthly_et_reference_factor(tol=0.0001):
145 | output_coll = interpolate.from_scene_et_fraction(
146 | scene_coll(['et_fraction', 'ndvi']),
147 | start_date='2017-07-01',
148 | end_date='2017-08-01',
149 | variables=['et', 'et_reference', 'et_fraction', 'ndvi', 'count'],
150 | interp_args={'interp_method': 'linear', 'interp_days': 32},
151 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
152 | 'et_reference_band': 'etr',
153 | 'et_reference_factor': 0.5,
154 | 'et_reference_resample': 'nearest'},
155 | t_interval='monthly',
156 | )
157 |
158 | TEST_POINT = (-121.5265, 38.7399)
159 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
160 | assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol
161 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
162 | assert abs(output['et_reference']['2017-07-01'] - 310.3 * 0.5) <= tol
163 | assert abs(output['et']['2017-07-01'] - (310.3 * 0.5 * 0.4)) <= tol
164 | assert output['count']['2017-07-01'] == 3
165 |
166 |
167 | def test_from_scene_et_fraction_t_interval_monthly_et_reference_resample(tol=0.0001):
168 | output_coll = interpolate.from_scene_et_fraction(
169 | scene_coll(['et_fraction', 'ndvi']),
170 | start_date='2017-07-01',
171 | end_date='2017-08-01',
172 | variables=['et', 'et_reference', 'et_fraction', 'ndvi', 'count'],
173 | interp_args={'interp_method': 'linear', 'interp_days': 32},
174 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
175 | 'et_reference_band': 'etr',
176 | 'et_reference_resample': 'bilinear'},
177 | t_interval='monthly',
178 | )
179 |
180 | TEST_POINT = (-121.5265, 38.7399)
181 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
182 | assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol
183 | assert abs(output['et_fraction']['2017-07-01'] - 0.4) <= tol
184 | # CGM - ETo (and ET) test values are slightly different with bilinear resampling
185 | # but ET fraction should be the same
186 | assert abs(output['et_reference']['2017-07-01'] - 309.4239807128906) <= tol
187 | assert abs(output['et']['2017-07-01'] - (309.4239807128906 * 0.4)) <= tol
188 | # assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
189 | # assert abs(output['et']['2017-07-01'] - (310.3 * 0.4)) <= tol
190 | assert output['count']['2017-07-01'] == 3
191 |
192 |
193 | def test_from_scene_et_fraction_t_interval_monthly_et_reference_date_type_doy(tol=0.01):
194 | # Check that et_reference_date_type 'doy' parameter works with a reference ET climatology
195 | output_coll = interpolate.from_scene_et_fraction(
196 | scene_coll(['et_fraction', 'ndvi']),
197 | start_date='2017-07-01',
198 | end_date='2017-08-01',
199 | variables=['et_reference'],
200 | interp_args={'interp_method': 'linear', 'interp_days': 32},
201 | model_args={'et_reference_source': 'projects/usgs-ssebop/pet/gridmet_median_v1',
202 | 'et_reference_band': 'etr',
203 | 'et_reference_resample': 'nearest',
204 | 'et_reference_date_type': 'doy'},
205 | t_interval='monthly',
206 | )
207 |
208 | TEST_POINT = (-121.5265, 38.7399)
209 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
210 | assert abs(output['et_reference']['2017-07-01'] - 291.56) <= tol
211 |
212 |
213 | def test_from_scene_et_fraction_t_interval_monthly_et_reference_date_type_daily(tol=0.01):
214 | # Check that et_reference_date_type 'daily' parameter works with a reference ET collection
215 | output_coll = interpolate.from_scene_et_fraction(
216 | scene_coll(['et_fraction', 'ndvi']),
217 | start_date='2017-07-01',
218 | end_date='2017-08-01',
219 | variables=['et_reference'],
220 | interp_args={'interp_method': 'linear', 'interp_days': 32},
221 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
222 | 'et_reference_band': 'etr',
223 | 'et_reference_resample': 'nearest',
224 | 'et_reference_date_type': 'daily'},
225 | t_interval='monthly',
226 | )
227 |
228 | TEST_POINT = (-121.5265, 38.7399)
229 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
230 | assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
231 |
232 |
233 | def test_from_scene_et_fraction_t_interval_bad_value():
234 | # Function should raise a ValueError if t_interval is not supported
235 | with pytest.raises(ValueError):
236 | interpolate.from_scene_et_fraction(
237 | scene_coll(['et']),
238 | start_date='2017-07-01',
239 | end_date='2017-08-01',
240 | variables=['et'],
241 | interp_args={'interp_method': 'linear', 'interp_days': 32},
242 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
243 | 'et_reference_band': 'etr',
244 | 'et_reference_resample': 'nearest'},
245 | t_interval='deadbeef',
246 | )
247 |
248 |
249 | def test_from_scene_et_fraction_t_interval_no_value():
250 | # Function should raise a TypeError if t_interval is not set
251 | with pytest.raises(TypeError):
252 | interpolate.from_scene_et_fraction(
253 | scene_coll(['et']),
254 | start_date='2017-07-01',
255 | end_date='2017-08-01',
256 | variables=['et', 'et_reference', 'et_fraction', 'count'],
257 | interp_args={'interp_method': 'linear', 'interp_days': 32},
258 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
259 | 'et_reference_band': 'etr',
260 | 'et_reference_resample': 'nearest'},
261 | )
262 |
263 |
264 | def test_from_scene_et_fraction_interp_args_use_joins_true(tol=0.01):
265 | # Check that the use_joins interp_args parameter works
266 | output_coll = interpolate.from_scene_et_fraction(
267 | scene_coll(['et_fraction']),
268 | start_date='2017-07-01',
269 | end_date='2017-08-01',
270 | variables=['et', 'et_reference'],
271 | interp_args={'interp_method': 'linear', 'interp_days': 32, 'use_joins': True},
272 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
273 | 'et_reference_band': 'etr',
274 | 'et_reference_resample': 'nearest'},
275 | t_interval='monthly',
276 | )
277 |
278 | TEST_POINT = (-121.5265, 38.7399)
279 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
280 | assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
281 | assert abs(output['et']['2017-07-01'] - (310.3 * 0.4)) <= tol
282 |
283 |
284 | def test_from_scene_et_fraction_interp_args_use_joins_false(tol=0.01):
285 | # Check that the use_joins interp_args parameter works
286 | output_coll = interpolate.from_scene_et_fraction(
287 | scene_coll(['et_fraction']),
288 | start_date='2017-07-01',
289 | end_date='2017-08-01',
290 | variables=['et', 'et_reference'],
291 | interp_args={'interp_method': 'linear', 'interp_days': 32, 'use_joins': False},
292 | model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
293 | 'et_reference_band': 'etr',
294 | 'et_reference_resample': 'nearest'},
295 | t_interval='monthly',
296 | )
297 |
298 | TEST_POINT = (-121.5265, 38.7399)
299 | output = utils.point_coll_value(output_coll, TEST_POINT, scale=30)
300 | assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol
301 | assert abs(output['et']['2017-07-01'] - (310.3 * 0.4)) <= tol
302 |
--------------------------------------------------------------------------------
/openet/ssebop/utils.py:
--------------------------------------------------------------------------------
1 | import calendar
2 | import datetime
3 | import logging
4 | from time import sleep
5 |
6 | import ee
7 |
8 |
9 | def getinfo(ee_obj, n=4):
10 | """Make an exponential back off getInfo call on an Earth Engine object"""
11 | output = None
12 |     for i in range(1, n + 1):
13 | try:
14 | output = ee_obj.getInfo()
15 | except ee.ee_exception.EEException as e:
16 | logging.info(f' Resending query ({i}/{n})')
17 | logging.info(f' {e}')
18 | sleep(i ** 3)
19 | # if ('Earth Engine memory capacity exceeded' in str(e) or
20 | # 'Earth Engine capacity exceeded' in str(e)):
21 | # logging.info(f' Resending query ({i}/{n})')
22 | # logging.debug(f' {e}')
23 | # sleep(i ** 2)
24 | # else:
25 | # raise e
26 | except Exception as e:
27 | logging.exception('Unhandled exception on getInfo call')
28 | raise e
29 |
30 |         if output is not None:
31 |             break
32 |
33 | return output
34 |
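# Example usage (editor's sketch): with the default n=4 the call is attempted
# up to four times, sleeping i ** 3 seconds (1, 8, 27) between attempts.
# value = getinfo(ee.Number(1))  # -> 1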
35 |
36 | # TODO: Import from openet.core.utils instead of defining here
37 | # Should these be test fixtures instead?
38 | # I'm not sure how to make them fixtures and allow input parameters
39 | def constant_image_value(image, crs='EPSG:32613', scale=1):
40 | """Extract the output value from a calculation done with constant images"""
41 | rr_params = {
42 | 'reducer': ee.Reducer.first(),
43 | 'geometry': ee.Geometry.Rectangle([0, 0, 10, 10], crs, False),
44 | 'scale': scale,
45 | }
46 | return getinfo(ee.Image(image).reduceRegion(**rr_params))
47 |
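# Example (editor's sketch):
# constant_image_value(ee.Image.constant(10.0))  # -> {'constant': 10}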
48 |
49 | def point_image_value(image, xy, scale=1):
50 | """Extract the output value from a calculation at a point"""
51 | rr_params = {
52 | 'reducer': ee.Reducer.first(),
53 | 'geometry': ee.Geometry.Point(xy),
54 | 'scale': scale,
55 | }
56 | return getinfo(ee.Image(image).reduceRegion(**rr_params))
57 |
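# Example (editor's sketch, hypothetical point):
# point_image_value(ee.Image.constant(1), (-120.0, 39.0))  # -> {'constant': 1}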
58 |
59 | def point_coll_value(coll, xy, scale=1):
60 | """Extract the output value from a calculation at a point"""
61 | output = getinfo(coll.getRegion(ee.Geometry.Point(xy), scale=scale))
62 |
63 | # Structure output to easily be converted to a Pandas dataframe
64 | # First key is band name, second key is the date string
65 | col_dict = {}
66 | info_dict = {}
67 | for i, k in enumerate(output[0][4:]):
68 | col_dict[k] = i + 4
69 | info_dict[k] = {}
70 | for row in output[1:]:
71 |         date = datetime.datetime.fromtimestamp(row[3] / 1000.0, datetime.timezone.utc).strftime('%Y-%m-%d')
72 | for k, v in col_dict.items():
73 |             info_dict[k][date] = row[v]
74 |
75 | return info_dict
76 | # return pd.DataFrame.from_dict(info_dict)
77 |
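# Example (editor's sketch, illustrative values) of the returned structure:
# {'et': {'2017-07-01': 4.2, '2017-07-02': 4.3},
#  'ndvi': {'2017-07-01': 0.61, '2017-07-02': 0.60}}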
78 |
79 | def c_to_k(image):
80 | """Convert temperature image from C to K
81 |
82 | Parameters
83 | ----------
84 | image : ee.Image
85 |
86 | Returns
87 | -------
88 | ee.Image
89 |
90 | """
91 | return image.add(273.15)
92 |
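# Example (editor's sketch):
# c_to_k(ee.Image.constant(25.0))  # -> constant image with value 298.15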
93 |
94 | def date_0utc(date):
95 |     """Return the date at 0 UTC for an Earth Engine date
96 |
97 | Parameters
98 | ----------
99 | date : ee.Date
100 |
101 | Returns
102 | -------
103 |     ee.Date
104 |
105 | """
106 | return ee.Date.fromYMD(date.get('year'), date.get('month'), date.get('day'))
107 |
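# Example (editor's sketch):
# date_0utc(ee.Date('2017-07-16T18:00:00'))  # -> ee.Date of 2017-07-16T00:00:00 UTC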
108 |
109 | def is_number(x):
110 | try:
111 | float(x)
112 | return True
113 |     except (TypeError, ValueError):
114 | return False
115 |
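# Example (editor's sketch):
# is_number('1e3')  # -> True
# is_number('abc')  # -> False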
116 |
117 | def millis(input_dt):
118 | """Convert datetime to milliseconds since epoch
119 |
120 | Parameters
121 | ----------
122 | input_dt : datetime
123 |
124 | Returns
125 | -------
126 | int
127 |
128 | """
129 | return 1000 * int(calendar.timegm(input_dt.timetuple()))
130 |
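# Example (editor's sketch):
# millis(datetime.datetime(1970, 1, 2))  # -> 86400000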
131 |
132 | def valid_date(date_str, date_fmt='%Y-%m-%d'):
133 | """Check if a datetime can be built from a date string and if it is valid"""
134 | try:
135 | datetime.datetime.strptime(date_str, date_fmt)
136 | return True
137 |     except (TypeError, ValueError):
138 | return False
139 |
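# Example (editor's sketch):
# valid_date('2017-02-30')  # -> False (February 30 is not a valid date)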
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "openet-ssebop"
3 | version = "0.7.0"
4 | authors = [
5 | { name = "Gabe Parrish", email = "gparrish@contractor.usgs.gov" },
6 | { name = "Mac Friedrichs", email = "mfriedrichs@contractor.usgs.gov" },
7 | { name = "Gabriel Senay", email = "senay@usgs.gov" },
8 | ]
9 | maintainers = [
10 | { name = "Charles Morton", email = "charles.morton@dri.edu" }
11 | ]
12 | description = "Earth Engine implementation of the SSEBop model"
13 | readme = "README.rst"
14 | requires-python = ">=3.9"
15 | keywords = ["SSEBop", "OpenET", "Earth Engine", "Evapotranspiration", "Landsat"]
16 | license = { file = "LICENSE.txt" }
17 | # license = {text = "Apache-2.0"}
18 | classifiers = [
19 | "Programming Language :: Python :: 3",
20 | "License :: OSI Approved :: Apache Software License",
21 | "Operating System :: OS Independent",
22 | ]
23 | dependencies = [
24 | "earthengine-api >= 1.5.2",
25 | "openet-core >= 0.7.0",
26 | "openet-refet-gee >= 0.6.2",
27 | "python-dateutil",
28 | ]
29 |
30 | [project.urls]
31 | "Homepage" = "https://github.com/Open-ET/openet-ssebop"
32 |
33 | [build-system]
34 | requires = ["setuptools>=61.0"]
35 | build-backend = "setuptools.build_meta"
36 |
37 | [project.optional-dependencies]
38 | test = ["pytest"]
39 |
40 | [tool.setuptools.packages.find]
41 | # include = ["openet*"]
42 | exclude = ["docs*", "examples*", "assets*", "build*", "dist*", "*.egg-info*"]
43 |
44 |
--------------------------------------------------------------------------------