├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── 01-Usage-Examples.ipynb ├── 01c_Usage_Examples_Colab.ipynb ├── 02-Dataset-Generation.ipynb ├── LICENSE ├── README.md ├── __init__.py ├── imgs ├── back_blur.png ├── basic_cloud.png ├── channel_offset.png ├── clean.tif ├── cloud_color.png ├── cloudy.tif ├── foggy_cloud.png ├── inverted_cloud.png ├── mask.tif ├── rgb_example.png ├── thick_cloud.png └── thin_fog.png ├── setup.py └── src ├── CloudSimulator.py ├── LocalGaussianBlur ├── LocalGaussianBlur.py ├── README.md ├── __init__.py └── src │ ├── LocalGaussianBlur.py │ └── __init__.py ├── __init__.py ├── band_magnitudes.py ├── configs.py ├── extras.py └── noise.py /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package to PyPI when a Release is Created 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | pypi-publish: 9 | name: Publish release to PyPI 10 | runs-on: ubuntu-latest 11 | environment: 12 | name: pypi 13 | url: https://pypi.org/p/satellite-cloud-generator 14 | permissions: 15 | id-token: write 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python 19 | uses: actions/setup-python@v4 20 | with: 21 | python-version: "3.x" 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install setuptools wheel 26 | - name: Build package 27 | run: | 28 | python setup.py sdist bdist_wheel # Could also be python -m build 29 | - name: Publish package distributions to PyPI 30 | uses: pypa/gh-action-pypi-publish@release/v1 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Custom 2 | data/ 3 | *log 4 | *logs 5 | external/ 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | pip-wheel-metadata/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # :cloud: Satellite Cloud Generator 2 | [![MDPI](https://img.shields.io/badge/Open_Access-MDPI-green)](https://www.mdpi.com/2072-4292/15/17/4138) [![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/cidcom/SatelliteCloudGenerator/blob/main/01c_Usage_Examples_Colab.ipynb) [![Zenodo](https://zenodo.org/badge/532972529.svg)](https://zenodo.org/badge/latestdoi/532972529) 3 | [![[YouTube]](https://img.shields.io/badge/YouTube-%23FF0000.svg?style=flat&logo=youtube&logoColor=white)](https://youtu.be/RgAF2Y4O9zA) 4 | 5 | :star: **NEW:** This tool has been featured in the 📺 first episode of the [**satellite-image-deep-learning podcast**](https://youtu.be/RgAF2Y4O9zA)! :star: 6 | 7 | --- 8 | 9 | A PyTorch-based tool for simulating clouds in satellite images. 10 | 11 | This tool allows for generating artificial clouds in an image using structural noise, such as Perlin noise; intended for applications where pairs of clear-sky and cloudy images are required or useful. 12 | For example, it can be used to **generate training data** for tasks such as **cloud detection** or **cloud removal**, or simply as a method of **augmentation** of satellite image data for other tasks. 13 | 14 | The images must be in shape `(channel, height, width)` or `(batch, channel, height, width)` and are also returned in that format. 15 | 16 | --- 17 | 18 | ### Open Access Journal 19 | This tool is accompanied by the **open access publication** at https://www.mdpi.com/2072-4292/15/17/4138. 20 | 21 | If you found this tool useful, please cite accordingly: 22 | ```bibtex 23 | @Article{rs15174138, 24 | author = {Czerkawski, Mikolaj and Atkinson, Robert and Michie, Craig and Tachtatzis, Christos}, 25 | title = {SatelliteCloudGenerator: Controllable Cloud and Shadow Synthesis for Multi-Spectral Optical Satellite Images}, 26 | journal = {Remote Sensing}, 27 | volume = {15}, 28 | year = {2023}, 29 | number = {17}, 30 | article-number = {4138}, 31 | url = {https://www.mdpi.com/2072-4292/15/17/4138}, 32 | issn = {2072-4292}, 33 | doi = {10.3390/rs15174138} 34 | } 35 | ``` 36 | 37 | ### Installation 38 | ```bash 39 | pip install git+https://github.com/strath-ai/SatelliteCloudGenerator 40 | ``` 41 | 42 | and then import: 43 | ```python 44 | import satellite_cloud_generator as scg 45 | 46 | cloudy_img = scg.add_cloud_and_shadow(clear_img) 47 | ``` 48 | 49 | ## :gear: Usage 50 | Basic usage, takes a `clear` image and returns a `cloudy` version along with a corresponding channel-specific transparency `mask`: 51 | ```python 52 | cloudy, mask = scg.add_cloud(clear, 53 | min_lvl=0.0, 54 | max_lvl=1.0 55 | ) 56 | ``` 57 | ...resulting in the following: 58 | 59 | ![Basic Example](imgs/thick_cloud.png) 60 | 61 | The `min_lvl` and `max_lvl` control the range of values of the transparency `mask`. 62 | 63 | ### Generator Module 64 | You can also use a `CloudGenerator` object that binds a specific configuration (or a set of configurations) with the wrapped generation methods: 65 | ```python 66 | my_gen=scg.CloudGenerator(scg.WIDE_CONFIG,cloud_p=1.0,shadow_p=0.5) 67 | my_gen(my_image) # will act just like add_cloud_and_shadow() but will preserve the same configuration! 68 | ``` 69 | 70 | ## Selected Features (There's more! Scroll down for full list) 71 | Apart from synthesizing a random cloud, the tool provides several additional features (switched on by default) to make the appearance of the clouds more realistic, inspired by [(Lee2019)](https://ieeexplore.ieee.org/document/8803666). 72 | 73 | ### 1. Cloud Color 74 | The `cloud_color` setting adjusts the color of the base added cloud based on the mean color of the clear ground image. (Disable by passing `cloud_color=False`) 75 | 76 | ![Cloud Color](imgs/cloud_color.png) 77 | --- 78 | ### 2. Channel Offset 79 | Spatial offsets between individual cloud image channels can be achieved by setting `channel_offset` to a positive integer value. (Disable by passing `channel_offset=0`) 80 | 81 | ![Channel Offset](imgs/channel_offset.png) 82 | --- 83 | ### 3. Blur-Under-the-Cloud 84 | Blurring of the ground image based on the cloud thickness can be achieved by adjusting the `blur_scaling` parameter (with `0.0` disabling the effect). (Disable by passing `blur_scaling=0`) 85 | > :warning: The blur operation significantly increases memory footprint (caused by the internal `unfold` operation). 86 | 87 | ![Blur](imgs/back_blur.png) 88 | 89 | ## Summary of Parameters 90 | ```python 91 | def add_cloud_and_shadow(input, 92 | max_lvl=(0.95,1.0), 93 | min_lvl=(0.0, 0.05), 94 | channel_magnitude=None, 95 | shadow_max_lvl=[0.3,0.6], 96 | clear_threshold=0.0, 97 | noise_type = 'perlin', 98 | const_scale=True, 99 | decay_factor=1, 100 | locality_degree=1, 101 | channel_offset=2, 102 | channel_magnitude_shift=0.05, 103 | blur_scaling=2.0, 104 | cloud_color=True, 105 | return_cloud=False 106 | ): 107 | """ Takes an input image of shape [batch,channels,height, width] 108 | and returns a generated cloudy version of the input image, with additional shadows added to the ground image""" 109 | ``` 110 | 111 | | Argument | Description | Default value | 112 | |:---:|:---|:---| 113 | | input (Tensor) | input image in shape [B,C,H,W]| | 114 | |max_lvl (float or tuple of floats) | Indicates the maximum strength of the cloud (1.0 means that some pixels will be fully non-transparent)|`(0.95,1.0)`| 115 | |min_lvl (float or tuple of floats)| Indicates the minimum strength of the cloud (0.0 means that some pixels will have no cloud)|`(0.0, 0.05)`| 116 | |channel_magnitude (Tensor) | (optional) cloud magnitudes in each channel, shape [B,C,1,1]|`None`| 117 | |clear_threshold (float)|An optional threshold for cutting off some part of the initial generated cloud mask|`0.0`| 118 | |shadow_max_lvl (float)|Indicates the maximum strength of the cloud (1.0 means that some pixels will be completely black)|`[0.3,0.6]`| 119 | |noise_type (string: 'perlin', 'flex')|Method of noise generation (currently supported: 'perlin', 'flex')|`'perlin'`| 120 | |const_scale (bool)|If True, the spatial frequencies of the cloud/shadow shape are scaled based on the image size (this makes the cloud preserve its appearance regardless of image resolution)|`True`| 121 | |decay_factor (float)|decay factor that narrows the spectrum of the generated noise (higher values, such as 2.0 will reduce the amplitude of high spatial frequencies, yielding a 'blurry' cloud)|`1`| 122 | |locality degree (int)|more local clouds shapes can be achieved by multiplying several random cloud shapes with each other (value of 1 disables this effect, and higher integers correspond to the number of multiplied masks)|`1`| 123 | |channel_offset (int)|optional offset that can randomly misalign spatially the individual cloud mask channels (by a value in range -channel_offset and +channel_offset)|`2`| 124 | |blur_scaling (float)|Scaling factor for the variance of locally varying Gaussian blur (dependent on cloud thickness). Value of 0 will disable this feature.|`2.0`| 125 | |cloud_color (bool)|If True, it will adjust the color of the cloud based on the mean color of the clear sky image|`True`| 126 | |return_cloud (bool)|If True, it will return a channel-wise cloud mask of shape [height, width, channels] along with the cloudy image|`True`| 127 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .src import * 2 | -------------------------------------------------------------------------------- /imgs/back_blur.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/back_blur.png -------------------------------------------------------------------------------- /imgs/basic_cloud.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/basic_cloud.png -------------------------------------------------------------------------------- /imgs/channel_offset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/channel_offset.png -------------------------------------------------------------------------------- /imgs/clean.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/clean.tif -------------------------------------------------------------------------------- /imgs/cloud_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/cloud_color.png -------------------------------------------------------------------------------- /imgs/cloudy.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/cloudy.tif -------------------------------------------------------------------------------- /imgs/foggy_cloud.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/foggy_cloud.png -------------------------------------------------------------------------------- /imgs/inverted_cloud.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/inverted_cloud.png -------------------------------------------------------------------------------- /imgs/mask.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/mask.tif -------------------------------------------------------------------------------- /imgs/rgb_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/rgb_example.png -------------------------------------------------------------------------------- /imgs/thick_cloud.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/thick_cloud.png -------------------------------------------------------------------------------- /imgs/thin_fog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/strath-ai/SatelliteCloudGenerator/d0787cbdc1b39a7976f89aa085f1430f3207eda6/imgs/thin_fog.png -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name='satellite-cloud-generator', 5 | version='0.4', 6 | description='A PyTorch-based tool for simulating clouds in satellite images', 7 | long_description='A PyTorch-based tool for simulating clouds in satellite images', 8 | url='https://github.com/strath-ai/SatelliteCloudGenerator', 9 | author='Mikolaj Czerkawski, Christos Tachtatzis', 10 | author_email="mikolaj.czerkawski@esa.int", 11 | package_dir={"satellite_cloud_generator":"src"}, 12 | install_requires=[ 13 | "torch>=1.10.0", 14 | "torchvision", 15 | "kornia", 16 | "numpy", 17 | "imageio", 18 | ], 19 | ) 20 | -------------------------------------------------------------------------------- /src/CloudSimulator.py: -------------------------------------------------------------------------------- 1 | import torch as torch 2 | from torch.nn import functional as F 3 | import numpy as np 4 | from matplotlib import pyplot as plt 5 | import kornia.geometry.transform as KT 6 | import random 7 | 8 | from .LocalGaussianBlur import * 9 | from .noise import * 10 | from .configs import * 11 | from .extras import * 12 | 13 | # --- Extra Functions 14 | 15 | def misalign_channels(cloud): 16 | return cloud 17 | 18 | def cloud_hue(image, cloud, scale=1.0): 19 | """ 20 | Mixes the pure white base of a cloud with mean color of the underlying image 21 | 22 | scale (float) controls how 'white' the end result is (the lower the value, the more color the cloud will have) 23 | """ 24 | # Mean Cloud-Free Color 25 | mean_color = image.mean((-2,-1)) 26 | ambience = torch.ones_like(image) 27 | 28 | mask=(cloud*scale).clip(0,1) 29 | 30 | # Safety against pitch black 31 | for b_idx in range(mean_color.shape[0]): 32 | if all(mean_color[b_idx])==0: 33 | mean_color[b_idx]=torch.ones_like(mean_color[b_idx]) # prevent mixing with pitch black 34 | 35 | color_vector=mean_color/mean_color.mean(1,keepdim=True) 36 | color_vector/=color_vector.max(1,keepdim=True)[0] # ensure that no value exceeds 1.0 37 | return ambience*mask + color_vector.unsqueeze(-1).unsqueeze(-1)*ambience*(1-mask) 38 | 39 | 40 | # --- Mixing Methods 41 | 42 | def mix(input, cloud, shadow=None, channel_magnitude=None, blur_scaling=2.0, cloud_color=True, invert=False): 43 | """ Mixing Operation for an input image and a cloud 44 | 45 | Args: 46 | input (Tensor) : input image [height, width, channels] 47 | 48 | cloud (Tensor) : cloud of the same shape [height, width, channels] 49 | 50 | blur_scaling (float): Scaling factor for the variance of locally varying Gaussian blur (dependent on cloud thickness). Value of 0 will disable this feature. 51 | 52 | cloud_color (bool): If True, it will adjust the color of the cloud based on the mean color of the clear sky image 53 | 54 | invert (bool) : for some applications, the cloud can be inverted to effectively decrease the level of reflected power (see thermal example in the notebook) 55 | 56 | Returns: 57 | 58 | Tensor: Tensor containing a mixed image 59 | 60 | """ 61 | if channel_magnitude is None: 62 | channel_magnitude=torch.ones(*input.shape[:-2],1,1,device=input.device) 63 | else: 64 | channel_magnitude=channel_magnitude.view(*input.shape[:-2],1,1) 65 | 66 | if shadow is not None: 67 | # reuse the same function, with shadow mask as 'cloud' mask (and inverting the 68 | input=mix(input, shadow, blur_scaling=0.0, cloud_color=False, invert=not invert) 69 | 70 | # blurring background (optional) 71 | if blur_scaling != 0.0: 72 | modulator = cloud.mean(1) # average cloud thickness 73 | input = local_gaussian_blur(input, 74 | blur_scaling*modulator) 75 | 76 | # mix the cloud 77 | if invert: 78 | output = input * (1-cloud.clip(0,1)) 79 | else: 80 | # use max_lvl to multiply the resulting cloud base 81 | max_lvl=cloud.max() if cloud.max()>1.0 else 1.0 82 | cloud_base = torch.ones_like(input) if not cloud_color else cloud_hue(input, cloud) 83 | cloud_base = channel_magnitude*cloud_base 84 | output = input*(1-cloud/max_lvl) + max_lvl*cloud_base*(cloud/max_lvl) 85 | 86 | return output 87 | 88 | class CloudGenerator(torch.nn.Module): 89 | 90 | """ Wrapper object for the add_cloud() and add_cloud_and_shadow() methods. 91 | It stores the parameters to these functions in a config dictionary 92 | 93 | 94 | """ 95 | 96 | def __init__(self, 97 | config, 98 | cloud_p=1.0, 99 | shadow_p=1.0 100 | ): 101 | super().__init__() 102 | 103 | self.cloud_p=cloud_p 104 | self.shadow_p=shadow_p 105 | 106 | if isinstance(config,dict): 107 | self.config=[config] # put into a list if it's a single config 108 | else: 109 | self.config=config 110 | 111 | def choose_config(self): 112 | return random.choice(self.config) 113 | 114 | def segmentation_mask(self,*args,**kwargs): 115 | # wraps for segmentation mask for external use 116 | return segmentation_mask(*args,**kwargs) 117 | 118 | def forward(self,img,*args,return_cloud=False,**kwargs): 119 | # decide which config from the list (if multiple) 120 | used_config=self.choose_config() 121 | 122 | # decide what to simulate 123 | do_cloud=random.random()<=self.cloud_p 124 | do_shadow=random.random()<=self.shadow_p 125 | 126 | # synth both cloud and shadow 127 | if do_shadow and do_cloud: 128 | out=add_cloud_and_shadow(img,*args,**kwargs,**used_config, return_cloud=return_cloud) 129 | 130 | if return_cloud: 131 | out,cloud,shadow=out 132 | else: 133 | cloud,shadow=None,None 134 | 135 | # synth only cloud 136 | elif do_cloud: 137 | out=add_cloud(img,*args,**kwargs,**used_config,return_cloud=return_cloud) 138 | 139 | if return_cloud: 140 | out,cloud=out 141 | shadow=torch.zeros_like(out) 142 | else: 143 | cloud,shadow=None,None 144 | # no additions 145 | else: 146 | out=torch.from_numpy(img) if not torch.is_tensor(img) else img 147 | cloud,shadow=torch.zeros_like(out),torch.zeros_like(out) 148 | 149 | # return format 150 | if return_cloud: 151 | return out,cloud,shadow 152 | else: 153 | return out 154 | 155 | def __or__(self, other): 156 | cfg1=self.config 157 | cfg2=other.config 158 | # inherit maximum probability of the two parents 159 | cloud_p=max([self.cloud_p, other.cloud_p]) 160 | shadow_p=max([self.shadow_p, other.shadow_p]) 161 | return CloudGenerator(config=cfg1+cfg2, 162 | cloud_p=cloud_p, 163 | shadow_p=shadow_p) 164 | 165 | def __str__(self): 166 | N=len(self.config) 167 | 168 | return ("CloudGenerator(cloud_p={:.2f},shadow_p={:.2f},{} config(s))").format(self.cloud_p,self.shadow_p,N) 169 | 170 | def __repr__(self): 171 | N=len(self.config) 172 | 173 | config_desc="" 174 | for c in self.config: 175 | config_desc+="\n{" 176 | for key in c: 177 | config_desc+="\t{}: {}\n".format(key,c[key]) 178 | config_desc+="}" 179 | 180 | return ("CloudGenerator(cloud_p={:.2f},shadow_p={:.2f},\n{} config(s):{})").format(self.cloud_p,self.shadow_p,N,config_desc) 181 | 182 | def add_cloud(input, 183 | max_lvl=(0.95,1.0), 184 | min_lvl=(0.0, 0.05), 185 | channel_magnitude=None, 186 | clear_threshold=0.0, 187 | noise_type = 'perlin', 188 | const_scale=True, 189 | decay_factor=1, 190 | locality_degree=1, 191 | invert=False, 192 | channel_magnitude_shift=0.05, 193 | channel_offset=2, 194 | blur_scaling=2.0, 195 | cloud_color=True, 196 | return_cloud=False 197 | ): 198 | """ Takes an input image of shape [batch, channels, height, width] 199 | and returns a generated cloudy version of the input image 200 | 201 | Args: 202 | input (Tensor) : input image in shape [B,C,H,W] 203 | 204 | max_lvl (float or tuple of floats): Indicates the maximum strength of the cloud (1.0 means that some pixels will be fully non-transparent) 205 | 206 | min_lvl (float or tuple of floats): Indicates the minimum strength of the cloud (0.0 means that some pixels will have no cloud) 207 | channel_magnitude (Tensor) : cloud magnitudes in each channel, shape [B,C,1,1] 208 | 209 | clear_threshold (float): An optional threshold for cutting off some part of the initial generated cloud mask 210 | 211 | noise_type (string: 'perlin', 'flex'): Method of noise generation (currently supported: 'perlin', 'flex') 212 | 213 | const_scale (bool): If True, the spatial frequencies of the cloud shape are scaled based on the image size (this makes the cloud preserve its appearance regardless of image resolution) 214 | 215 | decay_factor (float): decay factor that narrows the spectrum of the generated noise (higher values, such as 2.0 will reduce the amplitude of high spatial frequencies, yielding a 'blurry' cloud) 216 | 217 | locality degree (int): more local clouds shapes can be achieved by multiplying several random cloud shapes with each other (value of 1 disables this effect, and higher integers correspond to the number of multiplied masks) 218 | 219 | invert (bool) : for some applications, the cloud can be inverted to effectively decrease the level of reflected power (see thermal example in the notebook) 220 | 221 | channel_offset (int): optional offset that can randomly misalign spatially the individual cloud mask channels (by a value in range -channel_offset and +channel_offset) 222 | 223 | channel_magniutde_shift (float): optional offset from the reference cloud mask magnitude for individual channels, if non-zero, then each channel will have a cloud magnitude uniformly sampled from C+-channel_magnitude, where C is the reference cloud mask 224 | 225 | blur_scaling (float): Scaling factor for the variance of locally varying Gaussian blur (dependent on cloud thickness). Value of 0 will disable this feature. 226 | 227 | cloud_color (bool): If True, it will adjust the color of the cloud based on the mean color of the clear sky image 228 | 229 | return_cloud (bool): If True, it will return a channel-wise cloud mask of shape [height, width, channels] along with the cloudy image 230 | 231 | Returns: 232 | 233 | Tensor: Tensor containing a generated cloudy image (and a cloud mask if return_cloud == True) 234 | 235 | """ 236 | 237 | if not torch.is_tensor(input): 238 | input = torch.FloatTensor(input) 239 | 240 | while len(input.shape) < 4: 241 | input = input.unsqueeze(0) 242 | 243 | b,c,h,w = input.shape 244 | device=input.device 245 | 246 | # --- Potential Sampling of Parameters (if provided as a range) 247 | min_lvl=torch.tensor(min_lvl, device=device) 248 | max_lvl=torch.tensor(max_lvl, device=device) 249 | 250 | if len(min_lvl.shape) != 0: 251 | min_lvl = min_lvl[0] +(min_lvl[1]-min_lvl[0])*torch.rand([b,1,1,1], device=device) 252 | 253 | # max_lvl is dependent on min_lvl (cannot be less than min_lvl) 254 | if len(max_lvl.shape) != 0: 255 | max_floor=min_lvl+F.relu(max_lvl[0]-min_lvl) 256 | max_lvl = max_floor + (max_lvl[1]-max_floor)*torch.rand([b,1,1,1], device=device) 257 | 258 | # ensure max_lvl does not go below min_lvl 259 | max_lvl=min_lvl+F.relu(max_lvl-min_lvl) 260 | 261 | # clear_threshold 262 | if isinstance(clear_threshold, tuple) or isinstance(clear_threshold, list): 263 | clear_threshold = clear_threshold[0] +(clear_threshold[1]-clear_threshold[0])*torch.rand([b,1,1], device=device) 264 | 265 | # decay_factor 266 | if isinstance(decay_factor, tuple) or isinstance(decay_factor, list): 267 | decay_factor = float(decay_factor[0] +(decay_factor[1]-decay_factor[0])*torch.rand([1,1])) 268 | 269 | # locality_degree 270 | if isinstance(locality_degree, tuple) or isinstance(locality_degree, list): 271 | locality_degree = int(locality_degree[0]+torch.randint(1+locality_degree[1]-locality_degree[0],(1,1))) 272 | 273 | # --- End of Parameter Sampling 274 | locality_degree=max([1, int(locality_degree)]) 275 | 276 | net_noise_shape=torch.ones((b,h,w),device=device) 277 | for idx in range(locality_degree): 278 | # generate noise shape 279 | if noise_type == 'perlin': 280 | noise_shape=generate_perlin(shape=(h,w), batch=b, device=device, const_scale=const_scale, decay_factor=decay_factor) 281 | elif noise_type == 'flex': 282 | noise_shape = flex_noise(h,w, const_scale=const_scale, decay_factor=decay_factor) 283 | else: 284 | raise NotImplementedError 285 | 286 | noise_shape -= noise_shape.min() 287 | noise_shape /= noise_shape.max() 288 | 289 | net_noise_shape*=noise_shape 290 | 291 | # apply non-linearities and rescale 292 | net_noise_shape[net_noise_shape < clear_threshold] = 0.0 293 | net_noise_shape -= clear_threshold 294 | net_noise_shape = net_noise_shape.clip(0,1) 295 | if not net_noise_shape.max()==0: 296 | net_noise_shape /= net_noise_shape.max() 297 | 298 | # channel-wise mask 299 | cloud=(net_noise_shape.unsqueeze(1)*(max_lvl-min_lvl) + min_lvl).expand(b,c,h,w) 300 | 301 | # channel-wise thickness difference 302 | if channel_magnitude_shift != 0.0: 303 | channel_magnitude_shift=abs(channel_magnitude_shift) 304 | weights=channel_magnitude_shift*(2*torch.rand(c, device=device)-1)+1 305 | cloud=(weights[:,None,None]*cloud) 306 | 307 | # channel offset (optional) 308 | if channel_offset != 0: 309 | offsets = torch.randint(-channel_offset, channel_offset+1, (2,c)) 310 | 311 | crop_val = offsets.max().abs() 312 | if crop_val != 0: 313 | for ch in range(cloud.shape[1]): 314 | cloud[:,ch] = torch.roll(cloud[:,ch], offsets[0,ch].item(),dims=-2) 315 | cloud[:,ch] = torch.roll(cloud[:,ch], offsets[1,ch].item(),dims=-1) 316 | 317 | cloud = KT.resize(cloud[:,:,crop_val:-crop_val-1, crop_val:-crop_val-1], 318 | (h,w), 319 | interpolation='bilinear', 320 | align_corners=True) 321 | 322 | # transparency between 0 and 1 323 | cloud=cloud.clip(0,1) 324 | 325 | if channel_magnitude is None: 326 | channel_magnitude=torch.ones(*input.shape[:-2],1,1,device=input.device) 327 | 328 | output = mix(input, cloud, channel_magnitude=channel_magnitude, blur_scaling=blur_scaling, cloud_color=cloud_color, invert=invert) 329 | 330 | if not return_cloud: 331 | return output 332 | else: 333 | return output, cloud# if not invert else 1-cloud 334 | 335 | def add_cloud_and_shadow(input, 336 | max_lvl=(0.95,1.0), 337 | min_lvl=(0.0, 0.05), 338 | channel_magnitude=None, 339 | shadow_max_lvl=[0.3,0.6], 340 | clear_threshold=0.0, 341 | noise_type = 'perlin', 342 | const_scale=True, 343 | decay_factor=1, 344 | locality_degree=1, 345 | channel_offset=2, 346 | channel_magnitude_shift=0.05, 347 | blur_scaling=2.0, 348 | cloud_color=True, 349 | return_cloud=False 350 | ): 351 | """ Takes an input image of shape [batch,channels,height, width] 352 | and returns a generated cloudy version of the input image, with additional shadows added to the ground image 353 | 354 | Args: 355 | 356 | input (Tensor) : input image in shape [B,C,H,W] 357 | 358 | max_lvl (float or tuple of floats): Indicates the maximum strength of the cloud (1.0 means that some pixels will be fully non-transparent) 359 | 360 | min_lvl (float or tuple of floats): Indicates the minimum strength of the cloud (0.0 means that some pixels will have no cloud) 361 | channel_magnitude (Tensor) : cloud magnitudes in each channel, shape [B,C,1,1] 362 | 363 | clear_threshold (float): An optional threshold for cutting off some part of the initial generated cloud mask 364 | 365 | shadow_max_lvl (float): Indicates the maximum strength of the cloud (1.0 means that some pixels will be completely black) 366 | 367 | noise_type (string: 'perlin', 'flex'): Method of noise generation (currently supported: 'perlin', 'flex') 368 | 369 | const_scale (bool): If True, the spatial frequencies of the cloud/shadow shape are scaled based on the image size (this makes the cloud preserve its appearance regardless of image resolution) 370 | 371 | decay_factor (float): decay factor that narrows the spectrum of the generated noise (higher values, such as 2.0 will reduce the amplitude of high spatial frequencies, yielding a 'blurry' cloud) 372 | 373 | locality degree (int): more local clouds shapes can be achieved by multiplying several random cloud shapes with each other (value of 1 disables this effect, and higher integers correspond to the number of multiplied masks) 374 | 375 | channel_offset (int): optional offset that can randomly misalign spatially the individual cloud mask channels (by a value in range -channel_offset and +channel_offset) 376 | 377 | blur_scaling (float): Scaling factor for the variance of locally varying Gaussian blur (dependent on cloud thickness). Value of 0 will disable this feature. 378 | 379 | cloud_color (bool): If True, it will adjust the color of the cloud based on the mean color of the clear sky image 380 | 381 | return_cloud (bool): If True, it will return a channel-wise cloud mask of shape [height, width, channels] along with the cloudy image 382 | 383 | Returns: 384 | 385 | Tensor: Tensor containing a generated cloudy image (and a cloud mask if return_cloud == True) 386 | 387 | """ 388 | 389 | # 1. Add Shadows 390 | if isinstance(locality_degree,int): 391 | shadow_locality_degree=locality_degree-1 392 | else: 393 | shadow_locality_degree=[i-1 for i in locality_degree] 394 | 395 | # but don't add shadows if cloud level 'floor' is above 0... 396 | if isinstance(min_lvl,list) or isinstance(min_lvl,tuple): 397 | if min_lvl[0] > 0.0: 398 | shadow_max_lvl=0.0 399 | else: 400 | if min_lvl > 0.0: 401 | shadow_max_lvl=0.0 402 | 403 | input, shadow_mask = add_cloud(input, 404 | max_lvl=shadow_max_lvl, 405 | min_lvl=0.0, 406 | clear_threshold=0.4, 407 | noise_type = 'perlin', 408 | const_scale=const_scale, 409 | decay_factor=1.5, # Suppress HF detail 410 | locality_degree=shadow_locality_degree, # make similar locality as cloud (-1 works well because it's lower frequency) 411 | invert=True, # Invert Color for shadow 412 | channel_offset=0, # Cloud SFX disabled 413 | channel_magnitude_shift=0.0, # Cloud SFX disable 414 | blur_scaling=0.0, # Cloud SFX disabled 415 | cloud_color=False, # Cloud SFX disabled 416 | return_cloud=True 417 | ) 418 | 419 | # 2. Add Cloud 420 | input, cloud_mask = add_cloud(input, 421 | max_lvl=max_lvl, 422 | min_lvl=min_lvl, 423 | channel_magnitude=channel_magnitude, 424 | clear_threshold=clear_threshold, 425 | noise_type=noise_type, 426 | const_scale=const_scale, 427 | decay_factor=decay_factor, 428 | locality_degree=locality_degree, 429 | invert=False, 430 | channel_offset=channel_offset, 431 | channel_magnitude_shift=channel_magnitude_shift, 432 | blur_scaling=blur_scaling, 433 | cloud_color=cloud_color, 434 | return_cloud=True 435 | ) 436 | 437 | if not return_cloud: 438 | return input 439 | else: 440 | return input, cloud_mask, shadow_mask -------------------------------------------------------------------------------- /src/LocalGaussianBlur/LocalGaussianBlur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | 4 | __all__ = [ 5 | "LocalGaussianBlur", 6 | "local_gaussian_blur" 7 | ] 8 | 9 | def gaussian_kernels(stds,size=11): 10 | """ Takes a series of std values of length N 11 | and integer size corresponding to kernel side length M 12 | and returns a set of gaussian kernels with those stds in a (N,M,M) tensor 13 | 14 | Args: 15 | stds (Tensor): Flat list tensor containing std values. 16 | size (int): Size of the Gaussian kernel. 17 | 18 | Returns: 19 | Tensor: Tensor containing a unique 2D Gaussian kernel for each value in the stds input. 20 | 21 | """ 22 | # 1. create input vector to the exponential function 23 | n = (torch.arange(0, size, device=stds.device) - (size - 1.0) / 2.0).unsqueeze(-1) 24 | var = 2*(stds**2).unsqueeze(-1) + 1e-8 # add constant for stability 25 | 26 | # 2. compute gaussian values with exponential 27 | kernel_1d = torch.exp((-n**2)/var.t()).permute(1,0) 28 | # 3. outer product in a batch 29 | kernel_2d = torch.bmm(kernel_1d.unsqueeze(2), kernel_1d.unsqueeze(1)) 30 | # 4. normalize to unity sum 31 | kernel_2d /= kernel_2d.sum(dim=(-1,-2)).view(-1,1,1) 32 | 33 | return kernel_2d 34 | 35 | def local_gaussian_blur(input, modulator, kernel_size=11): 36 | """Blurs image with dynamic Gaussian blur. 37 | 38 | Args: 39 | input (Tensor): The image to be blurred (C,H,W). 40 | modulator (Tensor): The modulating signal that determines the local value of kernel variance (H,W). 41 | kernel_size (int): Size of the Gaussian kernel. 42 | 43 | Returns: 44 | Tensor: Locally blurred version of the input image. 45 | 46 | """ 47 | 48 | if len(input.shape) < 4: 49 | input = input.unsqueeze(0) 50 | 51 | b,c,h,w = input.shape 52 | pad = int((kernel_size-1)/2) 53 | 54 | # 1. pad the input with replicated values 55 | inp_pad = torch.nn.functional.pad(input, pad=(pad,pad,pad,pad), mode='replicate') 56 | # 2. Create a Tensor of varying Gaussian Kernel 57 | kernels = gaussian_kernels(modulator.flatten()).view(b,-1,kernel_size,kernel_size) 58 | #kernels_rgb = torch.stack(c*[kernels], 1) 59 | kernels_rgb=kernels.unsqueeze(1).expand(kernels.shape[0],c,*kernels.shape[1:]) 60 | # 3. Unfold input 61 | inp_unf = torch.nn.functional.unfold(inp_pad, (kernel_size,kernel_size)) 62 | # 4. Multiply kernel with unfolded 63 | x1 = inp_unf.view(b,c,-1,h*w) 64 | x2 = kernels_rgb.view(b,c,h*w,-1).permute(0,1,3,2)#.unsqueeze(0) 65 | y = (x1*x2).sum(2) 66 | # 5. Fold and return 67 | return torch.nn.functional.fold(y,(h,w),(1,1)) 68 | 69 | 70 | class LocalGaussianBlur(torch.nn.Module): 71 | """Blurs image with dynamic Gaussian blur. 72 | If the image is torch Tensor, it is expected 73 | to have [B, C, H, W] shape. 74 | 75 | Args: 76 | kernel_size (int): Size of the Gaussian kernel. 77 | 78 | Returns: 79 | Tensor: Gaussian blurred version of the input image. 80 | 81 | """ 82 | 83 | def __init__(self, kernel_size=11): 84 | super().__init__() 85 | self.kernel_size = kernel_size 86 | 87 | 88 | def forward(self, img: Tensor, modulator: Tensor) -> Tensor: 89 | """ 90 | Args: 91 | img (Tensor): image to be blurred. 92 | modulator (Tensor): signal modulating the kernel variance (shape H x W). 93 | 94 | Returns: 95 | PIL Image or Tensor: Gaussian blurred image 96 | """ 97 | return local_gaussian_blur(img, modulator, kernel_size=self.kernel_size) 98 | 99 | 100 | def __repr__(self) -> str: 101 | s = f"{self.__class__.__name__}(kernel_size={self.kernel_size}" 102 | return s -------------------------------------------------------------------------------- /src/LocalGaussianBlur/README.md: -------------------------------------------------------------------------------- 1 | # LocalGaussianBlur 2 | PyTorch implementation of a Gaussian Blur operation with changing kernel variance 3 | 4 | The operation has similar interface to the scripts in `torchvision`: 5 | 6 | ```python 7 | lblur = LocalGaussianBlur() 8 | 9 | blurred = lblur(image,modulator) 10 | ``` 11 | 12 | See the jupyter notebook for a working example. 13 | -------------------------------------------------------------------------------- /src/LocalGaussianBlur/__init__.py: -------------------------------------------------------------------------------- 1 | from .src import * 2 | -------------------------------------------------------------------------------- /src/LocalGaussianBlur/src/LocalGaussianBlur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | 4 | __all__ = [ 5 | "LocalGaussianBlur", 6 | "local_gaussian_blur" 7 | ] 8 | 9 | def gaussian_kernels(stds,size=11): 10 | """ Takes a series of std values of length N 11 | and integer size corresponding to kernel side length M 12 | and returns a set of gaussian kernels with those stds in a (N,M,M) tensor 13 | 14 | Args: 15 | stds (Tensor): Flat list tensor containing std values. 16 | size (int): Size of the Gaussian kernel. 17 | 18 | Returns: 19 | Tensor: Tensor containing a unique 2D Gaussian kernel for each value in the stds input. 20 | 21 | """ 22 | # 1. create input vector to the exponential function 23 | n = (torch.arange(0, size, device=stds.device) - (size - 1.0) / 2.0).unsqueeze(-1) 24 | var = 2*(stds**2).unsqueeze(-1) + 1e-8 # add constant for stability 25 | 26 | # 2. compute gaussian values with exponential 27 | kernel_1d = torch.exp((-n**2)/var.t()).permute(1,0) 28 | # 3. outer product in a batch 29 | kernel_2d = torch.bmm(kernel_1d.unsqueeze(2), kernel_1d.unsqueeze(1)) 30 | # 4. normalize to unity sum 31 | kernel_2d /= kernel_2d.sum(dim=(-1,-2)).view(-1,1,1) 32 | 33 | return kernel_2d 34 | 35 | def local_gaussian_blur(input, modulator, kernel_size=11): 36 | """Blurs image with dynamic Gaussian blur. 37 | 38 | Args: 39 | input (Tensor): The image to be blurred (C,H,W). 40 | modulator (Tensor): The modulating signal that determines the local value of kernel variance (H,W). 41 | kernel_size (int): Size of the Gaussian kernel. 42 | 43 | Returns: 44 | Tensor: Locally blurred version of the input image. 45 | 46 | """ 47 | 48 | if len(input.shape) < 4: 49 | input = input.unsqueeze(0) 50 | 51 | b,c,h,w = input.shape 52 | pad = int((kernel_size-1)/2) 53 | 54 | # 1. pad the input with replicated values 55 | inp_pad = torch.nn.functional.pad(input, pad=(pad,pad,pad,pad), mode='replicate') 56 | # 2. Create a Tensor of varying Gaussian Kernel 57 | kernels = gaussian_kernels(modulator.flatten()).view(b,-1,kernel_size,kernel_size) 58 | #kernels_rgb = torch.stack(c*[kernels], 1) 59 | kernels_rgb=kernels.unsqueeze(1).expand(kernels.shape[0],c,*kernels.shape[1:]) 60 | # 3. Unfold input 61 | inp_unf = torch.nn.functional.unfold(inp_pad, (kernel_size,kernel_size)) 62 | # 4. Multiply kernel with unfolded 63 | x1 = inp_unf.view(b,c,-1,h*w) 64 | x2 = kernels_rgb.view(b,c,h*w,-1).permute(0,1,3,2)#.unsqueeze(0) 65 | y = (x1*x2).sum(2) 66 | # 5. Fold and return 67 | return torch.nn.functional.fold(y,(h,w),(1,1)) 68 | 69 | 70 | class LocalGaussianBlur(torch.nn.Module): 71 | """Blurs image with dynamic Gaussian blur. 72 | If the image is torch Tensor, it is expected 73 | to have [B, C, H, W] shape. 74 | 75 | Args: 76 | kernel_size (int): Size of the Gaussian kernel. 77 | 78 | Returns: 79 | Tensor: Gaussian blurred version of the input image. 80 | 81 | """ 82 | 83 | def __init__(self, kernel_size=11): 84 | super().__init__() 85 | self.kernel_size = kernel_size 86 | 87 | 88 | def forward(self, img: Tensor, modulator: Tensor) -> Tensor: 89 | """ 90 | Args: 91 | img (Tensor): image to be blurred. 92 | modulator (Tensor): signal modulating the kernel variance (shape H x W). 93 | 94 | Returns: 95 | PIL Image or Tensor: Gaussian blurred image 96 | """ 97 | return local_gaussian_blur(img, modulator, kernel_size=self.kernel_size) 98 | 99 | 100 | def __repr__(self) -> str: 101 | s = f"{self.__class__.__name__}(kernel_size={self.kernel_size}" 102 | return s -------------------------------------------------------------------------------- /src/LocalGaussianBlur/src/__init__.py: -------------------------------------------------------------------------------- 1 | from .LocalGaussianBlur import * -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | from .CloudSimulator import * 2 | from .band_magnitudes import * -------------------------------------------------------------------------------- /src/band_magnitudes.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # This file contains multiple approaches to extracting individual band magnitudes 4 | # If no specific band strengths for the cloud are provided, the cloud component 5 | # has approximately evenly distributed cloud strength across channels, obtained 6 | # using cloud_hue() or torch.ones_like() 7 | 8 | # most methods are based on the ratio between the clear region of a real reference image and the cloudy region 9 | 10 | def mean_mag(reference,mask,mask_cloudy=None,clean=None): 11 | """ Extract ratios of means 12 | 13 | Args: 14 | reference (Tensor) : input reference image containing cloud [height, width, channels] 15 | 16 | mask (Tensor) : mask, where 0.0 indicates a clear pixel (if mask_cloudy is provided, 1.0 indicates a clear pixel) 17 | 18 | mask_cloudy (Tensor) : optional mask, where 1.0 indicates cloud 19 | 20 | clean (Tensor) : optional image for multiplying the ratios by 21 | 22 | Returns: 23 | 24 | Tensor: Tensor containing cloud magnitudes (or ratios if clean==None) 25 | 26 | """ 27 | if mask_cloudy is None: 28 | mask_clean=mask.squeeze()==0.0 29 | mask_cloudy=mask.squeeze()!=0.0 30 | else: 31 | mask_clean=mask 32 | mask_cloudy=mask_cloudy 33 | 34 | full_cloud=(mask_clean!=1.0).all() 35 | no_cloud=(mask_cloudy==0.0).all() 36 | 37 | if no_cloud: 38 | return None 39 | 40 | # coef per band 41 | band_coefs=[] 42 | for idx in range(reference.shape[-3]): 43 | 44 | i=reference.index_select(-3,torch.tensor(idx,device=reference.device)) 45 | 46 | cloud_val=(i[mask_cloudy]).mean() 47 | clear_val=(i[mask_clean]).mean() if not full_cloud else 1 48 | 49 | band_coefs.append(cloud_val/clear_val) 50 | 51 | if clean is None: 52 | return band_coefs 53 | 54 | # cloud magnitude 55 | cloud_mag=torch.ones(clean.shape[:-2],device=clean.device) 56 | for idx in range(clean.shape[-3]): 57 | 58 | i=clean.index_select(-3,torch.tensor(idx,device=clean.device)) 59 | base=i.mean() if not full_cloud else 1 60 | cloud_mag[...,idx]=band_coefs[idx]*base 61 | 62 | return cloud_mag 63 | 64 | def max_mag(reference,mask,mask_cloudy=None,clean=None): 65 | """ Extract ratios of max values 66 | 67 | Args: 68 | reference (Tensor) : input reference image containing cloud [height, width, channels] 69 | 70 | mask (Tensor) : mask, where 0.0 indicates a clear pixel (if mask_cloudy is provided, 1.0 indicates a clear pixel) 71 | 72 | mask_cloudy (Tensor) : optional mask, where 1.0 indicates cloud 73 | 74 | clean (Tensor) : optional image for multiplying the ratios by 75 | 76 | Returns: 77 | 78 | Tensor: Tensor containing cloud magnitudes (or ratios if clean==None) 79 | 80 | """ 81 | if mask_cloudy is None: 82 | mask_clean=mask.squeeze()==0.0 83 | mask_cloudy=mask.squeeze()!=0.0 84 | else: 85 | mask_clean=mask 86 | mask_cloudy=mask_cloudy 87 | 88 | full_cloud=(mask_clean!=1.0).all() 89 | no_cloud=(mask_cloudy==0.0).all() 90 | 91 | if no_cloud: 92 | return None 93 | 94 | # coef per band 95 | band_coefs=[] 96 | for idx in range(reference.shape[-3]): 97 | 98 | i=reference.index_select(-3,torch.tensor(idx,device=reference.device)) 99 | 100 | cloud_val=(i[mask_cloudy]).max() 101 | clear_val=(i[mask_clean]).max() if not full_cloud else 1 102 | 103 | band_coefs.append(cloud_val/clear_val) 104 | 105 | if clean is None: 106 | return band_coefs 107 | 108 | # cloud magnitude 109 | cloud_mag=torch.ones(clean.shape[:-2],device=clean.device) 110 | for idx in range(clean.shape[-3]): 111 | 112 | i=clean.index_select(-3,torch.tensor(idx,device=clean.device)) 113 | base=i.median() if not full_cloud else 1 114 | cloud_mag[...,idx]=band_coefs[idx]*base 115 | 116 | return cloud_mag 117 | 118 | def median_mag(reference,mask,mask_cloudy=None,clean=None): 119 | """ Extract ratios of medians 120 | 121 | Args: 122 | reference (Tensor) : input reference image containing cloud [height, width, channels] 123 | 124 | mask (Tensor) : mask, where 0.0 indicates a clear pixel (if mask_cloudy is provided, 1.0 indicates a clear pixel) 125 | 126 | mask_cloudy (Tensor) : optional mask, where 1.0 indicates cloud 127 | 128 | clean (Tensor) : optional image for multiplying the ratios by 129 | 130 | Returns: 131 | 132 | Tensor: Tensor containing cloud magnitudes (or ratios if clean==None) 133 | 134 | """ 135 | if mask_cloudy is None: 136 | mask_clean=mask.squeeze()==0.0 137 | mask_cloudy=mask.squeeze()!=0.0 138 | else: 139 | mask_clean=mask 140 | mask_cloudy=mask_cloudy 141 | 142 | full_cloud=(mask_clean!=1.0).all() 143 | no_cloud=(mask_cloudy==0.0).all() 144 | 145 | if no_cloud: 146 | return None 147 | 148 | # coef per band 149 | band_coefs=[] 150 | for idx in range(reference.shape[-3]): 151 | 152 | i=reference.index_select(-3,torch.tensor(idx,device=reference.device)) 153 | 154 | cloud_val=(i[mask_cloudy]).median() 155 | clear_val=(i[mask_clean]).median() if not full_cloud else 1 156 | 157 | band_coefs.append(cloud_val/clear_val) 158 | 159 | if clean is None: 160 | return band_coefs 161 | 162 | # cloud magnitude 163 | cloud_mag=torch.ones(clean.shape[:-2],device=clean.device) 164 | for idx in range(clean.shape[-3]): 165 | 166 | i=clean.index_select(-3,torch.tensor(idx,device=clean.device)) 167 | base=i.median() if not full_cloud else 1 168 | cloud_mag[...,idx]=band_coefs[idx]*base 169 | 170 | return cloud_mag 171 | 172 | def q_mag(reference,mask,mask_cloudy=None, clean=None,q=0.95,q2=None): 173 | """ Extract ratios of quantiles 174 | 175 | Args: 176 | reference (Tensor) : input reference image containing cloud [height, width, channels] 177 | 178 | mask (Tensor) : mask, where 0.0 indicates a clear pixel (if mask_cloudy is provided, 1.0 indicates a clear pixel) 179 | 180 | mask_cloudy (Tensor) : optional mask, where 1.0 indicates cloud 181 | 182 | clean (Tensor) : optional image for multiplying the ratios by 183 | 184 | q (float) : quantile value used for the cloudy region 185 | 186 | q2 (float) : optional quantile value used for the clear region (if unspecifed, it is equal to q) 187 | 188 | Returns: 189 | 190 | Tensor: Tensor containing cloud magnitudes (or ratios if clean==None) 191 | 192 | """ 193 | if mask_cloudy is None: 194 | mask_clean=mask.squeeze()==0.0 195 | mask_cloudy=mask.squeeze()!=0.0 196 | else: 197 | mask_clean=mask 198 | mask_cloudy=mask_cloudy 199 | 200 | full_cloud=(mask_clean!=1.0).all() 201 | no_cloud=(mask_cloudy==0.0).all() 202 | 203 | if no_cloud: 204 | return None 205 | 206 | if q2 is None: 207 | q2=q 208 | 209 | # coef per band 210 | band_coefs=[] 211 | for idx in range(reference.shape[-3]): 212 | 213 | i=reference.index_select(-3,torch.tensor(idx,device=reference.device)) 214 | 215 | cloud_val=(i[mask_cloudy]).quantile(q) 216 | clear_val=(i[mask_clean]).quantile(q2) if not full_cloud else 1 217 | 218 | band_coefs.append(cloud_val/clear_val) 219 | 220 | if clean is None: 221 | return band_coefs 222 | 223 | # cloud magnitude 224 | cloud_mag=torch.ones(clean.shape[:-2],device=clean.device) 225 | for idx in range(clean.shape[-3]): 226 | 227 | i=clean.index_select(-3,torch.tensor(idx,device=clean.device)) 228 | base=i.quantile(q2) if not full_cloud else 1 229 | cloud_mag[...,idx]=band_coefs[idx]*base 230 | 231 | return cloud_mag 232 | -------------------------------------------------------------------------------- /src/configs.py: -------------------------------------------------------------------------------- 1 | # wide 2 | WIDE_CONFIG={'min_lvl': [0.0,0.8], 3 | 'max_lvl': [0.0,1.0], 4 | 'const_scale':True, 5 | 'decay_factor':[0.8,1.2], 6 | 'clear_threshold':[0.0,0.1], 7 | 'locality_degree':[1,3], 8 | 'cloud_color':True, 9 | 'channel_offset':2, 10 | 'blur_scaling':2 11 | } 12 | 13 | # thick global 14 | BIG_CONFIG={'min_lvl':0.0, 15 | 'max_lvl':1.0, 16 | 'const_scale':True, 17 | 'decay_factor':1.0, 18 | 'clear_threshold':[0.0,0.2], 19 | 'locality_degree':1, 20 | 'cloud_color':True, 21 | 'channel_offset':2, 22 | 'blur_scaling':2 23 | } 24 | 25 | # thick local 26 | LOCAL_CONFIG={'min_lvl':0.0, 27 | 'max_lvl':1.0, 28 | 'const_scale':True, 29 | 'decay_factor':1.0, 30 | 'clear_threshold':[0.0,0.2], 31 | 'locality_degree':[2,4], 32 | 'cloud_color':True, 33 | 'channel_offset':2, 34 | 'blur_scaling':2 35 | } 36 | 37 | # thin cloud 38 | THIN_CONFIG={'min_lvl':[0.0,0.1], 39 | 'max_lvl':[0.4,0.7], 40 | 'const_scale':True, 41 | 'decay_factor':1.0, 42 | 'clear_threshold':0.0, 43 | 'locality_degree':[1,3], 44 | 'cloud_color':True, 45 | 'channel_offset':4, 46 | 'blur_scaling':2 47 | } 48 | 49 | # foggy 50 | FOG_CONFIG={'min_lvl':[0.3,0.6], 51 | 'max_lvl':[0.6,0.7], 52 | 'const_scale':True, 53 | 'decay_factor':1.0, 54 | 'clear_threshold':0.0, 55 | 'locality_degree':1, 56 | 'cloud_color':True, 57 | 'channel_offset':2, 58 | 'blur_scaling':2 59 | } -------------------------------------------------------------------------------- /src/extras.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def segmentation_mask(cloud,shadow=None,thin_range=(0.1,0.1),shadow_threshold=0.1): 5 | """ The following encoding method is used: 6 | 0: Cleary Sky 7 | 1: Cloud 8 | 2: Shadow 9 | 10 | ...or, if thin_range contains a pair of *different numbers* (a,b): 11 | 0: Clear Sky 12 | 1: Thick Cloud - cloud in range [a,b) 13 | 2: Thin Cloud - cloud in range [b,+inf) 14 | 3: Shadow 15 | 16 | 17 | Expected input shape for cloud and shadow: (H,W) or (C,H,W) or (B,C,H,W) 18 | """ 19 | 20 | while len(cloud.shape)<4: 21 | cloud.unsqueeze_(0) 22 | 23 | if shadow is None: 24 | shadow=torch.zeros_like(cloud) 25 | while len(shadow.shape)<4: 26 | shadow.unsqueeze_(0) 27 | 28 | # cloud and shadow are (B,C,H,W) by now 29 | b,c,h,w=cloud.shape 30 | 31 | seg_mask=torch.zeros(b,h,w) 32 | 33 | # get binary representations 34 | thick_cloud_b=1.0*(cloud.mean(-3)>=thin_range[1]) 35 | thin_cloud_b=1.0*(cloud.mean(-3)=thin_range[0])*(1.0-thick_cloud_b) 36 | shadow_b=1.0*(shadow.mean(-3)>shadow_threshold)*(1.0-thick_cloud_b)*(1.0-thin_cloud_b) 37 | 38 | if thin_range[0]==thin_range[1]: 39 | seg_mask=thick_cloud_b + 2*shadow_b 40 | else: 41 | seg_mask=thick_cloud_b + 2*thin_cloud_b + 3*shadow_b 42 | 43 | return seg_mask -------------------------------------------------------------------------------- /src/noise.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | # Noise Generation Methods 5 | 6 | def output_transform(x): 7 | # normalize to max magnitude of 1 8 | x /= max([x.max(), -x.min()]) 9 | # pass through tanh to ensure predefined range 10 | return (4*x).tanh() 11 | 12 | # --- Perlin Methods 13 | def interp(t): 14 | return 3 * t**2 - 2 * t ** 3 15 | 16 | def perlin(width, height, scale=10, batch=1, device=None): 17 | # based on https://gist.github.com/adefossez/0646dbe9ed4005480a2407c62aac8869 18 | 19 | gx, gy = torch.randn(2, batch, width + 1, height + 1, 1, 1, device=device) 20 | xs = torch.linspace(0, 1, scale + 1, device=device)[:-1, None] 21 | ys = torch.linspace(0, 1, scale + 1, device=device)[None, :-1] 22 | 23 | wx = 1 - interp(xs) 24 | wy = 1 - interp(ys) 25 | 26 | dots = 0 27 | dots += wx * wy * (gx[:,:-1, :-1] * xs + gy[:,:-1, :-1] * ys) 28 | dots += (1 - wx) * wy * (-gx[:,1:, :-1] * (1 - xs) + gy[:,1:, :-1] * ys) 29 | dots += wx * (1 - wy) * (gx[:,:-1, 1:] * xs - gy[:,:-1, 1:] * (1 - ys)) 30 | dots += (1 - wx) * (1 - wy) * (-gx[:,1:, 1:] * (1 - xs) - gy[:,1:, 1:] * (1 - ys)) 31 | 32 | return dots.permute(0, 1, 3, 2, 4).contiguous().view(batch, width * scale, height * scale) 33 | 34 | def generate_perlin(scales=None, shape=(256,256), batch=1, device='cpu', weights=None, const_scale=True, decay_factor=1): 35 | # Set Up Scales 36 | if scales is None: 37 | up_lim = max([2, int(np.log2(min(shape)))-1]) 38 | 39 | scales = [2**i for i in range(2,up_lim)] 40 | # proportional to image size, if const_scale is preserved 41 | if const_scale: 42 | f = int(2**np.floor(np.log2(0.25*max(shape)/max(scales)))) 43 | scales = [el*f for el in scales] 44 | 45 | if weights is None: 46 | weights = [el**decay_factor for el in scales] 47 | # Round shape to nearest power of 2 48 | big_shape = [int(2**(np.ceil(np.log2(i)))) for i in shape] 49 | out = torch.zeros([batch,*shape], device=device) 50 | for scale, weight in zip(scales, weights): 51 | out += weight*perlin(int(big_shape[0]/scale), int(big_shape[1]/scale), scale=scale, batch=batch, device=device)[...,:shape[0],:shape[1]] 52 | 53 | return output_transform(out) 54 | 55 | # --- Flexible Noise Filtering Methods 56 | def default_weight(input, const_scale=True, decay_factor=1): 57 | 58 | # scaling factor 59 | if const_scale: 60 | factor_multiplier=0.32*max(input.shape) 61 | else: 62 | factor_multiplier=64 63 | 64 | if isinstance(decay_factor, list) or isinstance(decay_factor, tuple): 65 | ret=0 66 | decay_factor=sorted(decay_factor)[::-1] 67 | for f in decay_factor: 68 | ret+=torch.exp(-factor_multiplier*f*input)/(decay_factor[0]/f) 69 | else: 70 | ret = torch.exp(-factor_multiplier*decay_factor*input) 71 | return ret 72 | 73 | def flex_noise(width, height, spectral_weight=default_weight, const_scale=False, decay_factor=1): 74 | raise NotImplemented # This needs to be converted to work with batches, and also generate full on the requested device 75 | # Source Noise 76 | x = torch.rand(width,height) - 0.5 77 | x_f = torch.fft.rfft2(x) 78 | 79 | # Weight Space (value proportional to euclidean distance from DC) 80 | x_space = torch.abs(torch.arange(-x_f.shape[0]/2, 81 | x_f.shape[0]/2, 82 | 1.0))*(x.shape[1]/x.shape[0]) # scaling to preserve 'aspect' 83 | y_space = torch.abs(torch.arange(0, 84 | x_f.shape[1], 85 | 1.0)) 86 | X_grid,Y_grid = torch.meshgrid(x_space,y_space) 87 | 88 | W_space = (X_grid**2+Y_grid**2)**0.5 89 | W_space /= W_space.max() 90 | 91 | # Modulation of Weight 92 | M = spectral_weight(W_space, const_scale=const_scale, decay_factor=decay_factor) 93 | 94 | # Application to Noise Spectrum 95 | return output_transform(torch.fft.irfft2(torch.fft.fftshift(M,0)*x_f)) --------------------------------------------------------------------------------