├── .gitignore ├── CITATION.cff ├── LICENSE ├── README.md ├── configs ├── burn_scars.py ├── multi_temporal_crop_classification.py └── sen1floods11_config.py ├── data_splits ├── multi_temporal_crop_classification │ ├── training_data.txt │ └── validation_data.txt └── sen1floods11 │ ├── bolivia_split.txt │ ├── test_split.txt │ ├── train_split.txt │ └── val_split.txt ├── exploration.ipynb ├── geospatial_fm ├── __init__.py ├── datasets.py ├── geospatial_fm.py ├── geospatial_pipelines.py └── temporal_encoder_decoder.py ├── hls-gfm └── README.md ├── model_inference.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *cache* 2 | *checkpoint* 3 | *egg* 4 | *json* 5 | *build* 6 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | 5 | - family-names: "Fraccaro" 6 | given-names: "Paolo" 7 | affiliation: "IBM Research" 8 | - family-names: "Gomes" 9 | given-names: "Carlos" 10 | affiliation: "IBM Research" 11 | - family-names: "Jakubik" 12 | given-names: "Johannes" 13 | affiliation: "IBM Research" 14 | - family-names: "Chu" 15 | given-names: "Linsong" 16 | affiliation: "IBM Research" 17 | - family-names: "Gabby" 18 | given-names: "Nyirjesy" 19 | affiliation: "IBM Research" 20 | - family-names: "Bangalore" 21 | given-names: "Ranjini" 22 | affiliation: "IBM Research" 23 | - family-names: "Lambhate" 24 | given-names: "Devyani" 25 | affiliation: "IBM Research" 26 | - family-names: "Das" 27 | given-names: "Kamal" 28 | affiliation: "IBM Research" 29 | - family-names: "Oliveira Borges" 30 | given-names: "Dario" 31 | affiliation: "IBM Research" 32 | - family-names: "Kimura" 33 | given-names: "Daiki" 34 | affiliation: "IBM Research" 35 | - family-names: "Simumba" 36 | given-names: "Naomi" 37 | affiliation: "IBM Research" 38 | - family-names: "Szwarcman" 39 | given-names: "Daniela" 40 | affiliation: "IBM Research" 41 | - family-names: "Muszynski" 42 | given-names: "Michal" 43 | affiliation: "IBM Research" 44 | - family-names: "Weldemariam" 45 | given-names: "Kommy" 46 | - family-names: "Edwards" 47 | given-names: "Blair" 48 | affiliation: "IBM Research" 49 | - family-names: "Schmude" 50 | given-names: "Johannes" 51 | affiliation: "IBM Research" 52 | - family-names: "Hamann" 53 | given-names: "Hendrik" 54 | affiliation: "IBM Research" 55 | - family-names: "Zadrozny" 56 | given-names: "Bianca" 57 | affiliation: "IBM Research" 58 | - family-names: "Ganti" 59 | given-names: "Raghu" 60 | affiliation: "IBM Research" 61 | - family-names: "Costa" 62 | given-names: "Carlos" 63 | affiliation: "IBM Research" 64 | - family-names: "Watson" 65 | given-names: "Campbell" 66 | affiliation: "IBM Research" 67 | - family-names: "Mukkavilli" 68 | given-names: "Karthik" 69 | affiliation: "IBM Research" 70 | - family-names: "Parkin" 71 | given-names: "Rob" 72 | affiliation: "IBM Research" 73 | 74 | - family-names: "Roy" 75 | given-names: "Sujit" 76 | affiliation: "University of Alabama in Huntsville" 77 | - family-names: "Phillips" 78 | given-names: "Christopher" 79 | affiliation: "University of Alabama in Huntsville" 80 | - family-names: "Ankur" 81 | given-names: "Kumar" 82 | affiliation: "University of Alabama in Huntsville" 83 | - family-names: "Ramasubramanian" 84 | given-names: "Muthukumaran" 85 | affiliation: "University of Alabama in 
Huntsville" 86 | - family-names: "Gurung" 87 | given-names: "Iksha" 88 | affiliation: "University of Alabama in Huntsville" 89 | - family-names: "Leong" 90 | given-names: "Wei Ji" 91 | affiliation: "Development Seed" 92 | - family-names: "Avery" 93 | given-names: "Ryan" 94 | affiliation: "Development Seed" 95 | - family-names: "Ramachandran" 96 | given-names: "Rahul" 97 | affiliation: "NASA" 98 | - family-names: "Maskey" 99 | given-names: "Manil" 100 | affiliation: "NASA" 101 | - family-names: "Olofossen" 102 | given-names: "Pontus" 103 | affiliation: "NASA" 104 | - family-names: "Fancher" 105 | given-names: "Elizabeth" 106 | affiliation: "Barrios Technology" 107 | - family-names: "Lee" 108 | given-names: "Tsengdar" 109 | affiliation: "NASA" 110 | - family-names: "Murphy" 111 | given-names: "Kevin" 112 | affiliation: "NASA" 113 | - family-names: "Duffy" 114 | given-names: "Dan" 115 | affiliation: "NASA" 116 | - family-names: "Little" 117 | given-names: "Mike" 118 | affiliation: "NASA" 119 | - family-names: "Alemohammad" 120 | given-names: "Hamed" 121 | affiliation: "Clark University" 122 | - family-names: "Cecil" 123 | given-names: "Michael" 124 | affiliation: "Clark University" 125 | - family-names: "Li" 126 | given-names: "Steve" 127 | affiliation: "Clark University" 128 | - family-names: "Khallaghi" 129 | given-names: "Sam" 130 | affiliation: "Clark University" 131 | - family-names: "Godwin" 132 | given-names: "Denys" 133 | affiliation: "Clark University" 134 | - family-names: "Ahmadi" 135 | given-names: "Maryam" 136 | affiliation: "Clark University" 137 | - family-names: "Kordi" 138 | given-names: "Fatemeh" 139 | affiliation: "Clark University" 140 | - family-names: "Saux" 141 | given-names: "Bertrand" 142 | affiliation: "ESA" 143 | - family-names: "Pastick" 144 | given-names: "Neal" 145 | affiliation: "USGS" 146 | - family-names: "Doucette" 147 | given-names: "Peter" 148 | affiliation: "USGS" 149 | - family-names: "Fleckenstein" 150 | given-names: "Rylie" 151 | affiliation: "USGS" 152 | - family-names: "Luanga" 153 | given-names: "Dalton" 154 | affiliation: "DOE/ORNL" 155 | - family-names: "Corvin" 156 | given-names: "Alex" 157 | affiliation: "RedHat" 158 | - family-names: "Granger" 159 | given-names: "Erwan" 160 | affiliation: "RedHat" 161 | title: "HLS Foundation" 162 | doi: https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M 163 | license: "Apache 2.0" 164 | date-released: 2023-08-03 165 | repository-code: "https://github.com/nasa-impact/hls-foundation-os" 166 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2023 NASA, IBM 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Image segmentation by foundation model finetuning 2 | 3 | This repository shows three examples of how [Prithvi](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M) can be finetuned for downstream tasks. The examples include flood detection using Sentinel-2 data from the [Sen1Floods11](https://github.com/cloudtostreet/Sen1Floods11) dataset, burn scars detection using the [NASA HLS fire scars dataset](https://huggingface.co/datasets/nasa-impact/hls_burn_scars) and multi-temporal crop classification using the [NASA HLS multi-temporal crop classification dataset](https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification). 4 | 5 | ## The approach 6 | ### Background 7 | To finetune for these tasks in this repository, we make use of [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/), which provides an extensible framework for segmentation tasks. 8 | 9 | [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/) allows us to concatenate necks and heads appropriate for any segmentation downstream task to the encoder, and then perform the finetuning. This only requires setting up a config file detailing the desired model architecture, dataset setup and training strategy. 10 | 11 | We build extensions on top of [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/) to support our encoder and provide classes to read and augment remote sensing data (from .tiff files) using [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/) data pipelines. These extensions can be found in the [geospatial_fm](./geospatial_fm/) directory, and they are installed as a package on top of [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/) for ease of use. If more advanced functionality is necessary, it should be added there. 12 | 13 | ### The pretrained backbone 14 | The pretrained model we work with is a [ViT](https://arxiv.org/abs/2010.11929) operating as a [Masked Autoencoder](https://arxiv.org/abs/2111.06377), trained on [HLS](https://hls.gsfc.nasa.gov/) data. The encoder from this model is made available as the backbone, and the weights can be downloaded from Hugging Face [here](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M/blob/main/Prithvi_100M.pt). 15 | 16 | 17 | ### The architectures 18 | We use a simple architecture that adds a neck and segmentation head to the backbone. The neck concatenates and processes the transformer's token-based embeddings into an embedding that can be fed into convolutional layers. The head processes this embedding into a segmentation mask. The code for the architecture can be found in [this file](./geospatial_fm/geospatial_fm.py). 19 | 20 | ### The pipeline 21 | Additionally, we provide extra components for data loading pipelines in [geospatial_pipelines.py](./geospatial_fm/geospatial_pipelines.py). These are documented in the file. 22 | 23 | We observe the MMCV convention that all operations assume a channel-last format. 24 | *However*, we also introduce some components with the prefix `Torch`, such as `TorchNormalize`. These components assume the torch convention of channel-first. 25 | 26 | At some point during the pipeline, before feeding the data to the model, it is necessary to change to channel-first format. 27 | We recommend implementing the change after the `ToTensor` operation (which is also necessary at some point), using the `TorchPermute` operation. 28 |
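The snippet below is a condensed sketch of this recommendation, using the same operation names that appear in the configs under [configs](./configs/); the full, working pipelines live there. Channel-last operations run first, `ToTensor` converts the numpy array to a torch tensor, and `TorchPermute` switches to channel-first before the `Torch*` operations.

```python
# Condensed sketch of a training pipeline (see configs/ for the complete versions).
# Placeholder per-band statistics; the real values are defined in the configs.
img_norm_cfg = dict(means=[0.0] * 6, stds=[1.0] * 6)

train_pipeline = [
    dict(type="LoadGeospatialImageFromFile", to_float32=True),  # channel-last (H, W, C)
    dict(type="RandomFlip", prob=0.5),                          # still channel-last
    dict(type="ToTensor", keys=["img"]),                        # numpy array -> torch tensor
    dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)),   # (H, W, C) -> (C, H, W)
    dict(type="TorchNormalize", **img_norm_cfg),                # Torch* ops expect channel-first
]
```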
29 | ### Tutorial 30 | Check out the [exploration notebook](./exploration.ipynb) for a more in-depth example of the usage of the model. 31 | 32 | 33 | ## Setup 34 | ### Dependencies 35 | 1. Clone this repository 36 | 2. `conda create -n <environment-name> python==3.9` 37 | 3. `conda activate <environment-name>` 38 | 4. Install torch (tested for >=1.7.1 and <=1.11.0) and torchvision (tested for >=0.8.2 and <=0.12). May vary with your system. Please check at: https://pytorch.org/get-started/previous-versions/. 39 | 1. e.g.: `pip install torch==1.11.0+cu115 torchvision==0.12.0+cu115 --extra-index-url https://download.pytorch.org/whl/cu115` 40 | 5. `cd` into the cloned repo 41 | 6. `pip install -e .` 42 | 7. `pip install -U openmim` 43 | 8. `mim install mmcv-full==1.6.2 -f https://download.openmmlab.com/mmcv/dist/{cuda_version}/{torch_version}/index.html`. Note that pre-built wheels (fast installs without needing to build) only exist for some versions of torch and CUDA. Check compatibilities here: https://mmcv.readthedocs.io/en/v1.6.2/get_started/installation.html 44 | 1. e.g.: `mim install mmcv-full==1.6.2 -f https://download.openmmlab.com/mmcv/dist/cu115/torch1.11.0/index.html` 45 | 46 | 47 | ### Alternate Setup (Windows Users - Tested for Windows 10) 48 | 49 | 1. `conda create -n <environment-name> python=3.9` 50 | 2. `conda activate <environment-name>` 51 | 3. Install torch (tested for >=1.7.1 and <=1.11.0) and torchvision (tested for >=0.8.2 and <=0.12). May vary with your system. Please check at: https://pytorch.org/get-started/previous-versions/. 52 | 1. e.g.: `pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113` 53 | 4. `git clone https://github.com/NASA-IMPACT/hls-foundation-os.git <local-path>\hls-foundation-os` 54 | 5. `git clone https://github.com/open-mmlab/mmsegmentation.git <local-path>\mmsegmentation` 55 | 6. `cd <local-path>\mmsegmentation` 56 | 7. Check out the mmsegmentation version compatible with hls-foundation: `git checkout 186572a3ce64ac9b6b37e66d58c76515000c3280` 57 | 8. Modify setup.py so it installs from the cloned mmsegmentation. Change the line `mmsegmentation @ git+https://github.com/open-mmlab/mmsegmentation.git@186572a3ce64ac9b6b37e66d58c76515000c3280` to `mmsegmentation @ file:///<local-path>/mmsegmentation` 58 | 9. `cd <local-path>\hls-foundation-os` 59 | 10. `pip install -e .` 60 | 11. `pip install -U openmim` 61 | 12. `mim install mmcv-full==1.6.2 -f https://download.openmmlab.com/mmcv/dist/{cuda_version}/{torch_version}/index.html`. Note that pre-built wheels (fast installs without needing to build) only exist for some versions of torch and CUDA. Check compatibilities here: https://mmcv.readthedocs.io/en/v1.6.2/get_started/installation.html 62 | 1. e.g.: `mim install mmcv-full==1.6.2 -f https://download.openmmlab.com/mmcv/dist/cu115/torch1.11.0/index.html` 63 | 13. `conda install -c conda-forge opencv` 64 | 14. `pip install datasets`
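Before moving on to the data and finetuning steps, a quick sanity check of the environment can save time. This is optional and not part of the repository's own instructions, but the following commands should work in the environment created by either setup above:

```sh
# Optional check: confirm torch, mmcv-full and mmsegmentation import cleanly,
# then list the OpenMMLab packages that mim has installed.
python -c "import torch, mmcv, mmseg; print(torch.__version__, mmcv.__version__, mmseg.__version__)"
mim list
```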
65 | 66 | ### Data 67 | 68 | The flood detection dataset can be downloaded from [Sen1Floods11](https://github.com/cloudtostreet/Sen1Floods11). Splits in the `mmsegmentation` format are available in the `data_splits` folders. 69 | 70 | 71 | The [NASA HLS fire scars dataset](https://huggingface.co/datasets/nasa-impact/hls_burn_scars) can be downloaded from Hugging Face. 72 | 73 | The [NASA HLS multi-temporal crop classification dataset](https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification) can be downloaded from Hugging Face. 74 | 75 | Using git-lfs, you can download the data as in the following example: 76 | ``` sh 77 | # from: https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification 78 | 79 | # Make sure you have git-lfs installed (https://git-lfs.com) 80 | git lfs install 81 | git clone https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification 82 | 83 | # extract files 84 | cd multi-temporal-crop-classification 85 | tar -xvf training_chips.tgz && tar -xvf validation_chips.tgz 86 | ``` 87 | 88 | Without git-lfs (Credit @robmarkcole): 89 | ```sh 90 | mkdir data 91 | cd data 92 | 93 | mkdir multi-temporal-crop-classification 94 | cd multi-temporal-crop-classification 95 | 96 | # note: this can take some time and appear to hang, be patient 97 | wget https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification/resolve/main/training_chips.tgz?download=true -O training_chips.tgz 98 | tar -xvzf training_chips.tgz 99 | 100 | wget https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification/resolve/main/validation_chips.tgz?download=true -O validation_chips.tgz 101 | tar -xvzf validation_chips.tgz 102 | 103 | # delete some macOS-added files 104 | find . -name '._*' -delete 105 | 106 | # the following are NOT required (TBC) 107 | # https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification/resolve/main/training_data.txt 108 | # https://huggingface.co/datasets/ibm-nasa-geospatial/multi-temporal-crop-classification/resolve/main/validation_data.txt 109 | 110 | # instead copy over the files from the splits directory to the location of the images 111 | 112 | cd .. 113 | mkdir hls_burn_scars 114 | cd hls_burn_scars 115 | wget https://huggingface.co/datasets/ibm-nasa-geospatial/hls_burn_scars/resolve/main/hls_burn_scars.tar.gz?download=true -O hls_burn_scars.tar.gz 116 | tar -xvf hls_burn_scars.tar.gz 117 | ``` 118 | 119 | ## Running the finetuning 120 | 1. In the `configs` folder there are three config examples for the three segmentation tasks. Complete the configs with your setup specifications. Parts that must be completed are marked with `#TO BE DEFINED BY USER`. They relate to the location where you downloaded the dataset, the pretrained model weights, the test set (e.g. the regular one or the Bolivia out-of-bag data) and where you are going to save the experiment outputs. 121 | 122 | 2. 123 | a. With the conda env created above activated, run: 124 | 125 | `mim train mmsegmentation configs/sen1floods11_config.py` or 126 | 127 | `mim train mmsegmentation configs/burn_scars.py` or 128 | 129 | `mim train mmsegmentation configs/multi_temporal_crop_classification.py` 130 | 131 | b. Multi-GPU training can be run by adding `--launcher pytorch --gpus <number of gpus>` 132 | 133 | c. To run testing: 134 | 135 | `mim test mmsegmentation configs/sen1floods11_config.py --checkpoint /path/to/best/checkpoint/model.pth --eval "mIoU"` or 136 | 137 | `mim test mmsegmentation configs/burn_scars.py --checkpoint /path/to/best/checkpoint/model.pth --eval "mIoU"` or 138 | 139 | `mim test mmsegmentation configs/multi_temporal_crop_classification.py --checkpoint /path/to/best/checkpoint/model.pth --eval "mIoU"` 140 | 141 | ## Checkpoints on Hugging Face 142 | We also provide checkpoints on Hugging Face for the [burn scars detection](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M-burn-scar) and [multi-temporal crop classification](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification) tasks. 143 | 144 | ## Running the inference 145 | We provide a script to run inference on new data in GeoTIFF format. The data can be of any shape (i.e. any height and width) as long as it follows the bands/channels of the original dataset. An example is shown below. 146 | 147 | ``` 148 | python model_inference.py -config /path/to/config/config.py -ckpt /path/to/checkpoint/checkpoint.pth -input /input/folder/ -output /output/folder/ -input_type tif -bands 0 1 2 3 4 5 149 | ``` 150 | 151 | The `bands` parameter is useful in case the files used to run inference have the data in a different order/at different indices than the original dataset.
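As a purely hypothetical illustration of the `bands` parameter: if your GeoTIFFs contained two extra leading bands, with the six bands expected by the model stored at indices 2-7, the call would become:

```sh
# Hypothetical band layout: the six model bands sit at positions 2-7 of the input files.
python model_inference.py -config /path/to/config/config.py -ckpt /path/to/checkpoint/checkpoint.pth \
    -input /input/folder/ -output /output/folder/ -input_type tif -bands 2 3 4 5 6 7
```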
152 | 153 | ## Additional documentation 154 | This project builds on [MMSegmentation](https://mmsegmentation.readthedocs.io/en/0.x/) and [MMCV](https://mmcv.readthedocs.io/en/v1.5.0/). For additional documentation, consult their docs (please note this repository currently targets version 0.30.0 of MMSegmentation and version 1.5.0 of MMCV, not the latest releases). 155 | 156 | ## Citation 157 | 158 | If this repository helped your research, please cite `HLS foundation` in your publications.
Here is an example BibTeX entry: 159 | 160 | ``` 161 | @software{HLS_Foundation_2023, 162 | author = {Jakubik, Johannes and Chu, Linsong and Fraccaro, Paolo and Bangalore, Ranjini and Lambhate, Devyani and Das, Kamal and Oliveira Borges, Dario and Kimura, Daiki and Simumba, Naomi and Szwarcman, Daniela and Muszynski, Michal and Weldemariam, Kommy and Zadrozny, Bianca and Ganti, Raghu and Costa, Carlos and Watson, Campbell and Mukkavilli, Karthik and Roy, Sujit and Phillips, Christopher and Ankur, Kumar and Ramasubramanian, Muthukumaran and Gurung, Iksha and Leong, Wei Ji and Avery, Ryan and Ramachandran, Rahul and Maskey, Manil and Olofossen, Pontus and Fancher, Elizabeth and Lee, Tsengdar and Murphy, Kevin and Duffy, Dan and Little, Mike and Alemohammad, Hamed and Cecil, Michael and Li, Steve and Khallaghi, Sam and Godwin, Denys and Ahmadi, Maryam and Kordi, Fatemeh and Saux, Bertrand and Pastick, Neal and Doucette, Peter and Fleckenstein, Rylie and Luanga, Dalton and Corvin, Alex and Granger, Erwan}, 163 | doi = {10.57967/hf/0952}, 164 | month = aug, 165 | title = {{HLS Foundation}}, 166 | repository-code = {https://github.com/nasa-impact/hls-foundation-os}, 167 | year = {2023} 168 | } 169 | ``` 170 | -------------------------------------------------------------------------------- /configs/burn_scars.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | custom_imports = dict(imports=["geospatial_fm"]) 4 | 5 | # base options 6 | dist_params = dict(backend="nccl") 7 | log_level = "INFO" 8 | load_from = None 9 | resume_from = None 10 | cudnn_benchmark = True 11 | 12 | dataset_type = "GeospatialDataset" 13 | 14 | # TO BE DEFINED BY USER: data directory 15 | data_root = "" 16 | 17 | num_frames = 1 18 | img_size = 224 19 | num_workers = 4 20 | samples_per_gpu = 4 21 | 22 | img_norm_cfg = dict( 23 | means=[ 24 | 0.033349706741586264, 25 | 0.05701185520536176, 26 | 0.05889748132001316, 27 | 0.2323245113436119, 28 | 0.1972854853760658, 29 | 0.11944914225186566, 30 | ], 31 | stds=[ 32 | 0.02269135568823774, 33 | 0.026807560223070237, 34 | 0.04004109844362779, 35 | 0.07791732423672691, 36 | 0.08708738838140137, 37 | 0.07241979477437814, 38 | ], 39 | ) # change the mean and std of all the bands 40 | 41 | bands = [0, 1, 2, 3, 4, 5] 42 | tile_size = 224 43 | orig_nsize = 512 44 | crop_size = (tile_size, tile_size) 45 | img_suffix = "_merged.tif" 46 | seg_map_suffix = ".mask.tif" 47 | ignore_index = -1 48 | image_nodata = -9999 49 | image_nodata_replace = 0 50 | image_to_float32 = True 51 | 52 | # model 53 | # TO BE DEFINED BY USER: model path 54 | pretrained_weights_path = "" 55 | num_layers = 12 56 | patch_size = 16 57 | embed_dim = 768 58 | num_heads = 12 59 | tubelet_size = 1 60 | output_embed_dim = num_frames * embed_dim 61 | max_intervals = 10000 62 | evaluation_interval = 1000 63 | 64 | # TO BE DEFINED BY USER: model path 65 | experiment = "" 66 | project_dir = "" 67 | work_dir = os.path.join(project_dir, experiment) 68 | save_path = work_dir 69 | 70 | save_path = work_dir 71 | train_pipeline = [ 72 | dict(type="LoadGeospatialImageFromFile", to_float32=image_to_float32), 73 | dict(type="LoadGeospatialAnnotations", reduce_zero_label=False), 74 | dict(type="BandsExtract", bands=bands), 75 | dict(type="RandomFlip", prob=0.5), 76 | dict(type="ToTensor", keys=["img", "gt_semantic_seg"]), 77 | # to channels first 78 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 79 | dict(type="TorchNormalize", **img_norm_cfg), 80 | 
dict(type="TorchRandomCrop", crop_size=(tile_size, tile_size)), 81 | dict( 82 | type="Reshape", 83 | keys=["img"], 84 | new_shape=(len(bands), num_frames, tile_size, tile_size), 85 | ), 86 | dict(type="Reshape", keys=["gt_semantic_seg"], new_shape=(1, tile_size, tile_size)), 87 | dict(type="CastTensor", keys=["gt_semantic_seg"], new_type="torch.LongTensor"), 88 | dict(type="Collect", keys=["img", "gt_semantic_seg"]), 89 | ] 90 | test_pipeline = [ 91 | dict(type="LoadGeospatialImageFromFile", to_float32=image_to_float32), 92 | dict(type="BandsExtract", bands=bands), 93 | dict(type="ToTensor", keys=["img"]), 94 | # to channels first 95 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 96 | dict(type="TorchNormalize", **img_norm_cfg), 97 | dict( 98 | type="Reshape", 99 | keys=["img"], 100 | new_shape=(len(bands), num_frames, -1, -1), 101 | look_up=dict({"2": 1, "3": 2}), 102 | ), 103 | dict(type="CastTensor", keys=["img"], new_type="torch.FloatTensor"), 104 | dict( 105 | type="CollectTestList", 106 | keys=["img"], 107 | meta_keys=[ 108 | "img_info", 109 | "seg_fields", 110 | "img_prefix", 111 | "seg_prefix", 112 | "filename", 113 | "ori_filename", 114 | "img", 115 | "img_shape", 116 | "ori_shape", 117 | "pad_shape", 118 | "scale_factor", 119 | "img_norm_cfg", 120 | ], 121 | ), 122 | ] 123 | 124 | CLASSES = ("Unburnt land", "Burn scar") 125 | 126 | data = dict( 127 | samples_per_gpu=samples_per_gpu, 128 | workers_per_gpu=num_workers, 129 | train=dict( 130 | type=dataset_type, 131 | CLASSES=CLASSES, 132 | data_root=data_root, 133 | img_dir="training", 134 | ann_dir="training", 135 | img_suffix=img_suffix, 136 | seg_map_suffix=seg_map_suffix, 137 | pipeline=train_pipeline, 138 | ignore_index=-1, 139 | ), 140 | val=dict( 141 | type=dataset_type, 142 | CLASSES=CLASSES, 143 | data_root=data_root, 144 | img_dir="validation", 145 | ann_dir="validation", 146 | img_suffix=img_suffix, 147 | seg_map_suffix=seg_map_suffix, 148 | pipeline=test_pipeline, 149 | ignore_index=-1, 150 | ), 151 | test=dict( 152 | type=dataset_type, 153 | CLASSES=CLASSES, 154 | data_root=data_root, 155 | img_dir="validation", 156 | ann_dir="validation", 157 | img_suffix=img_suffix, 158 | seg_map_suffix=seg_map_suffix, 159 | pipeline=test_pipeline, 160 | ignore_index=-1, 161 | ), 162 | ) 163 | 164 | optimizer = dict(type="Adam", lr=1.3e-05, betas=(0.9, 0.999)) 165 | optimizer_config = dict(grad_clip=None) 166 | lr_config = dict( 167 | policy="poly", 168 | warmup="linear", 169 | warmup_iters=1500, 170 | warmup_ratio=1e-06, 171 | power=1.0, 172 | min_lr=0.0, 173 | by_epoch=False, 174 | ) 175 | log_config = dict( 176 | interval=20, 177 | hooks=[ 178 | dict(type="TextLoggerHook", by_epoch=False), 179 | dict(type="TensorboardLoggerHook", by_epoch=False), 180 | ], 181 | ) 182 | checkpoint_config = dict(by_epoch=True, interval=10, out_dir=save_path) 183 | evaluation = dict( 184 | interval=evaluation_interval, 185 | metric="mIoU", 186 | pre_eval=True, 187 | save_best="mIoU", 188 | by_epoch=False, 189 | ) 190 | 191 | loss_func = dict(type="DiceLoss", use_sigmoid=False, loss_weight=1, ignore_index=-1) 192 | 193 | runner = dict(type="IterBasedRunner", max_iters=max_intervals) 194 | workflow = [("train", 1)] 195 | norm_cfg = dict(type="BN", requires_grad=True) 196 | model = dict( 197 | type="TemporalEncoderDecoder", 198 | frozen_backbone=False, 199 | backbone=dict( 200 | type="TemporalViTEncoder", 201 | pretrained=pretrained_weights_path, 202 | img_size=img_size, 203 | patch_size=patch_size, 204 | num_frames=num_frames, 205 | 
tubelet_size=tubelet_size, 206 | in_chans=len(bands), 207 | embed_dim=embed_dim, 208 | depth=12, 209 | num_heads=num_heads, 210 | mlp_ratio=4.0, 211 | norm_pix_loss=False, 212 | ), 213 | neck=dict( 214 | type="ConvTransformerTokensToEmbeddingNeck", 215 | embed_dim=embed_dim * num_frames, 216 | output_embed_dim=output_embed_dim, 217 | drop_cls_token=True, 218 | Hp=14, 219 | Wp=14, 220 | ), 221 | decode_head=dict( 222 | num_classes=len(CLASSES), 223 | in_channels=output_embed_dim, 224 | type="FCNHead", 225 | in_index=-1, 226 | channels=256, 227 | num_convs=1, 228 | concat_input=False, 229 | dropout_ratio=0.1, 230 | norm_cfg=dict(type="BN", requires_grad=True), 231 | align_corners=False, 232 | loss_decode=loss_func, 233 | ), 234 | auxiliary_head=dict( 235 | num_classes=len(CLASSES), 236 | in_channels=output_embed_dim, 237 | type="FCNHead", 238 | in_index=-1, 239 | channels=256, 240 | num_convs=2, 241 | concat_input=False, 242 | dropout_ratio=0.1, 243 | norm_cfg=dict(type="BN", requires_grad=True), 244 | align_corners=False, 245 | loss_decode=loss_func, 246 | ), 247 | train_cfg=dict(), 248 | test_cfg=dict( 249 | mode="slide", 250 | stride=(int(tile_size / 2), int(tile_size / 2)), 251 | crop_size=(tile_size, tile_size), 252 | ), 253 | ) 254 | auto_resume = False 255 | -------------------------------------------------------------------------------- /configs/multi_temporal_crop_classification.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | dist_params = dict(backend="nccl") 4 | log_level = "INFO" 5 | load_from = None 6 | resume_from = None 7 | cudnn_benchmark = True 8 | custom_imports = dict(imports=["geospatial_fm"]) 9 | num_frames = 3 10 | img_size = 224 11 | num_workers = 2 12 | 13 | # model 14 | # TO BE DEFINED BY USER: model path 15 | pretrained_weights_path = "" 16 | num_layers = 6 17 | patch_size = 16 18 | embed_dim = 768 19 | num_heads = 8 20 | tubelet_size = 1 21 | max_epochs = 80 22 | eval_epoch_interval = 5 23 | 24 | loss_weights_multi = [ 25 | 0.386375, 26 | 0.661126, 27 | 0.548184, 28 | 0.640482, 29 | 0.876862, 30 | 0.925186, 31 | 3.249462, 32 | 1.542289, 33 | 2.175141, 34 | 2.272419, 35 | 3.062762, 36 | 3.626097, 37 | 1.198702, 38 | ] 39 | loss_func = dict( 40 | type="CrossEntropyLoss", 41 | use_sigmoid=False, 42 | class_weight=loss_weights_multi, 43 | avg_non_ignore=True, 44 | ) 45 | output_embed_dim = embed_dim * num_frames 46 | 47 | 48 | # TO BE DEFINED BY USER: Save directory 49 | experiment = "" 50 | project_dir = "" 51 | work_dir = os.path.join(project_dir, experiment) 52 | save_path = work_dir 53 | 54 | 55 | dataset_type = "GeospatialDataset" 56 | 57 | # TO BE DEFINED BY USER: data directory 58 | data_root = "" 59 | 60 | img_norm_cfg = dict( 61 | means=[ 62 | 494.905781, 63 | 815.239594, 64 | 924.335066, 65 | 2968.881459, 66 | 2634.621962, 67 | 1739.579917, 68 | 494.905781, 69 | 815.239594, 70 | 924.335066, 71 | 2968.881459, 72 | 2634.621962, 73 | 1739.579917, 74 | 494.905781, 75 | 815.239594, 76 | 924.335066, 77 | 2968.881459, 78 | 2634.621962, 79 | 1739.579917, 80 | ], 81 | stds=[ 82 | 284.925432, 83 | 357.84876, 84 | 575.566823, 85 | 896.601013, 86 | 951.900334, 87 | 921.407808, 88 | 284.925432, 89 | 357.84876, 90 | 575.566823, 91 | 896.601013, 92 | 951.900334, 93 | 921.407808, 94 | 284.925432, 95 | 357.84876, 96 | 575.566823, 97 | 896.601013, 98 | 951.900334, 99 | 921.407808, 100 | ], 101 | ) 102 | 103 | bands = [0, 1, 2, 3, 4, 5] 104 | 105 | tile_size = 224 106 | orig_nsize = 512 107 | crop_size = (tile_size, 
tile_size) 108 | train_pipeline = [ 109 | dict(type="LoadGeospatialImageFromFile", to_float32=True), 110 | dict(type="LoadGeospatialAnnotations", reduce_zero_label=True), 111 | dict(type="RandomFlip", prob=0.5), 112 | dict(type="ToTensor", keys=["img", "gt_semantic_seg"]), 113 | # to channels first 114 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 115 | dict(type="TorchNormalize", **img_norm_cfg), 116 | dict(type="TorchRandomCrop", crop_size=crop_size), 117 | dict( 118 | type="Reshape", 119 | keys=["img"], 120 | new_shape=(len(bands), num_frames, tile_size, tile_size), 121 | ), 122 | dict(type="Reshape", keys=["gt_semantic_seg"], new_shape=(1, tile_size, tile_size)), 123 | dict(type="CastTensor", keys=["gt_semantic_seg"], new_type="torch.LongTensor"), 124 | dict(type="Collect", keys=["img", "gt_semantic_seg"]), 125 | ] 126 | 127 | test_pipeline = [ 128 | dict(type="LoadGeospatialImageFromFile", to_float32=True), 129 | dict(type="ToTensor", keys=["img"]), 130 | # to channels first 131 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 132 | dict(type="TorchNormalize", **img_norm_cfg), 133 | dict( 134 | type="Reshape", 135 | keys=["img"], 136 | new_shape=(len(bands), num_frames, -1, -1), 137 | look_up=dict({"2": 1, "3": 2}), 138 | ), 139 | dict(type="CastTensor", keys=["img"], new_type="torch.FloatTensor"), 140 | dict( 141 | type="CollectTestList", 142 | keys=["img"], 143 | meta_keys=[ 144 | "img_info", 145 | "seg_fields", 146 | "img_prefix", 147 | "seg_prefix", 148 | "filename", 149 | "ori_filename", 150 | "img", 151 | "img_shape", 152 | "ori_shape", 153 | "pad_shape", 154 | "scale_factor", 155 | "img_norm_cfg", 156 | ], 157 | ), 158 | ] 159 | 160 | CLASSES = ( 161 | "Natural Vegetation", 162 | "Forest", 163 | "Corn", 164 | "Soybeans", 165 | "Wetlands", 166 | "Developed/Barren", 167 | "Open Water", 168 | "Winter Wheat", 169 | "Alfalfa", 170 | "Fallow/Idle Cropland", 171 | "Cotton", 172 | "Sorghum", 173 | "Other", 174 | ) 175 | 176 | dataset = "GeospatialDataset" 177 | data = dict( 178 | samples_per_gpu=8, 179 | workers_per_gpu=4, 180 | train=dict( 181 | type=dataset, 182 | CLASSES=CLASSES, 183 | reduce_zero_label=True, 184 | data_root=data_root, 185 | img_dir="training_chips", 186 | ann_dir="training_chips", 187 | pipeline=train_pipeline, 188 | img_suffix="_merged.tif", 189 | seg_map_suffix=".mask.tif", 190 | ), 191 | val=dict( 192 | type=dataset, 193 | CLASSES=CLASSES, 194 | reduce_zero_label=True, 195 | data_root=data_root, 196 | img_dir="validation_chips", 197 | ann_dir="validation_chips", 198 | pipeline=test_pipeline, 199 | img_suffix="_merged.tif", 200 | seg_map_suffix=".mask.tif", 201 | ), 202 | test=dict( 203 | type=dataset, 204 | CLASSES=CLASSES, 205 | reduce_zero_label=True, 206 | data_root=data_root, 207 | img_dir="validation_chips", 208 | ann_dir="validation_chips", 209 | pipeline=test_pipeline, 210 | img_suffix="_merged.tif", 211 | seg_map_suffix=".mask.tif", 212 | ), 213 | ) 214 | 215 | optimizer = dict(type="Adam", lr=1.5e-05, betas=(0.9, 0.999), weight_decay=0.05) 216 | optimizer_config = dict(grad_clip=None) 217 | lr_config = dict( 218 | policy="poly", 219 | warmup="linear", 220 | warmup_iters=1500, 221 | warmup_ratio=1e-06, 222 | power=1.0, 223 | min_lr=0.0, 224 | by_epoch=False, 225 | ) 226 | log_config = dict( 227 | interval=10, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] 228 | ) 229 | 230 | checkpoint_config = dict(by_epoch=True, interval=100, out_dir=save_path) 231 | 232 | evaluation = dict( 233 | 
interval=eval_epoch_interval, 234 | metric="mIoU", 235 | pre_eval=True, 236 | save_best="mIoU", 237 | by_epoch=True, 238 | ) 239 | reduce_train_set = dict(reduce_train_set=False) 240 | reduce_factor = dict(reduce_factor=1) 241 | runner = dict(type="EpochBasedRunner", max_epochs=max_epochs) 242 | workflow = [("train", 1)] 243 | norm_cfg = dict(type="BN", requires_grad=True) 244 | 245 | model = dict( 246 | type="TemporalEncoderDecoder", 247 | frozen_backbone=False, 248 | backbone=dict( 249 | type="TemporalViTEncoder", 250 | pretrained=pretrained_weights_path, 251 | img_size=img_size, 252 | patch_size=patch_size, 253 | num_frames=num_frames, 254 | tubelet_size=1, 255 | in_chans=len(bands), 256 | embed_dim=embed_dim, 257 | depth=6, 258 | num_heads=num_heads, 259 | mlp_ratio=4.0, 260 | norm_pix_loss=False, 261 | ), 262 | neck=dict( 263 | type="ConvTransformerTokensToEmbeddingNeck", 264 | embed_dim=embed_dim * num_frames, 265 | output_embed_dim=output_embed_dim, 266 | drop_cls_token=True, 267 | Hp=14, 268 | Wp=14, 269 | ), 270 | decode_head=dict( 271 | num_classes=len(CLASSES), 272 | in_channels=output_embed_dim, 273 | type="FCNHead", 274 | in_index=-1, 275 | channels=256, 276 | num_convs=1, 277 | concat_input=False, 278 | dropout_ratio=0.1, 279 | norm_cfg=dict(type="BN", requires_grad=True), 280 | align_corners=False, 281 | loss_decode=loss_func, 282 | ), 283 | auxiliary_head=dict( 284 | num_classes=len(CLASSES), 285 | in_channels=output_embed_dim, 286 | type="FCNHead", 287 | in_index=-1, 288 | channels=256, 289 | num_convs=2, 290 | concat_input=False, 291 | dropout_ratio=0.1, 292 | norm_cfg=dict(type="BN", requires_grad=True), 293 | align_corners=False, 294 | loss_decode=loss_func, 295 | ), 296 | train_cfg=dict(), 297 | test_cfg=dict( 298 | mode="slide", 299 | stride=(int(tile_size / 2), int(tile_size / 2)), 300 | crop_size=(tile_size, tile_size), 301 | ), 302 | ) 303 | auto_resume = False 304 | -------------------------------------------------------------------------------- /configs/sen1floods11_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # base options 4 | dist_params = dict(backend="nccl") 5 | log_level = "INFO" 6 | load_from = None 7 | resume_from = None 8 | cudnn_benchmark = True 9 | 10 | custom_imports = dict(imports=["geospatial_fm"]) 11 | 12 | 13 | ### Configs 14 | # Data 15 | # TO BE DEFINED BY USER: Data root to sen1floods11 downloaded dataset 16 | data_root = "" 17 | 18 | dataset_type = "GeospatialDataset" 19 | num_classes = 2 20 | num_frames = 1 21 | img_size = 224 22 | num_workers = 2 23 | samples_per_gpu = 4 24 | CLASSES = (0, 1) 25 | 26 | img_norm_cfg = dict( 27 | means=[0.14245495, 0.13921481, 0.12434631, 0.31420089, 0.20743526, 0.12046503], 28 | stds=[0.04036231, 0.04186983, 0.05267646, 0.0822221, 0.06834774, 0.05294205], 29 | ) 30 | 31 | bands = [1, 2, 3, 8, 11, 12] 32 | tile_size = img_size 33 | orig_nsize = 512 34 | crop_size = (tile_size, tile_size) 35 | 36 | img_dir = data_root + "v1.1/data/flood_events/HandLabeled/S2Hand" 37 | ann_dir = data_root + "v1.1/data/flood_events/HandLabeled/LabelHand" 38 | img_suffix = f"_S2Hand.tif" 39 | seg_map_suffix = f"_LabelHand.tif" 40 | 41 | splits = { 42 | "train": "data_splits/sen1floods11/train_split.txt", 43 | "val": "data_splits/sen1floods11/val_split.txt", 44 | "test": "data_splits/sen1floods11/test_split.txt", 45 | } 46 | splits = {k: os.path.abspath(v) for (k, v) in splits.items()} 47 | 48 | ignore_index = 2 49 | label_nodata = -1 50 | image_nodata = -9999 51 
| image_nodata_replace = 0 52 | constant = 0.0001 53 | 54 | # Model 55 | # TO BE DEFINED BY USER: path to pretrained backbone weights 56 | pretrained_weights_path = "" 57 | num_layers = 12 58 | patch_size = 16 59 | embed_dim = 768 60 | num_heads = 12 61 | tubelet_size = 1 62 | 63 | # TRAINING 64 | epochs = 100 65 | eval_epoch_interval = 5 66 | 67 | # TO BE DEFINED BY USER: Save directory 68 | experiment = "" 69 | project_dir = "" 70 | work_dir = os.path.join(project_dir, experiment) 71 | save_path = work_dir 72 | 73 | # Pipelines 74 | train_pipeline = [ 75 | dict( 76 | type="LoadGeospatialImageFromFile", 77 | to_float32=False, 78 | nodata=image_nodata, 79 | nodata_replace=image_nodata_replace, 80 | ), 81 | dict( 82 | type="LoadGeospatialAnnotations", 83 | reduce_zero_label=False, 84 | nodata=label_nodata, 85 | nodata_replace=ignore_index, 86 | ), 87 | dict(type="BandsExtract", bands=bands), 88 | dict(type="ConstantMultiply", constant=constant), 89 | dict(type="RandomFlip", prob=0.5), 90 | dict(type="ToTensor", keys=["img", "gt_semantic_seg"]), 91 | # to channels first 92 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 93 | dict(type="TorchNormalize", **img_norm_cfg), 94 | dict(type="TorchRandomCrop", crop_size=crop_size), 95 | dict( 96 | type="Reshape", 97 | keys=["img"], 98 | new_shape=(len(bands), num_frames, tile_size, tile_size), 99 | ), 100 | dict(type="Reshape", keys=["gt_semantic_seg"], new_shape=(1, tile_size, tile_size)), 101 | dict(type="CastTensor", keys=["gt_semantic_seg"], new_type="torch.LongTensor"), 102 | dict(type="Collect", keys=["img", "gt_semantic_seg"]), 103 | ] 104 | 105 | 106 | test_pipeline = [ 107 | dict( 108 | type="LoadGeospatialImageFromFile", 109 | to_float32=False, 110 | nodata=image_nodata, 111 | nodata_replace=image_nodata_replace, 112 | ), 113 | dict(type="BandsExtract", bands=bands), 114 | dict(type="ConstantMultiply", constant=constant), 115 | dict(type="ToTensor", keys=["img"]), 116 | # to channels first 117 | dict(type="TorchPermute", keys=["img"], order=(2, 0, 1)), 118 | dict(type="TorchNormalize", **img_norm_cfg), 119 | dict( 120 | type="Reshape", 121 | keys=["img"], 122 | new_shape=(len(bands), num_frames, -1, -1), 123 | look_up={"2": 1, "3": 2}, 124 | ), 125 | dict(type="CastTensor", keys=["img"], new_type="torch.FloatTensor"), 126 | dict( 127 | type="CollectTestList", 128 | keys=["img"], 129 | meta_keys=[ 130 | "img_info", 131 | "seg_fields", 132 | "img_prefix", 133 | "seg_prefix", 134 | "filename", 135 | "ori_filename", 136 | "img", 137 | "img_shape", 138 | "ori_shape", 139 | "pad_shape", 140 | "scale_factor", 141 | "img_norm_cfg", 142 | ], 143 | ), 144 | ] 145 | 146 | # Dataset 147 | data = dict( 148 | samples_per_gpu=samples_per_gpu, 149 | workers_per_gpu=num_workers, 150 | train=dict( 151 | type=dataset_type, 152 | CLASSES=CLASSES, 153 | data_root=data_root, 154 | img_dir=img_dir, 155 | ann_dir=ann_dir, 156 | img_suffix=img_suffix, 157 | seg_map_suffix=seg_map_suffix, 158 | pipeline=train_pipeline, 159 | ignore_index=ignore_index, 160 | split=splits["train"], 161 | ), 162 | val=dict( 163 | type=dataset_type, 164 | CLASSES=CLASSES, 165 | data_root=data_root, 166 | img_dir=img_dir, 167 | ann_dir=ann_dir, 168 | img_suffix=img_suffix, 169 | seg_map_suffix=seg_map_suffix, 170 | pipeline=test_pipeline, 171 | ignore_index=ignore_index, 172 | split=splits["val"], 173 | gt_seg_map_loader_cfg=dict(nodata=label_nodata, nodata_replace=ignore_index), 174 | ), 175 | test=dict( 176 | type=dataset_type, 177 | CLASSES=CLASSES, 178 | 
data_root=data_root, 179 | img_dir=img_dir, 180 | ann_dir=ann_dir, 181 | img_suffix=img_suffix, 182 | seg_map_suffix=seg_map_suffix, 183 | pipeline=test_pipeline, 184 | ignore_index=ignore_index, 185 | split=splits["test"], 186 | gt_seg_map_loader_cfg=dict(nodata=label_nodata, nodata_replace=ignore_index), 187 | ), 188 | ) 189 | 190 | # Training 191 | optimizer = dict( 192 | type="AdamW", 193 | lr=1.5e-5, 194 | weight_decay=0.05, 195 | betas=(0.9, 0.999), 196 | ) 197 | optimizer_config = dict(grad_clip=None) 198 | lr_config = dict( 199 | policy="poly", 200 | warmup="linear", 201 | warmup_iters=1500, 202 | warmup_ratio=1e-6, 203 | power=1.0, 204 | min_lr=0.0, 205 | by_epoch=False, 206 | ) 207 | 208 | log_config = dict( 209 | interval=10, 210 | hooks=[ 211 | dict(type="TextLoggerHook", by_epoch=True), 212 | dict(type="TensorboardLoggerHook", by_epoch=True), 213 | ], 214 | ) 215 | 216 | checkpoint_config = dict(by_epoch=True, interval=10, out_dir=save_path) 217 | 218 | evaluation = dict( 219 | interval=eval_epoch_interval, 220 | metric="mIoU", 221 | pre_eval=True, 222 | save_best="mIoU", 223 | by_epoch=True, 224 | ) 225 | 226 | runner = dict(type="EpochBasedRunner", max_epochs=epochs) 227 | 228 | workflow = [("train", 1), ("val", 1)] 229 | 230 | norm_cfg = dict(type="BN", requires_grad=True) 231 | 232 | ce_weights = [0.3, 0.7] 233 | 234 | model = dict( 235 | type="TemporalEncoderDecoder", 236 | frozen_backbone=False, 237 | backbone=dict( 238 | type="TemporalViTEncoder", 239 | pretrained=pretrained_weights_path, 240 | img_size=img_size, 241 | patch_size=patch_size, 242 | num_frames=num_frames, 243 | tubelet_size=1, 244 | in_chans=len(bands), 245 | embed_dim=embed_dim, 246 | depth=num_layers, 247 | num_heads=num_heads, 248 | mlp_ratio=4.0, 249 | norm_pix_loss=False, 250 | ), 251 | neck=dict( 252 | type="ConvTransformerTokensToEmbeddingNeck", 253 | embed_dim=num_frames * embed_dim, 254 | output_embed_dim=embed_dim, 255 | drop_cls_token=True, 256 | Hp=img_size // patch_size, 257 | Wp=img_size // patch_size, 258 | ), 259 | decode_head=dict( 260 | num_classes=num_classes, 261 | in_channels=embed_dim, 262 | type="FCNHead", 263 | in_index=-1, 264 | ignore_index=ignore_index, 265 | channels=256, 266 | num_convs=1, 267 | concat_input=False, 268 | dropout_ratio=0.1, 269 | norm_cfg=norm_cfg, 270 | align_corners=False, 271 | loss_decode=dict( 272 | type="CrossEntropyLoss", 273 | use_sigmoid=False, 274 | loss_weight=1, 275 | class_weight=ce_weights, 276 | avg_non_ignore=True, 277 | ), 278 | ), 279 | auxiliary_head=dict( 280 | num_classes=num_classes, 281 | in_channels=embed_dim, 282 | ignore_index=ignore_index, 283 | type="FCNHead", 284 | in_index=-1, 285 | channels=256, 286 | num_convs=2, 287 | concat_input=False, 288 | dropout_ratio=0.1, 289 | norm_cfg=norm_cfg, 290 | align_corners=False, 291 | loss_decode=dict( 292 | type="CrossEntropyLoss", 293 | use_sigmoid=False, 294 | loss_weight=1, 295 | class_weight=ce_weights, 296 | avg_non_ignore=True, 297 | ), 298 | ), 299 | train_cfg=dict(), 300 | test_cfg=dict( 301 | mode="slide", 302 | stride=(int(tile_size / 2), int(tile_size / 2)), 303 | crop_size=(tile_size, tile_size), 304 | ), 305 | ) 306 | -------------------------------------------------------------------------------- /data_splits/multi_temporal_crop_classification/training_data.txt: -------------------------------------------------------------------------------- 1 | chip_257_266 2 | chip_328_501 3 | chip_171_477 4 | chip_236_281 5 | chip_134_482 6 | chip_120_493 7 | chip_161_390 8 | chip_102_442 9 | 
chip_129_508 10 | chip_213_402 11 | chip_417_328 12 | chip_198_322 13 | chip_114_283 14 | chip_109_419 15 | chip_108_418 16 | chip_237_362 17 | chip_152_478 18 | chip_236_291 19 | chip_108_414 20 | chip_352_543 21 | chip_387_331 22 | chip_228_292 23 | chip_129_472 24 | chip_257_275 25 | chip_130_475 26 | chip_134_440 27 | chip_111_416 28 | chip_250_584 29 | chip_141_474 30 | chip_241_442 31 | chip_228_329 32 | chip_248_592 33 | chip_344_537 34 | chip_169_516 35 | chip_116_114 36 | chip_231_302 37 | chip_158_523 38 | chip_216_335 39 | chip_358_324 40 | chip_139_473 41 | chip_257_267 42 | chip_237_189 43 | chip_148_472 44 | chip_147_470 45 | chip_095_349 46 | chip_246_593 47 | chip_321_532 48 | chip_103_320 49 | chip_347_398 50 | chip_137_437 51 | chip_193_406 52 | chip_246_584 53 | chip_323_314 54 | chip_238_286 55 | chip_039_108 56 | chip_051_166 57 | chip_320_534 58 | chip_245_593 59 | chip_131_445 60 | chip_107_415 61 | chip_131_506 62 | chip_322_533 63 | chip_240_332 64 | chip_319_516 65 | chip_147_444 66 | chip_136_478 67 | chip_212_296 68 | chip_194_313 69 | chip_122_510 70 | chip_356_567 71 | chip_279_551 72 | chip_173_468 73 | chip_234_370 74 | chip_208_392 75 | chip_126_133 76 | chip_234_435 77 | chip_197_261 78 | chip_122_444 79 | chip_063_354 80 | chip_100_102 81 | chip_210_390 82 | chip_155_413 83 | chip_300_543 84 | chip_240_273 85 | chip_311_469 86 | chip_173_585 87 | chip_305_300 88 | chip_159_468 89 | chip_269_420 90 | chip_191_367 91 | chip_386_324 92 | chip_349_542 93 | chip_131_443 94 | chip_282_561 95 | chip_148_470 96 | chip_196_362 97 | chip_132_487 98 | chip_147_486 99 | chip_085_254 100 | chip_220_306 101 | chip_217_315 102 | chip_130_170 103 | chip_320_544 104 | chip_256_445 105 | chip_257_441 106 | chip_124_123 107 | chip_252_261 108 | chip_132_440 109 | chip_150_495 110 | chip_076_351 111 | chip_235_599 112 | chip_205_338 113 | chip_069_196 114 | chip_218_328 115 | chip_118_446 116 | chip_036_147 117 | chip_159_593 118 | chip_189_403 119 | chip_142_474 120 | chip_172_561 121 | chip_158_618 122 | chip_115_318 123 | chip_319_529 124 | chip_181_511 125 | chip_243_335 126 | chip_172_588 127 | chip_159_481 128 | chip_182_400 129 | chip_199_324 130 | chip_307_525 131 | chip_272_266 132 | chip_239_283 133 | chip_135_472 134 | chip_184_239 135 | chip_175_400 136 | chip_313_236 137 | chip_233_267 138 | chip_203_455 139 | chip_119_447 140 | chip_176_238 141 | chip_163_478 142 | chip_182_238 143 | chip_171_586 144 | chip_111_417 145 | chip_332_505 146 | chip_087_201 147 | chip_105_427 148 | chip_234_373 149 | chip_150_481 150 | chip_119_323 151 | chip_145_413 152 | chip_370_350 153 | chip_151_418 154 | chip_130_504 155 | chip_121_445 156 | chip_108_402 157 | chip_133_483 158 | chip_169_612 159 | chip_233_332 160 | chip_258_445 161 | chip_038_107 162 | chip_130_480 163 | chip_219_317 164 | chip_329_393 165 | chip_132_439 166 | chip_059_119 167 | chip_227_301 168 | chip_196_333 169 | chip_240_587 170 | chip_146_393 171 | chip_214_292 172 | chip_139_421 173 | chip_134_364 174 | chip_250_259 175 | chip_116_448 176 | chip_272_259 177 | chip_245_443 178 | chip_149_489 179 | chip_302_301 180 | chip_217_328 181 | chip_099_102 182 | chip_160_532 183 | chip_252_274 184 | chip_055_144 185 | chip_179_520 186 | chip_152_404 187 | chip_228_300 188 | chip_194_310 189 | chip_109_414 190 | chip_111_421 191 | chip_123_508 192 | chip_127_482 193 | chip_319_525 194 | chip_174_503 195 | chip_185_392 196 | chip_066_347 197 | chip_115_438 198 | chip_128_492 199 | chip_180_023 200 | chip_218_324 
201 | chip_219_476 202 | chip_167_411 203 | chip_138_615 204 | chip_107_334 205 | chip_153_469 206 | chip_140_493 207 | chip_313_519 208 | chip_258_266 209 | chip_105_402 210 | chip_126_124 211 | chip_106_406 212 | chip_189_493 213 | chip_135_494 214 | chip_062_084 215 | chip_149_503 216 | chip_182_606 217 | chip_350_328 218 | chip_084_359 219 | chip_239_281 220 | chip_003_062 221 | chip_281_449 222 | chip_142_485 223 | chip_228_286 224 | chip_174_521 225 | chip_036_149 226 | chip_106_249 227 | chip_137_496 228 | chip_269_477 229 | chip_225_438 230 | chip_089_369 231 | chip_242_471 232 | chip_193_431 233 | chip_198_016 234 | chip_064_269 235 | chip_417_321 236 | chip_331_502 237 | chip_118_437 238 | chip_241_280 239 | chip_330_508 240 | chip_139_436 241 | chip_128_322 242 | chip_239_595 243 | chip_255_272 244 | chip_276_565 245 | chip_236_443 246 | chip_330_497 247 | chip_317_528 248 | chip_324_509 249 | chip_125_492 250 | chip_075_347 251 | chip_104_320 252 | chip_107_410 253 | chip_183_512 254 | chip_251_264 255 | chip_271_395 256 | chip_140_486 257 | chip_374_298 258 | chip_370_343 259 | chip_267_437 260 | chip_192_325 261 | chip_170_601 262 | chip_134_423 263 | chip_116_492 264 | chip_242_472 265 | chip_236_277 266 | chip_266_276 267 | chip_231_497 268 | chip_106_411 269 | chip_332_504 270 | chip_140_488 271 | chip_186_380 272 | chip_111_595 273 | chip_202_451 274 | chip_092_322 275 | chip_249_587 276 | chip_265_582 277 | chip_095_327 278 | chip_357_370 279 | chip_152_494 280 | chip_265_482 281 | chip_230_282 282 | chip_128_435 283 | chip_330_512 284 | chip_332_508 285 | chip_268_373 286 | chip_153_473 287 | chip_239_333 288 | chip_205_293 289 | chip_344_460 290 | chip_146_501 291 | chip_196_278 292 | chip_109_424 293 | chip_060_070 294 | chip_058_192 295 | chip_164_405 296 | chip_130_472 297 | chip_185_516 298 | chip_268_410 299 | chip_210_391 300 | chip_246_582 301 | chip_150_468 302 | chip_151_489 303 | chip_204_288 304 | chip_136_485 305 | chip_168_407 306 | chip_304_545 307 | chip_232_265 308 | chip_319_540 309 | chip_245_333 310 | chip_139_390 311 | chip_210_466 312 | chip_109_321 313 | chip_333_497 314 | chip_156_389 315 | chip_108_336 316 | chip_131_471 317 | chip_152_240 318 | chip_173_579 319 | chip_215_309 320 | chip_103_102 321 | chip_202_439 322 | chip_168_580 323 | chip_223_446 324 | chip_119_554 325 | chip_233_288 326 | chip_323_522 327 | chip_064_355 328 | chip_219_444 329 | chip_316_523 330 | chip_253_271 331 | chip_064_117 332 | chip_255_447 333 | chip_055_199 334 | chip_216_337 335 | chip_104_342 336 | chip_191_432 337 | chip_147_440 338 | chip_153_401 339 | chip_106_412 340 | chip_070_097 341 | chip_315_519 342 | chip_237_334 343 | chip_319_542 344 | chip_190_349 345 | chip_166_436 346 | chip_106_409 347 | chip_421_325 348 | chip_228_279 349 | chip_231_293 350 | chip_141_493 351 | chip_162_592 352 | chip_229_592 353 | chip_180_611 354 | chip_306_425 355 | chip_180_262 356 | chip_256_443 357 | chip_097_334 358 | chip_238_329 359 | chip_137_432 360 | chip_151_474 361 | chip_240_475 362 | chip_213_432 363 | chip_283_448 364 | chip_218_432 365 | chip_069_353 366 | chip_223_284 367 | chip_252_273 368 | chip_317_517 369 | chip_252_446 370 | chip_198_027 371 | chip_099_319 372 | chip_237_591 373 | chip_287_453 374 | chip_161_469 375 | chip_063_268 376 | chip_219_311 377 | chip_144_488 378 | chip_231_275 379 | chip_256_446 380 | chip_206_293 381 | chip_244_333 382 | chip_219_336 383 | chip_070_349 384 | chip_331_495 385 | chip_034_097 386 | chip_105_335 387 | chip_181_264 
388 | chip_086_203 389 | chip_080_600 390 | chip_250_269 391 | chip_127_320 392 | chip_132_444 393 | chip_230_329 394 | chip_344_469 395 | chip_086_216 396 | chip_221_441 397 | chip_116_444 398 | chip_185_404 399 | chip_291_450 400 | chip_122_507 401 | chip_217_437 402 | chip_109_435 403 | chip_128_317 404 | chip_134_484 405 | chip_117_112 406 | chip_207_466 407 | chip_256_278 408 | chip_113_298 409 | chip_222_293 410 | chip_168_385 411 | chip_283_431 412 | chip_286_301 413 | chip_208_372 414 | chip_215_310 415 | chip_115_040 416 | chip_107_417 417 | chip_341_499 418 | chip_089_215 419 | chip_214_315 420 | chip_220_437 421 | chip_352_420 422 | chip_136_435 423 | chip_254_259 424 | chip_056_198 425 | chip_135_471 426 | chip_106_106 427 | chip_092_320 428 | chip_065_192 429 | chip_219_432 430 | chip_110_434 431 | chip_330_509 432 | chip_249_270 433 | chip_130_426 434 | chip_105_404 435 | chip_127_504 436 | chip_328_510 437 | chip_147_443 438 | chip_166_524 439 | chip_161_521 440 | chip_281_402 441 | chip_132_508 442 | chip_209_449 443 | chip_365_319 444 | chip_282_263 445 | chip_317_532 446 | chip_112_417 447 | chip_193_368 448 | chip_149_492 449 | chip_104_333 450 | chip_168_588 451 | chip_051_235 452 | chip_305_343 453 | chip_128_431 454 | chip_227_279 455 | chip_197_393 456 | chip_150_490 457 | chip_130_507 458 | chip_159_515 459 | chip_036_113 460 | chip_211_428 461 | chip_185_433 462 | chip_254_271 463 | chip_263_287 464 | chip_129_425 465 | chip_238_276 466 | chip_050_114 467 | chip_176_027 468 | chip_192_409 469 | chip_181_484 470 | chip_318_523 471 | chip_177_408 472 | chip_121_466 473 | chip_230_302 474 | chip_131_421 475 | chip_336_533 476 | chip_274_479 477 | chip_093_103 478 | chip_315_516 479 | chip_310_542 480 | chip_297_429 481 | chip_086_361 482 | chip_171_049 483 | chip_040_101 484 | chip_177_604 485 | chip_131_440 486 | chip_165_467 487 | chip_226_282 488 | chip_277_405 489 | chip_131_470 490 | chip_183_235 491 | chip_135_434 492 | chip_234_375 493 | chip_125_436 494 | chip_216_603 495 | chip_146_495 496 | chip_342_534 497 | chip_148_492 498 | chip_244_268 499 | chip_167_522 500 | chip_167_340 501 | chip_270_409 502 | chip_114_423 503 | chip_101_330 504 | chip_148_477 505 | chip_084_361 506 | chip_106_410 507 | chip_316_517 508 | chip_248_589 509 | chip_217_434 510 | chip_200_465 511 | chip_154_478 512 | chip_131_302 513 | chip_108_353 514 | chip_118_326 515 | chip_285_432 516 | chip_160_533 517 | chip_102_104 518 | chip_111_356 519 | chip_226_308 520 | chip_331_494 521 | chip_183_446 522 | chip_119_503 523 | chip_117_504 524 | chip_128_478 525 | chip_191_585 526 | chip_266_275 527 | chip_279_256 528 | chip_086_336 529 | chip_030_042 530 | chip_032_079 531 | chip_098_308 532 | chip_229_286 533 | chip_010_063 534 | chip_194_329 535 | chip_180_264 536 | chip_112_405 537 | chip_113_331 538 | chip_214_424 539 | chip_220_479 540 | chip_199_450 541 | chip_105_293 542 | chip_218_457 543 | chip_158_462 544 | chip_112_246 545 | chip_171_338 546 | chip_147_489 547 | chip_225_371 548 | chip_246_435 549 | chip_187_405 550 | chip_145_493 551 | chip_270_316 552 | chip_129_381 553 | chip_320_318 554 | chip_083_208 555 | chip_138_490 556 | chip_219_334 557 | chip_171_292 558 | chip_188_365 559 | chip_183_283 560 | chip_157_388 561 | chip_160_615 562 | chip_311_519 563 | chip_046_044 564 | chip_064_067 565 | chip_067_292 566 | chip_095_343 567 | chip_067_282 568 | chip_247_584 569 | chip_217_285 570 | chip_076_349 571 | chip_112_440 572 | chip_247_420 573 | chip_129_509 574 | chip_105_104 
575 | chip_032_080 576 | chip_193_583 577 | chip_224_294 578 | chip_198_311 579 | chip_119_448 580 | chip_258_443 581 | chip_354_327 582 | chip_165_476 583 | chip_134_361 584 | chip_071_352 585 | chip_136_495 586 | chip_037_102 587 | chip_293_548 588 | chip_149_490 589 | chip_155_412 590 | chip_255_438 591 | chip_337_506 592 | chip_157_597 593 | chip_334_501 594 | chip_281_270 595 | chip_113_329 596 | chip_183_517 597 | chip_213_282 598 | chip_063_115 599 | chip_123_323 600 | chip_045_229 601 | chip_253_260 602 | chip_144_496 603 | chip_329_497 604 | chip_248_588 605 | chip_222_330 606 | chip_097_356 607 | chip_220_307 608 | chip_185_244 609 | chip_214_466 610 | chip_240_276 611 | chip_319_522 612 | chip_151_265 613 | chip_127_480 614 | chip_164_420 615 | chip_298_351 616 | chip_242_448 617 | chip_199_317 618 | chip_256_442 619 | chip_145_472 620 | chip_180_244 621 | chip_251_288 622 | chip_119_340 623 | chip_064_191 624 | chip_254_286 625 | chip_149_265 626 | chip_137_493 627 | chip_282_558 628 | chip_221_324 629 | chip_096_303 630 | chip_249_290 631 | chip_111_414 632 | chip_200_441 633 | chip_242_335 634 | chip_321_338 635 | chip_128_426 636 | chip_117_447 637 | chip_319_524 638 | chip_322_313 639 | chip_138_480 640 | chip_079_095 641 | chip_173_586 642 | chip_179_341 643 | chip_047_172 644 | chip_189_406 645 | chip_154_484 646 | chip_196_258 647 | chip_062_082 648 | chip_149_501 649 | chip_124_504 650 | chip_174_402 651 | chip_115_503 652 | chip_326_500 653 | chip_242_590 654 | chip_128_051 655 | chip_333_498 656 | chip_193_311 657 | chip_061_084 658 | chip_334_499 659 | chip_236_595 660 | chip_182_424 661 | chip_110_340 662 | chip_171_431 663 | chip_138_442 664 | chip_168_325 665 | chip_128_508 666 | chip_254_421 667 | chip_230_490 668 | chip_162_481 669 | chip_087_335 670 | chip_333_499 671 | chip_301_456 672 | chip_165_583 673 | chip_295_541 674 | chip_140_429 675 | chip_115_478 676 | chip_085_264 677 | chip_224_371 678 | chip_281_254 679 | chip_060_196 680 | chip_064_354 681 | chip_114_320 682 | chip_132_506 683 | chip_205_292 684 | chip_019_091 685 | chip_259_414 686 | chip_204_409 687 | chip_343_540 688 | chip_139_366 689 | chip_291_554 690 | chip_234_337 691 | chip_270_408 692 | chip_149_264 693 | chip_094_339 694 | chip_053_192 695 | chip_191_403 696 | chip_171_264 697 | chip_329_489 698 | chip_164_415 699 | chip_223_372 700 | chip_147_478 701 | chip_243_468 702 | chip_327_521 703 | chip_252_264 704 | chip_159_520 705 | chip_322_522 706 | chip_251_279 707 | chip_248_427 708 | chip_204_389 709 | chip_218_315 710 | chip_267_382 711 | chip_166_265 712 | chip_153_375 713 | chip_316_343 714 | chip_252_266 715 | chip_236_444 716 | chip_093_354 717 | chip_196_274 718 | chip_219_257 719 | chip_145_473 720 | chip_269_270 721 | chip_252_584 722 | chip_120_323 723 | chip_241_586 724 | chip_107_414 725 | chip_301_382 726 | chip_153_601 727 | chip_092_356 728 | chip_252_284 729 | chip_281_272 730 | chip_163_403 731 | chip_221_330 732 | chip_391_325 733 | chip_207_402 734 | chip_309_544 735 | chip_042_116 736 | chip_184_518 737 | chip_121_504 738 | chip_118_339 739 | chip_117_449 740 | chip_219_437 741 | chip_164_516 742 | chip_120_126 743 | chip_239_444 744 | chip_127_127 745 | chip_160_465 746 | chip_309_380 747 | chip_238_466 748 | chip_066_117 749 | chip_233_334 750 | chip_217_442 751 | chip_396_322 752 | chip_223_480 753 | chip_293_351 754 | chip_230_303 755 | chip_171_353 756 | chip_088_361 757 | chip_197_361 758 | chip_220_184 759 | chip_145_483 760 | chip_269_268 761 | chip_330_523 
762 | chip_098_340 763 | chip_234_295 764 | chip_172_182 765 | chip_272_562 766 | chip_254_267 767 | chip_130_382 768 | chip_231_288 769 | chip_234_441 770 | chip_185_399 771 | chip_152_469 772 | chip_154_252 773 | chip_091_253 774 | chip_105_407 775 | chip_238_445 776 | chip_179_585 777 | chip_119_509 778 | chip_219_429 779 | chip_059_087 780 | chip_040_114 781 | chip_086_272 782 | chip_276_402 783 | chip_236_367 784 | chip_227_309 785 | chip_247_580 786 | chip_333_504 787 | chip_157_466 788 | chip_196_400 789 | chip_212_447 790 | chip_138_494 791 | chip_343_532 792 | chip_095_322 793 | chip_182_425 794 | chip_104_308 795 | chip_213_391 796 | chip_163_524 797 | chip_131_487 798 | chip_204_390 799 | chip_165_468 800 | chip_203_376 801 | chip_123_494 802 | chip_165_524 803 | chip_109_439 804 | chip_218_460 805 | chip_074_351 806 | chip_114_478 807 | chip_130_381 808 | chip_089_303 809 | chip_281_266 810 | chip_226_314 811 | chip_132_472 812 | chip_032_183 813 | chip_148_488 814 | chip_352_421 815 | chip_201_288 816 | chip_155_483 817 | chip_206_296 818 | chip_135_361 819 | chip_169_590 820 | chip_235_290 821 | chip_119_444 822 | chip_239_446 823 | chip_218_430 824 | chip_165_402 825 | chip_148_494 826 | chip_251_261 827 | chip_260_484 828 | chip_106_334 829 | chip_206_391 830 | chip_297_353 831 | chip_312_160 832 | chip_365_309 833 | chip_335_504 834 | chip_083_175 835 | chip_117_505 836 | chip_141_442 837 | chip_241_447 838 | chip_227_295 839 | chip_268_370 840 | chip_129_446 841 | chip_111_439 842 | chip_118_448 843 | chip_162_478 844 | chip_063_079 845 | chip_230_353 846 | chip_125_439 847 | chip_262_448 848 | chip_161_482 849 | chip_217_290 850 | chip_238_335 851 | chip_174_586 852 | chip_234_445 853 | chip_233_369 854 | chip_194_409 855 | chip_240_594 856 | chip_112_321 857 | chip_286_552 858 | chip_117_486 859 | chip_129_478 860 | chip_296_134 861 | chip_134_304 862 | chip_127_303 863 | chip_250_583 864 | chip_034_104 865 | chip_191_397 866 | chip_117_438 867 | chip_251_320 868 | chip_180_362 869 | chip_325_505 870 | chip_253_445 871 | chip_146_340 872 | chip_227_331 873 | chip_149_468 874 | chip_219_446 875 | chip_326_517 876 | chip_100_341 877 | chip_121_560 878 | chip_235_331 879 | chip_162_460 880 | chip_155_475 881 | chip_073_268 882 | chip_233_273 883 | chip_194_280 884 | chip_067_087 885 | chip_117_320 886 | chip_142_361 887 | chip_207_359 888 | chip_255_277 889 | chip_198_395 890 | chip_221_308 891 | chip_240_588 892 | chip_266_438 893 | chip_229_297 894 | chip_120_340 895 | chip_202_017 896 | chip_186_489 897 | chip_169_346 898 | chip_181_256 899 | chip_157_467 900 | chip_252_272 901 | chip_291_454 902 | chip_265_277 903 | chip_318_339 904 | chip_078_198 905 | chip_126_495 906 | chip_115_447 907 | chip_181_610 908 | chip_203_288 909 | chip_127_477 910 | chip_236_271 911 | chip_223_468 912 | chip_283_281 913 | chip_143_486 914 | chip_147_479 915 | chip_110_435 916 | chip_251_270 917 | chip_249_337 918 | chip_271_389 919 | chip_096_316 920 | chip_161_403 921 | chip_152_470 922 | chip_230_283 923 | chip_251_445 924 | chip_275_406 925 | chip_256_444 926 | chip_119_126 927 | chip_168_382 928 | chip_198_366 929 | chip_151_493 930 | chip_320_525 931 | chip_204_458 932 | chip_148_469 933 | chip_217_432 934 | chip_133_441 935 | chip_331_498 936 | chip_220_316 937 | chip_130_508 938 | chip_372_342 939 | chip_164_517 940 | chip_100_322 941 | chip_245_334 942 | chip_227_275 943 | chip_213_439 944 | chip_074_096 945 | chip_166_411 946 | chip_326_524 947 | chip_003_063 948 | chip_242_588 
949 | chip_225_372 950 | chip_047_106 951 | chip_202_367 952 | chip_155_474 953 | chip_231_594 954 | chip_165_520 955 | chip_133_363 956 | chip_108_352 957 | chip_123_495 958 | chip_226_605 959 | chip_063_120 960 | chip_177_428 961 | chip_218_325 962 | chip_205_480 963 | chip_230_459 964 | chip_218_440 965 | chip_178_605 966 | chip_054_182 967 | chip_118_422 968 | chip_245_435 969 | chip_249_592 970 | chip_148_413 971 | chip_216_264 972 | chip_060_117 973 | chip_106_026 974 | chip_087_360 975 | chip_164_414 976 | chip_182_607 977 | chip_134_499 978 | chip_233_477 979 | chip_237_472 980 | chip_150_265 981 | chip_208_457 982 | chip_153_472 983 | chip_112_448 984 | chip_177_269 985 | chip_183_518 986 | chip_137_476 987 | chip_112_342 988 | chip_232_372 989 | chip_206_401 990 | chip_255_266 991 | chip_193_401 992 | chip_181_239 993 | chip_245_582 994 | chip_213_429 995 | chip_166_379 996 | chip_333_509 997 | chip_260_443 998 | chip_182_484 999 | chip_345_460 1000 | chip_419_323 1001 | chip_152_479 1002 | chip_248_586 1003 | chip_208_383 1004 | chip_108_438 1005 | chip_170_595 1006 | chip_241_598 1007 | chip_181_605 1008 | chip_226_278 1009 | chip_254_587 1010 | chip_108_403 1011 | chip_189_402 1012 | chip_127_125 1013 | chip_164_583 1014 | chip_163_252 1015 | chip_123_482 1016 | chip_151_501 1017 | chip_139_437 1018 | chip_127_478 1019 | chip_141_475 1020 | chip_239_330 1021 | chip_262_583 1022 | chip_307_410 1023 | chip_217_287 1024 | chip_418_313 1025 | chip_264_482 1026 | chip_181_396 1027 | chip_048_231 1028 | chip_193_314 1029 | chip_279_403 1030 | chip_232_441 1031 | chip_169_271 1032 | chip_205_283 1033 | chip_254_443 1034 | chip_318_534 1035 | chip_026_110 1036 | chip_322_508 1037 | chip_114_420 1038 | chip_287_412 1039 | chip_218_336 1040 | chip_047_104 1041 | chip_035_148 1042 | chip_194_433 1043 | chip_219_436 1044 | chip_187_409 1045 | chip_194_364 1046 | chip_180_242 1047 | chip_196_325 1048 | chip_148_473 1049 | chip_389_324 1050 | chip_187_414 1051 | chip_269_370 1052 | chip_316_514 1053 | chip_223_483 1054 | chip_127_442 1055 | chip_209_311 1056 | chip_210_308 1057 | chip_214_428 1058 | chip_211_308 1059 | chip_248_591 1060 | chip_185_408 1061 | chip_152_584 1062 | chip_164_526 1063 | chip_092_336 1064 | chip_190_284 1065 | chip_109_436 1066 | chip_198_457 1067 | chip_070_294 1068 | chip_037_107 1069 | chip_250_329 1070 | chip_364_348 1071 | chip_173_580 1072 | chip_136_434 1073 | chip_072_095 1074 | chip_230_358 1075 | chip_318_540 1076 | chip_059_086 1077 | chip_234_332 1078 | chip_271_410 1079 | chip_093_104 1080 | chip_102_326 1081 | chip_197_311 1082 | chip_185_263 1083 | chip_125_479 1084 | chip_126_478 1085 | chip_202_463 1086 | chip_239_332 1087 | chip_195_314 1088 | chip_240_474 1089 | chip_132_507 1090 | chip_146_482 1091 | chip_142_487 1092 | chip_153_479 1093 | chip_255_417 1094 | chip_146_489 1095 | chip_252_279 1096 | chip_260_431 1097 | chip_140_436 1098 | chip_171_588 1099 | chip_238_463 1100 | chip_217_311 1101 | chip_041_103 1102 | chip_182_426 1103 | chip_301_454 1104 | chip_152_433 1105 | chip_123_491 1106 | chip_327_523 1107 | chip_214_311 1108 | chip_165_521 1109 | chip_309_470 1110 | chip_175_511 1111 | chip_251_271 1112 | chip_060_079 1113 | chip_180_342 1114 | chip_176_604 1115 | chip_218_423 1116 | chip_089_364 1117 | chip_126_442 1118 | chip_133_508 1119 | chip_106_413 1120 | chip_088_302 1121 | chip_214_429 1122 | chip_173_405 1123 | chip_132_470 1124 | chip_283_564 1125 | chip_333_503 1126 | chip_256_259 1127 | chip_113_415 1128 | chip_235_445 1129 
| chip_052_241 1130 | chip_157_468 1131 | chip_038_109 1132 | chip_257_443 1133 | chip_218_429 1134 | chip_054_202 1135 | chip_129_507 1136 | chip_103_104 1137 | chip_180_238 1138 | chip_119_556 1139 | chip_138_435 1140 | chip_391_326 1141 | chip_099_321 1142 | chip_276_083 1143 | chip_169_409 1144 | chip_056_115 1145 | chip_194_308 1146 | chip_173_519 1147 | chip_313_518 1148 | chip_147_493 1149 | chip_097_327 1150 | chip_237_590 1151 | chip_137_495 1152 | chip_237_442 1153 | chip_246_583 1154 | chip_107_407 1155 | chip_119_321 1156 | chip_173_516 1157 | chip_098_357 1158 | chip_317_530 1159 | chip_115_602 1160 | chip_182_521 1161 | chip_338_432 1162 | chip_138_474 1163 | chip_377_349 1164 | chip_216_461 1165 | chip_169_410 1166 | chip_166_382 1167 | chip_200_415 1168 | chip_108_401 1169 | chip_183_237 1170 | chip_183_425 1171 | chip_196_330 1172 | chip_205_435 1173 | chip_191_419 1174 | chip_151_478 1175 | chip_161_525 1176 | chip_227_278 1177 | chip_194_385 1178 | chip_327_511 1179 | chip_119_446 1180 | chip_220_440 1181 | chip_230_285 1182 | chip_219_279 1183 | chip_111_407 1184 | chip_236_598 1185 | chip_038_239 1186 | chip_144_498 1187 | chip_159_406 1188 | chip_148_483 1189 | chip_186_265 1190 | chip_124_337 1191 | chip_255_444 1192 | chip_270_476 1193 | chip_175_586 1194 | chip_195_367 1195 | chip_139_585 1196 | chip_315_523 1197 | chip_100_332 1198 | chip_171_611 1199 | chip_251_446 1200 | chip_166_471 1201 | chip_285_307 1202 | chip_152_471 1203 | chip_113_358 1204 | chip_247_290 1205 | chip_194_257 1206 | chip_134_308 1207 | chip_123_393 1208 | chip_314_519 1209 | chip_203_268 1210 | chip_122_560 1211 | chip_131_472 1212 | chip_150_485 1213 | chip_164_475 1214 | chip_120_559 1215 | chip_222_268 1216 | chip_251_259 1217 | chip_379_328 1218 | chip_141_494 1219 | chip_269_563 1220 | chip_126_357 1221 | chip_314_515 1222 | chip_271_267 1223 | chip_237_457 1224 | chip_153_470 1225 | chip_049_114 1226 | chip_121_375 1227 | chip_190_419 1228 | chip_282_252 1229 | chip_132_302 1230 | chip_332_509 1231 | chip_059_117 1232 | chip_126_496 1233 | chip_200_030 1234 | chip_185_305 1235 | chip_160_483 1236 | chip_106_440 1237 | chip_258_419 1238 | chip_276_264 1239 | chip_151_539 1240 | chip_143_444 1241 | chip_238_281 1242 | chip_056_058 1243 | chip_221_283 1244 | chip_161_377 1245 | chip_201_457 1246 | chip_275_405 1247 | chip_185_368 1248 | chip_065_348 1249 | chip_174_406 1250 | chip_168_388 1251 | chip_266_434 1252 | chip_317_544 1253 | chip_234_371 1254 | chip_161_467 1255 | chip_198_478 1256 | chip_080_176 1257 | chip_344_415 1258 | chip_228_314 1259 | chip_126_438 1260 | chip_058_221 1261 | chip_206_197 1262 | chip_126_427 1263 | chip_125_129 1264 | chip_127_497 1265 | chip_257_446 1266 | chip_099_355 1267 | chip_252_590 1268 | chip_150_487 1269 | chip_150_470 1270 | chip_120_557 1271 | chip_105_329 1272 | chip_168_433 1273 | chip_103_318 1274 | chip_333_515 1275 | chip_121_509 1276 | chip_131_477 1277 | chip_196_350 1278 | chip_253_595 1279 | chip_080_230 1280 | chip_175_238 1281 | chip_159_403 1282 | chip_190_286 1283 | chip_203_407 1284 | chip_316_519 1285 | chip_176_579 1286 | chip_117_326 1287 | chip_155_484 1288 | chip_116_208 1289 | chip_151_484 1290 | chip_223_288 1291 | chip_260_571 1292 | chip_155_402 1293 | chip_119_125 1294 | chip_330_517 1295 | chip_175_236 1296 | chip_112_479 1297 | chip_244_334 1298 | chip_208_417 1299 | chip_087_268 1300 | chip_055_200 1301 | chip_390_324 1302 | chip_157_480 1303 | chip_179_506 1304 | chip_274_467 1305 | chip_226_269 1306 | chip_199_323 
1307 | chip_143_469 1308 | chip_209_381 1309 | chip_130_422 1310 | chip_308_524 1311 | chip_282_267 1312 | chip_142_481 1313 | chip_125_447 1314 | chip_066_353 1315 | chip_219_266 1316 | chip_097_302 1317 | chip_034_148 1318 | chip_232_277 1319 | chip_121_482 1320 | chip_105_332 1321 | chip_220_314 1322 | chip_163_593 1323 | chip_231_294 1324 | chip_128_485 1325 | chip_182_361 1326 | chip_202_324 1327 | chip_114_421 1328 | chip_223_442 1329 | chip_213_449 1330 | chip_152_489 1331 | chip_200_391 1332 | chip_058_090 1333 | chip_081_338 1334 | chip_220_448 1335 | chip_111_401 1336 | chip_154_487 1337 | chip_129_444 1338 | chip_321_518 1339 | chip_131_381 1340 | chip_144_444 1341 | chip_097_322 1342 | chip_128_507 1343 | chip_175_588 1344 | chip_155_469 1345 | chip_256_557 1346 | chip_233_268 1347 | chip_237_593 1348 | chip_119_558 1349 | chip_228_294 1350 | chip_221_333 1351 | chip_169_398 1352 | chip_197_291 1353 | chip_133_473 1354 | chip_113_317 1355 | chip_150_500 1356 | chip_267_383 1357 | chip_134_510 1358 | chip_132_438 1359 | chip_227_332 1360 | chip_249_446 1361 | chip_248_585 1362 | chip_201_398 1363 | chip_249_291 1364 | chip_115_604 1365 | chip_272_429 1366 | chip_283_432 1367 | chip_343_469 1368 | chip_210_278 1369 | chip_175_411 1370 | chip_213_335 1371 | chip_116_504 1372 | chip_318_344 1373 | chip_127_471 1374 | chip_074_058 1375 | chip_146_485 1376 | chip_091_324 1377 | chip_193_582 1378 | chip_219_309 1379 | chip_213_426 1380 | chip_184_261 1381 | chip_302_382 1382 | chip_219_328 1383 | chip_221_426 1384 | chip_185_497 1385 | chip_326_317 1386 | chip_159_421 1387 | chip_107_413 1388 | chip_222_023 1389 | chip_234_288 1390 | chip_183_613 1391 | chip_122_374 1392 | chip_320_522 1393 | chip_141_488 1394 | chip_105_317 1395 | chip_299_360 1396 | chip_128_480 1397 | chip_320_523 1398 | chip_238_444 1399 | chip_225_357 1400 | chip_125_503 1401 | chip_080_095 1402 | chip_134_471 1403 | chip_163_519 1404 | chip_271_266 1405 | chip_232_332 1406 | chip_054_107 1407 | chip_326_516 1408 | chip_210_309 1409 | chip_136_471 1410 | chip_183_607 1411 | chip_120_436 1412 | chip_235_440 1413 | chip_296_135 1414 | chip_073_095 1415 | chip_319_339 1416 | chip_208_424 1417 | chip_214_329 1418 | chip_155_467 1419 | chip_174_397 1420 | chip_228_431 1421 | chip_243_446 1422 | chip_199_275 1423 | chip_183_356 1424 | chip_239_598 1425 | chip_141_443 1426 | chip_280_559 1427 | chip_164_376 1428 | chip_128_506 1429 | chip_065_286 1430 | chip_064_120 1431 | chip_105_617 1432 | chip_189_396 1433 | chip_101_342 1434 | chip_074_291 1435 | chip_113_441 1436 | chip_329_521 1437 | chip_329_498 1438 | chip_265_439 1439 | chip_142_483 1440 | chip_257_268 1441 | chip_256_280 1442 | chip_230_280 1443 | chip_207_331 1444 | chip_054_115 1445 | chip_151_488 1446 | chip_136_437 1447 | chip_209_445 1448 | chip_253_582 1449 | chip_188_349 1450 | chip_100_333 1451 | chip_329_502 1452 | chip_308_526 1453 | chip_246_333 1454 | chip_230_291 1455 | chip_130_425 1456 | chip_229_605 1457 | chip_148_593 1458 | chip_222_369 1459 | chip_171_347 1460 | chip_124_479 1461 | chip_103_417 1462 | chip_318_539 1463 | chip_129_506 1464 | chip_086_204 1465 | chip_222_333 1466 | chip_140_411 1467 | chip_117_448 1468 | chip_211_335 1469 | chip_059_191 1470 | chip_343_341 1471 | chip_233_478 1472 | chip_176_587 1473 | chip_226_280 1474 | chip_038_168 1475 | chip_330_502 1476 | chip_147_393 1477 | chip_317_545 1478 | chip_181_578 1479 | chip_163_454 1480 | chip_211_334 1481 | chip_203_401 1482 | chip_214_308 1483 | chip_284_276 1484 | 
chip_228_285 1485 | chip_318_528 1486 | chip_204_418 1487 | chip_175_401 1488 | chip_097_099 1489 | chip_165_377 1490 | chip_134_497 1491 | chip_063_357 1492 | chip_275_469 1493 | chip_076_300 1494 | chip_185_242 1495 | chip_093_331 1496 | chip_081_174 1497 | chip_326_501 1498 | chip_158_617 1499 | chip_228_276 1500 | chip_071_347 1501 | chip_113_442 1502 | chip_220_429 1503 | chip_237_466 1504 | chip_180_430 1505 | chip_089_250 1506 | chip_130_159 1507 | chip_123_383 1508 | chip_204_401 1509 | chip_047_089 1510 | chip_132_133 1511 | chip_142_498 1512 | chip_319_534 1513 | chip_144_497 1514 | chip_282_128 1515 | chip_255_445 1516 | chip_251_284 1517 | chip_220_328 1518 | chip_185_502 1519 | chip_194_315 1520 | chip_174_412 1521 | chip_325_527 1522 | chip_223_271 1523 | chip_304_541 1524 | chip_115_338 1525 | chip_174_152 1526 | chip_123_434 1527 | chip_107_440 1528 | chip_181_353 1529 | chip_179_263 1530 | chip_309_524 1531 | chip_247_590 1532 | chip_173_605 1533 | chip_136_484 1534 | chip_133_309 1535 | chip_154_408 1536 | chip_198_312 1537 | chip_171_606 1538 | chip_152_490 1539 | chip_145_498 1540 | chip_336_499 1541 | chip_059_193 1542 | chip_025_082 1543 | chip_097_316 1544 | chip_219_431 1545 | chip_317_531 1546 | chip_061_112 1547 | chip_388_323 1548 | chip_320_535 1549 | chip_138_427 1550 | chip_156_598 1551 | chip_244_586 1552 | chip_093_340 1553 | chip_155_346 1554 | chip_105_452 1555 | chip_142_490 1556 | chip_031_195 1557 | chip_145_406 1558 | chip_241_474 1559 | chip_203_335 1560 | chip_095_338 1561 | chip_184_410 1562 | chip_084_265 1563 | chip_212_445 1564 | chip_121_503 1565 | chip_135_321 1566 | chip_171_346 1567 | chip_224_326 1568 | chip_416_318 1569 | chip_134_494 1570 | chip_329_508 1571 | chip_092_253 1572 | chip_131_315 1573 | chip_225_325 1574 | chip_333_508 1575 | chip_044_085 1576 | chip_241_335 1577 | chip_165_403 1578 | chip_214_449 1579 | chip_130_509 1580 | chip_282_266 1581 | chip_135_497 1582 | chip_232_330 1583 | chip_242_336 1584 | chip_333_495 1585 | chip_277_403 1586 | chip_419_322 1587 | chip_296_540 1588 | chip_231_285 1589 | chip_101_355 1590 | chip_219_312 1591 | chip_105_331 1592 | chip_275_398 1593 | chip_308_293 1594 | chip_138_495 1595 | chip_119_492 1596 | chip_224_333 1597 | chip_179_462 1598 | chip_265_310 1599 | chip_282_262 1600 | chip_022_126 1601 | chip_163_469 1602 | chip_152_480 1603 | chip_057_065 1604 | chip_297_368 1605 | chip_218_293 1606 | chip_119_403 1607 | chip_184_363 1608 | chip_195_366 1609 | chip_097_103 1610 | chip_220_332 1611 | chip_067_352 1612 | chip_131_473 1613 | chip_239_591 1614 | chip_145_474 1615 | chip_134_360 1616 | chip_043_099 1617 | chip_111_418 1618 | chip_237_595 1619 | chip_124_483 1620 | chip_243_336 1621 | chip_218_311 1622 | chip_119_322 1623 | chip_270_317 1624 | chip_078_257 1625 | chip_226_272 1626 | chip_259_577 1627 | chip_184_404 1628 | chip_148_495 1629 | chip_191_511 1630 | chip_142_155 1631 | chip_267_566 1632 | chip_139_491 1633 | chip_167_425 1634 | chip_119_504 1635 | chip_133_470 1636 | chip_096_306 1637 | chip_164_469 1638 | chip_112_446 1639 | chip_175_466 1640 | chip_159_592 1641 | chip_191_466 1642 | chip_332_507 1643 | chip_310_543 1644 | chip_188_403 1645 | chip_365_348 1646 | chip_237_289 1647 | chip_126_477 1648 | chip_173_351 1649 | chip_220_333 1650 | chip_334_471 1651 | chip_345_468 1652 | chip_301_353 1653 | chip_201_410 1654 | chip_134_495 1655 | chip_125_124 1656 | chip_262_433 1657 | chip_206_356 1658 | chip_221_460 1659 | chip_324_316 1660 | chip_103_307 1661 | chip_041_099 
1662 | chip_168_610 1663 | chip_334_507 1664 | chip_033_148 1665 | chip_155_255 1666 | chip_099_357 1667 | chip_194_261 1668 | chip_064_352 1669 | chip_144_469 1670 | chip_187_400 1671 | chip_136_487 1672 | chip_309_526 1673 | chip_144_478 1674 | chip_168_612 1675 | chip_079_220 1676 | chip_210_310 1677 | chip_207_424 1678 | chip_157_520 1679 | chip_185_483 1680 | chip_231_332 1681 | chip_167_580 1682 | chip_270_298 1683 | chip_208_324 1684 | chip_244_279 1685 | chip_087_364 1686 | chip_226_291 1687 | chip_112_357 1688 | chip_114_487 1689 | chip_376_346 1690 | chip_279_265 1691 | chip_039_167 1692 | chip_225_315 1693 | chip_124_302 1694 | chip_308_517 1695 | chip_068_352 1696 | chip_112_423 1697 | chip_259_576 1698 | chip_249_272 1699 | chip_093_353 1700 | chip_145_469 1701 | chip_168_392 1702 | chip_273_466 1703 | chip_125_445 1704 | chip_250_446 1705 | chip_217_289 1706 | chip_118_442 1707 | chip_177_264 1708 | chip_166_263 1709 | chip_130_510 1710 | chip_104_416 1711 | chip_149_494 1712 | chip_206_427 1713 | chip_140_485 1714 | chip_170_514 1715 | chip_082_204 1716 | chip_144_494 1717 | chip_223_330 1718 | chip_172_154 1719 | chip_224_440 1720 | chip_160_377 1721 | chip_351_539 1722 | chip_267_274 1723 | chip_145_393 1724 | chip_052_240 1725 | chip_137_494 1726 | chip_048_070 1727 | chip_275_407 1728 | chip_295_535 1729 | chip_123_507 1730 | chip_249_427 1731 | chip_358_325 1732 | chip_082_104 1733 | chip_110_429 1734 | chip_215_329 1735 | chip_330_333 1736 | chip_143_426 1737 | chip_369_343 1738 | chip_087_269 1739 | chip_166_386 1740 | chip_144_439 1741 | chip_129_475 1742 | chip_058_191 1743 | chip_386_323 1744 | chip_332_506 1745 | chip_184_238 1746 | chip_229_298 1747 | chip_113_420 1748 | chip_323_507 1749 | chip_138_360 1750 | chip_154_346 1751 | chip_065_196 1752 | chip_165_582 1753 | chip_321_521 1754 | chip_130_445 1755 | chip_175_585 1756 | chip_207_389 1757 | chip_132_426 1758 | chip_231_281 1759 | chip_188_404 1760 | chip_158_334 1761 | chip_030_076 1762 | chip_168_393 1763 | chip_258_413 1764 | chip_092_214 1765 | chip_176_585 1766 | chip_340_331 1767 | chip_206_287 1768 | chip_101_358 1769 | chip_317_529 1770 | chip_185_348 1771 | chip_060_258 1772 | chip_270_475 1773 | chip_167_589 1774 | chip_253_587 1775 | chip_106_421 1776 | chip_145_501 1777 | chip_343_535 1778 | chip_275_465 1779 | chip_032_197 1780 | chip_175_428 1781 | chip_183_501 1782 | chip_065_118 1783 | chip_264_434 1784 | chip_329_488 1785 | chip_157_481 1786 | chip_242_466 1787 | chip_115_493 1788 | chip_180_022 1789 | chip_162_600 1790 | chip_122_492 1791 | chip_039_111 1792 | chip_236_369 1793 | chip_230_286 1794 | chip_215_241 1795 | chip_274_468 1796 | chip_221_306 1797 | chip_143_476 1798 | chip_239_337 1799 | chip_160_401 1800 | chip_155_479 1801 | chip_192_347 1802 | chip_164_468 1803 | chip_175_605 1804 | chip_209_312 1805 | chip_132_443 1806 | chip_211_435 1807 | chip_128_326 1808 | chip_235_332 1809 | chip_135_316 1810 | chip_116_409 1811 | chip_167_598 1812 | chip_207_478 1813 | chip_193_330 1814 | chip_200_478 1815 | chip_190_400 1816 | chip_374_346 1817 | chip_146_479 1818 | chip_256_277 1819 | chip_132_488 1820 | chip_204_402 1821 | chip_167_433 1822 | chip_099_334 1823 | chip_239_285 1824 | chip_223_311 1825 | chip_076_616 1826 | chip_221_311 1827 | chip_282_264 1828 | chip_146_496 1829 | chip_118_322 1830 | chip_085_360 1831 | chip_057_192 1832 | chip_318_529 1833 | chip_134_493 1834 | chip_114_422 1835 | chip_227_431 1836 | chip_146_493 1837 | chip_138_426 1838 | chip_126_307 1839 | 
chip_319_510 1840 | chip_243_333 1841 | chip_221_321 1842 | chip_292_552 1843 | chip_150_493 1844 | chip_096_322 1845 | chip_115_507 1846 | chip_317_343 1847 | chip_251_584 1848 | chip_275_470 1849 | chip_239_588 1850 | chip_124_304 1851 | chip_180_610 1852 | chip_220_336 1853 | chip_094_329 1854 | chip_422_327 1855 | chip_272_264 1856 | chip_224_373 1857 | chip_224_439 1858 | chip_149_266 1859 | chip_346_328 1860 | chip_222_453 1861 | chip_332_503 1862 | chip_346_428 1863 | chip_154_480 1864 | chip_323_527 1865 | chip_226_274 1866 | chip_233_590 1867 | chip_147_392 1868 | chip_251_590 1869 | chip_103_347 1870 | chip_168_611 1871 | chip_236_449 1872 | chip_144_440 1873 | chip_132_500 1874 | chip_044_219 1875 | chip_145_488 1876 | chip_281_265 1877 | chip_327_500 1878 | chip_244_582 1879 | chip_133_481 1880 | chip_281_082 1881 | chip_226_333 1882 | chip_050_174 1883 | chip_282_562 1884 | chip_181_400 1885 | chip_165_265 1886 | chip_142_499 1887 | chip_162_469 1888 | chip_171_587 1889 | chip_080_231 1890 | chip_093_212 1891 | chip_133_318 1892 | chip_167_263 1893 | chip_210_272 1894 | chip_334_503 1895 | chip_164_266 1896 | chip_210_318 1897 | chip_127_446 1898 | chip_102_201 1899 | chip_061_094 1900 | chip_219_603 1901 | chip_075_350 1902 | chip_275_474 1903 | chip_131_444 1904 | chip_127_483 1905 | chip_388_324 1906 | chip_192_474 1907 | chip_254_270 1908 | chip_062_350 1909 | chip_326_388 1910 | chip_228_306 1911 | chip_140_443 1912 | chip_125_308 1913 | chip_240_336 1914 | chip_116_486 1915 | chip_183_520 1916 | chip_038_203 1917 | chip_274_464 1918 | chip_221_309 1919 | chip_257_559 1920 | chip_061_061 1921 | chip_059_091 1922 | chip_378_349 1923 | chip_112_039 1924 | chip_214_450 1925 | chip_166_587 1926 | chip_232_293 1927 | chip_244_594 1928 | chip_203_435 1929 | chip_122_501 1930 | chip_282_265 1931 | chip_227_304 1932 | chip_215_336 1933 | chip_091_336 1934 | chip_123_492 1935 | chip_132_441 1936 | chip_180_398 1937 | chip_222_432 1938 | chip_101_357 1939 | chip_123_483 1940 | chip_149_415 1941 | chip_200_319 1942 | chip_200_364 1943 | chip_287_391 1944 | chip_108_421 1945 | chip_228_469 1946 | chip_129_325 1947 | chip_174_408 1948 | chip_153_415 1949 | chip_373_294 1950 | chip_038_204 1951 | chip_175_432 1952 | chip_351_393 1953 | chip_252_281 1954 | chip_162_251 1955 | chip_049_086 1956 | chip_221_443 1957 | chip_080_229 1958 | chip_223_481 1959 | chip_214_305 1960 | chip_107_105 1961 | chip_105_319 1962 | chip_108_405 1963 | chip_314_342 1964 | chip_150_440 1965 | chip_128_311 1966 | chip_228_298 1967 | chip_219_250 1968 | chip_079_345 1969 | chip_162_377 1970 | chip_071_348 1971 | chip_244_592 1972 | chip_318_527 1973 | chip_219_314 1974 | chip_214_307 1975 | chip_082_339 1976 | chip_137_309 1977 | chip_120_447 1978 | chip_255_274 1979 | chip_107_106 1980 | chip_177_265 1981 | chip_124_441 1982 | chip_257_483 1983 | chip_221_440 1984 | chip_169_611 1985 | chip_124_497 1986 | chip_125_443 1987 | chip_146_497 1988 | chip_165_441 1989 | chip_212_334 1990 | chip_207_410 1991 | chip_081_176 1992 | chip_289_306 1993 | chip_120_558 1994 | chip_147_494 1995 | chip_170_579 1996 | chip_076_056 1997 | chip_215_438 1998 | chip_328_502 1999 | chip_224_438 2000 | chip_281_267 2001 | chip_226_591 2002 | chip_210_389 2003 | chip_181_263 2004 | chip_175_363 2005 | chip_134_426 2006 | chip_201_409 2007 | chip_274_408 2008 | chip_219_329 2009 | chip_206_481 2010 | chip_193_296 2011 | chip_036_117 2012 | chip_322_543 2013 | chip_126_502 2014 | chip_326_522 2015 | chip_213_313 2016 | chip_194_340 
2017 | chip_220_311 2018 | chip_194_399 2019 | chip_126_301 2020 | chip_242_468 2021 | chip_241_475 2022 | chip_212_466 2023 | chip_241_599 2024 | chip_227_430 2025 | chip_269_478 2026 | chip_251_447 2027 | chip_330_499 2028 | chip_230_458 2029 | chip_153_487 2030 | chip_179_584 2031 | chip_087_197 2032 | chip_180_240 2033 | chip_215_293 2034 | chip_237_335 2035 | chip_106_107 2036 | chip_188_398 2037 | chip_124_508 2038 | chip_206_404 2039 | chip_176_510 2040 | chip_216_433 2041 | chip_194_314 2042 | chip_332_498 2043 | chip_163_592 2044 | chip_319_318 2045 | chip_174_587 2046 | chip_236_600 2047 | chip_308_540 2048 | chip_239_277 2049 | chip_163_537 2050 | chip_141_485 2051 | chip_128_126 2052 | chip_131_404 2053 | chip_229_292 2054 | chip_247_427 2055 | chip_142_492 2056 | chip_236_434 2057 | chip_207_334 2058 | chip_324_334 2059 | chip_220_268 2060 | chip_049_045 2061 | chip_155_254 2062 | chip_079_094 2063 | chip_065_073 2064 | chip_119_555 2065 | chip_247_336 2066 | chip_221_292 2067 | chip_217_428 2068 | chip_249_591 2069 | chip_100_358 2070 | chip_253_265 2071 | chip_196_345 2072 | chip_298_352 2073 | chip_292_547 2074 | chip_225_481 2075 | chip_054_098 2076 | chip_145_156 2077 | chip_134_316 2078 | chip_107_248 2079 | chip_173_370 2080 | chip_218_306 2081 | chip_169_610 2082 | chip_313_517 2083 | chip_226_309 2084 | chip_114_596 2085 | chip_234_442 2086 | chip_186_364 2087 | chip_151_240 2088 | chip_231_287 2089 | chip_219_439 2090 | chip_085_592 2091 | chip_264_467 2092 | chip_179_580 2093 | chip_219_447 2094 | chip_267_438 2095 | chip_087_200 2096 | chip_150_418 2097 | chip_219_440 2098 | chip_370_547 2099 | chip_165_522 2100 | chip_174_493 2101 | chip_137_478 2102 | chip_128_446 2103 | chip_115_448 2104 | chip_257_264 2105 | chip_242_429 2106 | chip_230_484 2107 | chip_252_275 2108 | chip_112_609 2109 | chip_203_266 2110 | chip_078_238 2111 | chip_332_499 2112 | chip_288_129 2113 | chip_251_585 2114 | chip_207_457 2115 | chip_233_331 2116 | chip_202_388 2117 | chip_368_360 2118 | chip_165_376 2119 | chip_258_483 2120 | chip_180_460 2121 | chip_268_269 2122 | chip_190_353 2123 | chip_232_024 2124 | chip_131_474 2125 | chip_194_493 2126 | chip_243_467 2127 | chip_053_114 2128 | chip_208_362 2129 | chip_193_318 2130 | chip_160_408 2131 | chip_257_273 2132 | chip_130_438 2133 | chip_183_354 2134 | chip_146_494 2135 | chip_170_346 2136 | chip_221_316 2137 | chip_157_522 2138 | chip_106_429 2139 | chip_157_483 2140 | chip_221_602 2141 | chip_143_489 2142 | chip_324_510 2143 | chip_196_458 2144 | chip_142_427 2145 | chip_221_436 2146 | chip_294_135 2147 | chip_251_587 2148 | chip_262_484 2149 | chip_229_282 2150 | chip_060_064 2151 | chip_331_501 2152 | chip_269_297 2153 | chip_194_366 2154 | chip_176_607 2155 | chip_317_368 2156 | chip_230_591 2157 | chip_222_314 2158 | chip_140_483 2159 | chip_225_482 2160 | chip_087_202 2161 | chip_322_544 2162 | chip_370_349 2163 | chip_142_501 2164 | chip_280_251 2165 | chip_236_592 2166 | chip_273_472 2167 | chip_191_406 2168 | chip_234_376 2169 | chip_060_108 2170 | chip_240_429 2171 | chip_166_459 2172 | chip_209_334 2173 | chip_311_380 2174 | chip_167_611 2175 | chip_195_407 2176 | chip_279_255 2177 | chip_193_459 2178 | chip_042_092 2179 | chip_228_281 2180 | chip_169_601 2181 | chip_131_476 2182 | chip_135_430 2183 | chip_234_265 2184 | chip_329_512 2185 | chip_248_595 2186 | chip_150_494 2187 | chip_169_387 2188 | chip_077_295 2189 | chip_147_501 2190 | chip_199_442 2191 | chip_178_023 2192 | chip_331_523 2193 | chip_129_311 2194 | 
chip_319_528 2195 | chip_327_507 2196 | chip_090_254 2197 | chip_110_335 2198 | chip_197_326 2199 | chip_172_413 2200 | chip_185_241 2201 | chip_210_429 2202 | chip_192_292 2203 | chip_119_505 2204 | chip_311_525 2205 | chip_059_116 2206 | chip_182_399 2207 | chip_137_492 2208 | chip_133_478 2209 | chip_415_315 2210 | chip_112_419 2211 | chip_180_579 2212 | chip_350_345 2213 | chip_129_483 2214 | chip_232_287 2215 | chip_217_423 2216 | chip_167_437 2217 | chip_053_116 2218 | chip_181_518 2219 | chip_211_283 2220 | chip_182_412 2221 | chip_253_264 2222 | chip_267_420 2223 | chip_344_459 2224 | chip_199_365 2225 | chip_065_354 2226 | chip_143_356 2227 | chip_119_559 2228 | chip_321_510 2229 | chip_033_097 2230 | chip_335_500 2231 | chip_041_115 2232 | chip_181_413 2233 | chip_137_156 2234 | chip_232_445 2235 | chip_239_284 2236 | chip_251_425 2237 | chip_163_528 2238 | chip_221_332 2239 | chip_227_280 2240 | chip_121_465 2241 | chip_210_461 2242 | chip_139_435 2243 | chip_198_365 2244 | chip_201_382 2245 | chip_291_134 2246 | chip_215_292 2247 | chip_192_583 2248 | chip_324_410 2249 | chip_165_258 2250 | chip_220_462 2251 | chip_323_417 2252 | chip_351_540 2253 | chip_236_448 2254 | chip_218_437 2255 | chip_294_348 2256 | chip_140_484 2257 | chip_243_589 2258 | chip_217_427 2259 | chip_037_148 2260 | chip_258_268 2261 | chip_226_330 2262 | chip_136_306 2263 | chip_331_499 2264 | chip_274_465 2265 | chip_091_320 2266 | chip_163_458 2267 | chip_222_337 2268 | chip_204_329 2269 | chip_084_207 2270 | chip_169_599 2271 | chip_371_349 2272 | chip_421_324 2273 | chip_206_337 2274 | chip_131_442 2275 | chip_216_429 2276 | chip_366_348 2277 | chip_201_361 2278 | chip_169_258 2279 | chip_217_283 2280 | chip_213_435 2281 | chip_124_446 2282 | chip_105_334 2283 | chip_226_289 2284 | chip_131_429 2285 | chip_148_501 2286 | chip_120_322 2287 | chip_155_408 2288 | chip_245_596 2289 | chip_306_543 2290 | chip_232_596 2291 | chip_132_484 2292 | chip_280_560 2293 | chip_145_338 2294 | chip_222_292 2295 | chip_101_306 2296 | chip_222_328 2297 | chip_208_299 2298 | chip_235_335 2299 | chip_197_319 2300 | chip_208_428 2301 | chip_267_582 2302 | chip_111_319 2303 | chip_216_442 2304 | chip_065_288 2305 | chip_170_267 2306 | chip_252_585 2307 | chip_162_483 2308 | chip_237_598 2309 | chip_116_427 2310 | chip_325_316 2311 | chip_209_430 2312 | chip_211_282 2313 | chip_051_172 2314 | chip_115_486 2315 | chip_121_506 2316 | chip_134_472 2317 | chip_205_329 2318 | chip_125_420 2319 | chip_145_443 2320 | chip_182_579 2321 | chip_292_553 2322 | chip_162_595 2323 | chip_244_451 2324 | chip_246_592 2325 | chip_059_190 2326 | chip_123_446 2327 | chip_193_407 2328 | chip_181_507 2329 | chip_200_323 2330 | chip_249_440 2331 | chip_234_280 2332 | chip_062_120 2333 | chip_099_331 2334 | chip_099_495 2335 | chip_274_472 2336 | chip_148_496 2337 | chip_230_273 2338 | chip_288_119 2339 | chip_093_358 2340 | chip_229_379 2341 | chip_120_509 2342 | chip_048_090 2343 | chip_247_596 2344 | chip_251_485 2345 | chip_147_434 2346 | chip_204_344 2347 | chip_189_395 2348 | chip_233_597 2349 | chip_160_591 2350 | chip_203_414 2351 | chip_144_474 2352 | chip_238_593 2353 | chip_196_323 2354 | chip_319_533 2355 | chip_190_285 2356 | chip_220_335 2357 | chip_214_304 2358 | chip_114_506 2359 | chip_242_591 2360 | chip_325_314 2361 | chip_235_456 2362 | chip_136_470 2363 | chip_154_422 2364 | chip_114_592 2365 | chip_157_523 2366 | chip_192_476 2367 | chip_215_314 2368 | chip_181_250 2369 | chip_257_445 2370 | chip_178_497 2371 | chip_205_429 
2372 | chip_063_116 2373 | chip_106_408 2374 | chip_105_418 2375 | chip_249_594 2376 | chip_207_323 2377 | chip_236_596 2378 | chip_243_595 2379 | chip_127_470 2380 | chip_141_486 2381 | chip_170_586 2382 | chip_151_248 2383 | chip_357_363 2384 | chip_253_448 2385 | chip_121_505 2386 | chip_204_506 2387 | chip_166_260 2388 | chip_151_490 2389 | chip_272_450 2390 | chip_166_261 2391 | chip_131_507 2392 | chip_195_486 2393 | chip_222_480 2394 | chip_322_417 2395 | chip_180_606 2396 | chip_235_444 2397 | chip_353_327 2398 | chip_088_265 2399 | chip_063_352 2400 | chip_229_288 2401 | chip_227_330 2402 | chip_226_457 2403 | chip_233_289 2404 | chip_192_280 2405 | chip_125_504 2406 | chip_267_434 2407 | chip_109_334 2408 | chip_141_402 2409 | chip_124_129 2410 | chip_207_298 2411 | chip_219_468 2412 | chip_139_488 2413 | chip_066_090 2414 | chip_045_243 2415 | chip_126_497 2416 | chip_331_509 2417 | chip_225_484 2418 | chip_345_471 2419 | chip_139_480 2420 | chip_133_472 2421 | chip_212_411 2422 | chip_091_334 2423 | chip_127_445 2424 | chip_161_533 2425 | chip_094_357 2426 | chip_207_434 2427 | chip_127_503 2428 | chip_274_469 2429 | chip_222_332 2430 | chip_238_458 2431 | chip_150_469 2432 | chip_216_454 2433 | chip_273_474 2434 | chip_052_239 2435 | chip_330_394 2436 | chip_225_274 2437 | chip_318_517 2438 | chip_057_194 2439 | chip_197_398 2440 | chip_167_587 2441 | chip_188_428 2442 | chip_340_326 2443 | chip_276_473 2444 | chip_250_585 2445 | chip_259_444 2446 | chip_094_354 2447 | chip_177_243 2448 | chip_197_366 2449 | chip_179_603 2450 | chip_132_134 2451 | chip_127_479 2452 | chip_120_503 2453 | chip_071_257 2454 | chip_095_323 2455 | chip_253_286 2456 | chip_239_335 2457 | chip_101_102 2458 | chip_151_604 2459 | chip_333_500 2460 | chip_152_602 2461 | chip_165_530 2462 | chip_140_470 2463 | chip_062_257 2464 | chip_182_239 2465 | chip_124_492 2466 | chip_242_470 2467 | chip_208_272 2468 | chip_130_442 2469 | chip_032_111 2470 | chip_191_516 2471 | chip_191_471 2472 | chip_240_593 2473 | chip_094_336 2474 | chip_154_468 2475 | chip_299_548 2476 | chip_321_527 2477 | chip_221_354 2478 | chip_273_464 2479 | chip_267_432 2480 | chip_181_237 2481 | chip_242_441 2482 | chip_242_467 2483 | chip_184_407 2484 | chip_236_289 2485 | chip_167_406 2486 | chip_251_260 2487 | chip_109_416 2488 | chip_218_451 2489 | chip_158_393 2490 | chip_169_407 2491 | chip_183_365 2492 | chip_110_336 2493 | chip_093_355 2494 | chip_109_471 2495 | chip_235_435 2496 | chip_049_103 2497 | chip_173_503 2498 | chip_324_312 2499 | chip_093_213 2500 | chip_194_502 2501 | chip_218_431 2502 | chip_110_343 2503 | chip_118_467 2504 | chip_238_428 2505 | chip_115_504 2506 | chip_284_263 2507 | chip_257_276 2508 | chip_100_307 2509 | chip_273_459 2510 | chip_145_372 2511 | chip_137_275 2512 | chip_236_188 2513 | chip_242_469 2514 | chip_210_334 2515 | chip_130_506 2516 | chip_141_484 2517 | chip_094_355 2518 | chip_309_517 2519 | chip_227_605 2520 | chip_165_412 2521 | chip_160_400 2522 | chip_299_453 2523 | chip_181_404 2524 | chip_077_616 2525 | chip_122_445 2526 | chip_194_296 2527 | chip_191_459 2528 | chip_255_446 2529 | chip_054_264 2530 | chip_179_579 2531 | chip_165_593 2532 | chip_130_487 2533 | chip_209_387 2534 | chip_117_493 2535 | chip_212_289 2536 | chip_042_100 2537 | chip_331_510 2538 | chip_101_318 2539 | chip_134_496 2540 | chip_060_118 2541 | chip_236_284 2542 | chip_133_303 2543 | chip_044_169 2544 | chip_194_406 2545 | chip_123_391 2546 | chip_037_101 2547 | chip_264_583 2548 | chip_168_408 2549 | 
chip_134_508 2550 | chip_093_339 2551 | chip_139_428 2552 | chip_360_323 2553 | chip_266_435 2554 | chip_108_330 2555 | chip_143_479 2556 | chip_343_339 2557 | chip_079_235 2558 | chip_165_517 2559 | chip_169_384 2560 | chip_186_488 2561 | chip_162_533 2562 | chip_227_276 2563 | chip_247_597 2564 | chip_326_521 2565 | chip_220_428 2566 | chip_090_337 2567 | chip_141_434 2568 | chip_127_499 2569 | chip_106_336 2570 | chip_112_404 2571 | chip_318_532 2572 | chip_244_335 2573 | chip_241_331 2574 | chip_219_430 2575 | chip_142_497 2576 | chip_254_593 2577 | chip_066_122 2578 | chip_241_441 2579 | chip_135_324 2580 | chip_048_171 2581 | chip_252_285 2582 | chip_149_491 2583 | chip_133_482 2584 | chip_280_308 2585 | chip_075_171 2586 | chip_165_599 2587 | chip_322_534 2588 | chip_161_468 2589 | chip_183_604 2590 | chip_063_356 2591 | chip_252_265 2592 | chip_191_412 2593 | chip_194_256 2594 | chip_348_330 2595 | chip_167_343 2596 | chip_325_315 2597 | chip_136_276 2598 | chip_228_324 2599 | chip_250_589 2600 | chip_176_580 2601 | chip_217_426 2602 | chip_168_589 2603 | chip_036_118 2604 | chip_271_265 2605 | chip_139_430 2606 | chip_233_355 2607 | chip_102_305 2608 | chip_272_460 2609 | chip_104_329 2610 | chip_299_349 2611 | chip_370_360 2612 | chip_202_016 2613 | chip_170_264 2614 | chip_210_482 2615 | chip_179_405 2616 | chip_185_302 2617 | chip_226_328 2618 | chip_223_437 2619 | chip_074_267 2620 | chip_171_518 2621 | chip_098_103 2622 | chip_192_366 2623 | chip_188_420 2624 | chip_368_359 2625 | chip_102_344 2626 | chip_145_332 2627 | chip_247_428 2628 | chip_244_591 2629 | chip_213_290 2630 | chip_105_103 2631 | chip_235_289 2632 | chip_312_433 2633 | chip_274_474 2634 | chip_169_586 2635 | chip_243_586 2636 | chip_249_589 2637 | chip_177_239 2638 | chip_180_284 2639 | chip_113_449 2640 | chip_154_472 2641 | chip_219_333 2642 | chip_341_533 2643 | chip_243_452 2644 | chip_246_428 2645 | chip_197_591 2646 | chip_240_277 2647 | chip_229_330 2648 | chip_243_334 2649 | chip_229_276 2650 | chip_211_336 2651 | chip_144_485 2652 | chip_176_426 2653 | chip_161_459 2654 | chip_279_564 2655 | chip_298_431 2656 | chip_196_288 2657 | chip_173_488 2658 | chip_109_335 2659 | chip_054_199 2660 | chip_167_482 2661 | chip_107_104 2662 | chip_166_482 2663 | chip_189_494 2664 | chip_251_418 2665 | chip_193_364 2666 | chip_195_484 2667 | chip_243_593 2668 | chip_172_579 2669 | chip_166_412 2670 | chip_248_270 2671 | chip_049_070 2672 | chip_231_331 2673 | chip_211_284 2674 | chip_189_515 2675 | chip_267_453 2676 | chip_087_216 2677 | chip_236_450 2678 | chip_149_479 2679 | chip_128_501 2680 | chip_118_503 2681 | chip_215_452 2682 | chip_104_334 2683 | chip_133_340 2684 | chip_093_356 2685 | chip_180_584 2686 | chip_322_524 2687 | chip_151_441 2688 | chip_151_592 2689 | chip_268_446 2690 | chip_194_337 2691 | chip_256_356 2692 | chip_353_423 2693 | chip_163_371 2694 | chip_133_492 2695 | chip_049_231 2696 | chip_049_071 2697 | chip_116_447 2698 | chip_156_466 2699 | chip_345_470 2700 | chip_129_435 2701 | chip_156_475 2702 | chip_331_508 2703 | chip_270_477 2704 | chip_231_280 2705 | chip_207_392 2706 | chip_215_306 2707 | chip_124_433 2708 | chip_162_406 2709 | chip_245_332 2710 | chip_223_294 2711 | chip_116_346 2712 | chip_183_606 2713 | chip_156_480 2714 | chip_327_516 2715 | chip_126_480 2716 | chip_178_505 2717 | chip_297_352 2718 | chip_254_260 2719 | chip_234_591 2720 | chip_126_304 2721 | chip_125_481 2722 | chip_166_523 2723 | chip_066_289 2724 | chip_220_444 2725 | chip_283_262 2726 | chip_058_076 
2727 | chip_101_343 2728 | chip_270_264 2729 | chip_217_425 2730 | chip_220_487 2731 | chip_166_435 2732 | chip_126_445 2733 | chip_102_330 2734 | chip_295_540 2735 | chip_252_263 2736 | chip_315_541 2737 | chip_253_257 2738 | chip_173_413 2739 | chip_053_176 2740 | chip_266_362 2741 | chip_108_204 2742 | chip_137_498 2743 | chip_179_605 2744 | chip_154_489 2745 | chip_061_191 2746 | chip_288_311 2747 | chip_128_481 2748 | chip_228_023 2749 | chip_216_426 2750 | chip_262_434 2751 | chip_217_336 2752 | chip_181_427 2753 | chip_238_331 2754 | chip_222_185 2755 | chip_174_017 2756 | chip_220_305 2757 | chip_097_323 2758 | chip_114_441 2759 | chip_118_492 2760 | chip_130_439 2761 | chip_121_507 2762 | chip_232_023 2763 | chip_210_444 2764 | chip_153_466 2765 | chip_205_389 2766 | chip_273_460 2767 | chip_154_399 2768 | chip_115_487 2769 | chip_319_530 2770 | chip_242_450 2771 | chip_351_328 2772 | chip_182_401 2773 | chip_122_482 2774 | chip_318_525 2775 | chip_041_089 2776 | chip_224_375 2777 | chip_221_335 2778 | chip_227_293 2779 | chip_247_335 2780 | chip_152_491 2781 | chip_139_409 2782 | chip_323_387 2783 | chip_133_498 2784 | chip_218_436 2785 | chip_209_333 2786 | chip_199_367 2787 | chip_270_478 2788 | chip_054_200 2789 | chip_063_349 2790 | chip_220_436 2791 | chip_113_493 2792 | chip_344_401 2793 | chip_246_444 2794 | chip_171_579 2795 | chip_164_478 2796 | chip_334_505 2797 | chip_171_589 2798 | chip_142_443 2799 | chip_198_356 2800 | chip_221_313 2801 | chip_097_328 2802 | chip_148_493 2803 | chip_167_456 2804 | chip_273_470 2805 | chip_244_446 2806 | chip_174_347 2807 | chip_198_594 2808 | chip_195_350 2809 | chip_254_265 2810 | chip_238_336 2811 | chip_111_449 2812 | chip_421_327 2813 | chip_420_322 2814 | chip_370_348 2815 | chip_210_393 2816 | chip_311_470 2817 | chip_163_522 2818 | chip_306_541 2819 | chip_416_319 2820 | chip_182_237 2821 | chip_093_322 2822 | chip_113_434 2823 | chip_370_342 2824 | chip_073_105 2825 | chip_340_533 2826 | chip_265_567 2827 | chip_201_319 2828 | chip_239_336 2829 | chip_179_518 2830 | chip_234_444 2831 | chip_094_617 2832 | chip_326_499 2833 | chip_184_458 2834 | chip_150_267 2835 | chip_139_434 2836 | chip_314_539 2837 | chip_163_455 2838 | chip_237_290 2839 | chip_252_589 2840 | chip_120_383 2841 | chip_219_256 2842 | chip_220_257 2843 | chip_153_480 2844 | chip_298_454 2845 | chip_154_473 2846 | chip_313_521 2847 | chip_207_477 2848 | chip_095_358 2849 | chip_248_590 2850 | chip_223_491 2851 | chip_161_480 2852 | chip_250_270 2853 | chip_283_558 2854 | chip_150_266 2855 | chip_267_306 2856 | chip_145_440 2857 | chip_206_297 2858 | chip_341_534 2859 | chip_117_503 2860 | chip_242_451 2861 | chip_147_488 2862 | chip_234_354 2863 | chip_214_401 2864 | chip_056_197 2865 | chip_226_370 2866 | chip_116_438 2867 | chip_177_581 2868 | chip_223_332 2869 | chip_210_329 2870 | chip_230_309 2871 | chip_185_503 2872 | chip_281_559 2873 | chip_070_352 2874 | chip_128_442 2875 | chip_320_518 2876 | chip_122_123 2877 | chip_253_416 2878 | chip_254_446 2879 | chip_217_435 2880 | chip_254_272 2881 | chip_174_360 2882 | chip_123_448 2883 | chip_199_595 2884 | chip_287_307 2885 | chip_249_271 2886 | chip_100_318 2887 | chip_283_559 2888 | chip_366_355 2889 | chip_182_022 2890 | chip_225_429 2891 | chip_316_543 2892 | chip_199_282 2893 | chip_123_560 2894 | chip_181_606 2895 | chip_191_286 2896 | chip_222_467 2897 | chip_114_447 2898 | chip_106_405 2899 | chip_256_266 2900 | chip_166_580 2901 | chip_176_581 2902 | chip_204_315 2903 | chip_182_499 2904 | 
chip_123_485 2905 | chip_222_281 2906 | chip_329_496 2907 | chip_217_314 2908 | chip_324_314 2909 | chip_208_416 2910 | chip_252_438 2911 | chip_100_101 2912 | chip_120_556 2913 | chip_198_318 2914 | chip_144_472 2915 | chip_106_288 2916 | chip_183_484 2917 | chip_141_491 2918 | chip_225_327 2919 | chip_151_375 2920 | chip_125_433 2921 | chip_257_265 2922 | chip_250_588 2923 | chip_159_618 2924 | chip_329_493 2925 | chip_327_527 2926 | chip_293_135 2927 | chip_258_587 2928 | chip_133_443 2929 | chip_219_270 2930 | chip_112_587 2931 | chip_336_500 2932 | chip_321_314 2933 | chip_308_484 2934 | chip_220_329 2935 | chip_177_603 2936 | chip_277_252 2937 | chip_057_118 2938 | chip_421_326 2939 | chip_314_516 2940 | chip_218_318 2941 | chip_098_327 2942 | chip_060_083 2943 | chip_334_496 2944 | chip_216_312 2945 | chip_156_390 2946 | chip_167_446 2947 | chip_253_274 2948 | chip_218_335 2949 | chip_044_226 2950 | chip_213_310 2951 | chip_207_420 2952 | chip_221_432 2953 | chip_216_436 2954 | chip_052_238 2955 | chip_130_505 2956 | chip_203_017 2957 | chip_248_445 2958 | chip_165_419 2959 | chip_249_284 2960 | chip_200_281 2961 | chip_072_257 2962 | chip_068_351 2963 | chip_324_315 2964 | chip_216_287 2965 | chip_179_424 2966 | chip_055_202 2967 | chip_170_410 2968 | chip_226_484 2969 | chip_421_329 2970 | chip_215_327 2971 | chip_227_269 2972 | chip_064_350 2973 | chip_209_285 2974 | chip_063_256 2975 | chip_302_430 2976 | chip_096_358 2977 | chip_131_320 2978 | chip_350_560 2979 | chip_164_392 2980 | chip_117_114 2981 | chip_264_466 2982 | chip_333_507 2983 | chip_152_468 2984 | chip_153_488 2985 | chip_089_360 2986 | chip_113_606 2987 | chip_148_478 2988 | chip_079_333 2989 | chip_184_392 2990 | chip_203_397 2991 | chip_224_331 2992 | chip_179_308 2993 | chip_069_294 2994 | chip_054_083 2995 | chip_217_313 2996 | chip_162_370 2997 | chip_363_321 2998 | chip_114_505 2999 | chip_231_295 3000 | chip_194_331 3001 | chip_127_317 3002 | chip_209_425 3003 | chip_196_291 3004 | chip_142_489 3005 | chip_243_445 3006 | chip_219_451 3007 | chip_182_502 3008 | chip_174_520 3009 | chip_184_237 3010 | chip_119_351 3011 | chip_353_328 3012 | chip_267_371 3013 | chip_238_330 3014 | chip_213_438 3015 | chip_205_285 3016 | chip_170_585 3017 | chip_081_345 3018 | chip_169_026 3019 | chip_129_442 3020 | chip_156_008 3021 | chip_272_468 3022 | chip_253_263 3023 | chip_120_448 3024 | chip_179_466 3025 | chip_216_284 3026 | chip_214_283 3027 | chip_372_349 3028 | chip_129_445 3029 | chip_154_402 3030 | chip_324_528 3031 | chip_061_195 3032 | chip_219_315 3033 | chip_208_390 3034 | chip_320_510 3035 | chip_030_093 3036 | chip_164_393 3037 | chip_114_402 3038 | chip_137_344 3039 | chip_051_173 3040 | chip_113_479 3041 | chip_310_469 3042 | chip_154_421 3043 | chip_126_479 3044 | chip_256_424 3045 | chip_318_531 3046 | chip_122_559 3047 | chip_167_599 3048 | chip_189_260 3049 | chip_213_456 3050 | chip_284_311 3051 | chip_328_509 3052 | chip_034_191 3053 | chip_163_250 3054 | chip_135_401 3055 | chip_183_497 3056 | chip_221_331 3057 | chip_048_091 3058 | chip_107_335 3059 | chip_149_487 3060 | chip_225_333 3061 | chip_205_342 3062 | chip_190_368 3063 | chip_200_324 3064 | chip_165_516 3065 | chip_162_524 3066 | chip_055_091 3067 | chip_213_336 3068 | chip_135_489 3069 | chip_229_291 3070 | chip_177_608 3071 | chip_236_278 3072 | chip_312_476 3073 | chip_260_413 3074 | chip_166_264 3075 | chip_114_429 3076 | chip_306_525 3077 | chip_099_310 3078 | chip_228_605 3079 | chip_167_383 3080 | chip_054_265 3081 | chip_083_174 
3082 | chip_325_524 3083 | chip_263_288 3084 | -------------------------------------------------------------------------------- /data_splits/multi_temporal_crop_classification/validation_data.txt: -------------------------------------------------------------------------------- 1 | chip_142_475 2 | chip_091_197 3 | chip_278_432 4 | chip_229_280 5 | chip_250_448 6 | chip_039_109 7 | chip_121_436 8 | chip_316_522 9 | chip_121_491 10 | chip_104_318 11 | chip_184_503 12 | chip_117_426 13 | chip_230_315 14 | chip_143_480 15 | chip_304_299 16 | chip_100_330 17 | chip_089_201 18 | chip_116_449 19 | chip_092_338 20 | chip_102_327 21 | chip_119_439 22 | chip_251_589 23 | chip_183_614 24 | chip_172_611 25 | chip_144_489 26 | chip_265_566 27 | chip_140_487 28 | chip_096_323 29 | chip_152_477 30 | chip_150_497 31 | chip_121_492 32 | chip_202_419 33 | chip_102_345 34 | chip_143_411 35 | chip_118_558 36 | chip_207_324 37 | chip_166_434 38 | chip_125_446 39 | chip_130_471 40 | chip_250_594 41 | chip_289_444 42 | chip_218_379 43 | chip_173_578 44 | chip_154_486 45 | chip_154_476 46 | chip_387_325 47 | chip_288_426 48 | chip_142_434 49 | chip_281_264 50 | chip_190_428 51 | chip_146_481 52 | chip_219_378 53 | chip_033_196 54 | chip_149_477 55 | chip_235_584 56 | chip_218_327 57 | chip_210_307 58 | chip_274_466 59 | chip_252_448 60 | chip_117_323 61 | chip_324_517 62 | chip_185_239 63 | chip_126_305 64 | chip_118_484 65 | chip_256_580 66 | chip_151_487 67 | chip_139_486 68 | chip_127_304 69 | chip_113_422 70 | chip_125_482 71 | chip_255_276 72 | chip_114_448 73 | chip_128_443 74 | chip_239_476 75 | chip_174_400 76 | chip_232_294 77 | chip_193_419 78 | chip_232_484 79 | chip_373_344 80 | chip_088_360 81 | chip_118_493 82 | chip_142_470 83 | chip_234_261 84 | chip_114_438 85 | chip_320_314 86 | chip_201_402 87 | chip_132_490 88 | chip_159_617 89 | chip_137_283 90 | chip_126_396 91 | chip_263_449 92 | chip_123_343 93 | chip_113_423 94 | chip_063_037 95 | chip_143_485 96 | chip_208_361 97 | chip_219_288 98 | chip_053_113 99 | chip_145_497 100 | chip_120_555 101 | chip_089_589 102 | chip_239_597 103 | chip_238_591 104 | chip_171_521 105 | chip_238_590 106 | chip_113_038 107 | chip_244_593 108 | chip_150_492 109 | chip_204_417 110 | chip_338_531 111 | chip_164_444 112 | chip_261_412 113 | chip_164_262 114 | chip_292_540 115 | chip_089_217 116 | chip_184_400 117 | chip_115_446 118 | chip_200_274 119 | chip_193_315 120 | chip_237_597 121 | chip_159_407 122 | chip_180_407 123 | chip_249_449 124 | chip_253_272 125 | chip_342_533 126 | chip_104_104 127 | chip_134_437 128 | chip_154_253 129 | chip_164_522 130 | chip_187_406 131 | chip_226_335 132 | chip_100_306 133 | chip_120_505 134 | chip_182_398 135 | chip_153_476 136 | chip_156_402 137 | chip_106_407 138 | chip_232_329 139 | chip_095_218 140 | chip_131_433 141 | chip_144_434 142 | chip_284_564 143 | chip_098_338 144 | chip_219_313 145 | chip_155_468 146 | chip_052_043 147 | chip_258_444 148 | chip_175_515 149 | chip_205_286 150 | chip_231_595 151 | chip_181_238 152 | chip_125_441 153 | chip_100_355 154 | chip_182_497 155 | chip_273_471 156 | chip_257_271 157 | chip_320_519 158 | chip_233_293 159 | chip_081_337 160 | chip_239_430 161 | chip_256_276 162 | chip_091_364 163 | chip_228_489 164 | chip_234_333 165 | chip_221_439 166 | chip_254_444 167 | chip_138_477 168 | chip_165_404 169 | chip_118_447 170 | chip_002_060 171 | chip_165_469 172 | chip_171_585 173 | chip_154_601 174 | chip_191_473 175 | chip_262_414 176 | chip_190_375 177 | chip_189_429 178 | chip_244_267 
179 | chip_139_477 180 | chip_210_331 181 | chip_146_343 182 | chip_293_546 183 | chip_215_305 184 | chip_200_016 185 | chip_207_451 186 | chip_420_326 187 | chip_141_473 188 | chip_239_443 189 | chip_075_105 190 | chip_385_323 191 | chip_302_542 192 | chip_229_272 193 | chip_169_433 194 | chip_129_480 195 | chip_316_524 196 | chip_158_522 197 | chip_269_299 198 | chip_187_259 199 | chip_094_321 200 | chip_210_277 201 | chip_120_502 202 | chip_034_147 203 | chip_239_269 204 | chip_156_476 205 | chip_330_414 206 | chip_117_501 207 | chip_282_404 208 | chip_146_470 209 | chip_009_061 210 | chip_202_336 211 | chip_125_502 212 | chip_216_460 213 | chip_084_175 214 | chip_118_555 215 | chip_134_441 216 | chip_391_323 217 | chip_258_270 218 | chip_217_330 219 | chip_324_525 220 | chip_347_343 221 | chip_169_399 222 | chip_186_348 223 | chip_146_469 224 | chip_208_391 225 | chip_149_263 226 | chip_181_579 227 | chip_215_454 228 | chip_156_500 229 | chip_228_297 230 | chip_113_421 231 | chip_148_491 232 | chip_220_308 233 | chip_045_106 234 | chip_165_464 235 | chip_182_406 236 | chip_131_496 237 | chip_153_467 238 | chip_072_280 239 | chip_115_505 240 | chip_186_422 241 | chip_054_201 242 | chip_248_276 243 | chip_151_477 244 | chip_124_303 245 | chip_164_477 246 | chip_112_330 247 | chip_180_339 248 | chip_057_087 249 | chip_275_408 250 | chip_250_271 251 | chip_332_332 252 | chip_296_352 253 | chip_315_514 254 | chip_126_439 255 | chip_232_269 256 | chip_202_392 257 | chip_250_272 258 | chip_392_325 259 | chip_024_087 260 | chip_221_442 261 | chip_126_446 262 | chip_245_580 263 | chip_228_330 264 | chip_330_498 265 | chip_244_332 266 | chip_313_475 267 | chip_098_358 268 | chip_369_349 269 | chip_216_467 270 | chip_060_116 271 | chip_164_528 272 | chip_216_280 273 | chip_213_466 274 | chip_132_471 275 | chip_227_461 276 | chip_291_553 277 | chip_081_218 278 | chip_319_526 279 | chip_365_345 280 | chip_253_443 281 | chip_097_358 282 | chip_165_601 283 | chip_161_423 284 | chip_220_431 285 | chip_159_594 286 | chip_270_267 287 | chip_178_022 288 | chip_191_364 289 | chip_106_415 290 | chip_223_438 291 | chip_250_587 292 | chip_174_348 293 | chip_128_445 294 | chip_193_312 295 | chip_298_453 296 | chip_169_514 297 | chip_110_041 298 | chip_181_503 299 | chip_327_499 300 | chip_179_238 301 | chip_281_560 302 | chip_306_527 303 | chip_139_476 304 | chip_130_430 305 | chip_061_115 306 | chip_169_032 307 | chip_170_593 308 | chip_215_429 309 | chip_196_369 310 | chip_256_269 311 | chip_334_525 312 | chip_359_558 313 | chip_162_479 314 | chip_102_332 315 | chip_239_459 316 | chip_130_411 317 | chip_236_445 318 | chip_327_510 319 | chip_163_518 320 | chip_181_023 321 | chip_101_332 322 | chip_149_493 323 | chip_123_447 324 | chip_127_427 325 | chip_120_508 326 | chip_238_285 327 | chip_271_264 328 | chip_132_474 329 | chip_180_609 330 | chip_112_433 331 | chip_149_478 332 | chip_244_595 333 | chip_092_323 334 | chip_219_323 335 | chip_133_474 336 | chip_141_482 337 | chip_220_334 338 | chip_270_263 339 | chip_259_331 340 | chip_335_506 341 | chip_163_601 342 | chip_230_279 343 | chip_111_611 344 | chip_115_502 345 | chip_113_043 346 | chip_242_332 347 | chip_146_473 348 | chip_164_481 349 | chip_284_307 350 | chip_279_252 351 | chip_149_486 352 | chip_104_498 353 | chip_124_124 354 | chip_081_333 355 | chip_129_505 356 | chip_120_443 357 | chip_233_283 358 | chip_238_469 359 | chip_245_595 360 | chip_314_518 361 | chip_140_482 362 | chip_166_581 363 | chip_283_264 364 | chip_251_583 365 | chip_300_412 
366 | chip_220_309 367 | chip_112_422 368 | chip_203_308 369 | chip_061_092 370 | chip_138_475 371 | chip_234_443 372 | chip_183_514 373 | chip_256_267 374 | chip_126_481 375 | chip_130_473 376 | chip_316_383 377 | chip_150_488 378 | chip_144_487 379 | chip_144_473 380 | chip_228_323 381 | chip_113_424 382 | chip_061_190 383 | chip_200_318 384 | chip_134_320 385 | chip_187_482 386 | chip_228_280 387 | chip_331_527 388 | chip_089_267 389 | chip_191_368 390 | chip_221_307 391 | chip_221_438 392 | chip_334_470 393 | chip_155_476 394 | chip_319_523 395 | chip_199_321 396 | chip_124_439 397 | chip_101_103 398 | chip_138_430 399 | chip_195_410 400 | chip_126_491 401 | chip_080_345 402 | chip_131_316 403 | chip_232_595 404 | chip_370_344 405 | chip_290_557 406 | chip_199_283 407 | chip_151_470 408 | chip_235_482 409 | chip_127_491 410 | chip_110_421 411 | chip_128_482 412 | chip_153_405 413 | chip_197_368 414 | chip_222_315 415 | chip_106_442 416 | chip_215_467 417 | chip_068_347 418 | chip_149_262 419 | chip_059_259 420 | chip_281_252 421 | chip_176_506 422 | chip_134_489 423 | chip_179_609 424 | chip_257_269 425 | chip_237_434 426 | chip_106_441 427 | chip_088_362 428 | chip_164_519 429 | chip_063_084 430 | chip_237_441 431 | chip_183_481 432 | chip_062_081 433 | chip_292_455 434 | chip_154_351 435 | chip_351_399 436 | chip_153_485 437 | chip_207_423 438 | chip_201_445 439 | chip_206_301 440 | chip_059_082 441 | chip_159_420 442 | chip_255_273 443 | chip_147_496 444 | chip_187_401 445 | chip_128_312 446 | chip_065_059 447 | chip_213_315 448 | chip_160_399 449 | chip_177_339 450 | chip_195_308 451 | chip_122_439 452 | chip_208_431 453 | chip_229_293 454 | chip_194_485 455 | chip_138_476 456 | chip_335_498 457 | chip_237_364 458 | chip_253_256 459 | chip_126_506 460 | chip_216_183 461 | chip_136_486 462 | chip_157_421 463 | chip_065_117 464 | chip_100_356 465 | chip_304_528 466 | chip_379_348 467 | chip_179_610 468 | chip_177_517 469 | chip_241_288 470 | chip_186_511 471 | chip_120_430 472 | chip_243_466 473 | chip_084_363 474 | chip_197_488 475 | chip_304_343 476 | chip_124_419 477 | chip_224_334 478 | chip_164_527 479 | chip_245_592 480 | chip_141_500 481 | chip_330_501 482 | chip_330_513 483 | chip_125_303 484 | chip_230_311 485 | chip_145_489 486 | chip_164_523 487 | chip_329_510 488 | chip_376_348 489 | chip_114_433 490 | chip_157_332 491 | chip_172_577 492 | chip_141_481 493 | chip_207_438 494 | chip_168_268 495 | chip_251_448 496 | chip_330_510 497 | chip_224_489 498 | chip_233_484 499 | chip_232_322 500 | chip_133_440 501 | chip_174_407 502 | chip_083_210 503 | chip_191_492 504 | chip_174_015 505 | chip_209_275 506 | chip_132_482 507 | chip_248_444 508 | chip_046_088 509 | chip_034_112 510 | chip_300_453 511 | chip_067_077 512 | chip_237_592 513 | chip_183_489 514 | chip_234_292 515 | chip_141_401 516 | chip_292_542 517 | chip_200_365 518 | chip_061_194 519 | chip_351_330 520 | chip_065_352 521 | chip_314_514 522 | chip_211_410 523 | chip_195_406 524 | chip_112_493 525 | chip_142_433 526 | chip_172_586 527 | chip_181_504 528 | chip_108_333 529 | chip_213_300 530 | chip_104_417 531 | chip_151_480 532 | chip_201_364 533 | chip_235_446 534 | chip_163_479 535 | chip_271_486 536 | chip_129_479 537 | chip_182_612 538 | chip_231_274 539 | chip_166_597 540 | chip_159_417 541 | chip_140_442 542 | chip_241_587 543 | chip_183_502 544 | chip_130_486 545 | chip_160_479 546 | chip_034_146 547 | chip_139_487 548 | chip_203_337 549 | chip_129_473 550 | chip_262_441 551 | chip_164_446 552 | chip_232_279 
553 | chip_126_303 554 | chip_168_386 555 | chip_242_280 556 | chip_317_513 557 | chip_227_283 558 | chip_133_360 559 | chip_202_425 560 | chip_121_446 561 | chip_275_409 562 | chip_250_258 563 | chip_214_467 564 | chip_063_198 565 | chip_147_481 566 | chip_220_286 567 | chip_156_332 568 | chip_113_431 569 | chip_252_256 570 | chip_151_486 571 | chip_213_416 572 | chip_183_378 573 | chip_276_403 574 | chip_183_239 575 | chip_271_429 576 | chip_052_076 577 | chip_321_340 578 | chip_122_446 579 | chip_115_406 580 | chip_149_474 581 | chip_213_293 582 | chip_329_511 583 | chip_221_328 584 | chip_219_478 585 | chip_144_335 586 | chip_153_490 587 | chip_165_447 588 | chip_143_474 589 | chip_080_346 590 | chip_237_449 591 | chip_140_494 592 | chip_143_496 593 | chip_194_468 594 | chip_138_363 595 | chip_086_360 596 | chip_236_273 597 | chip_194_264 598 | chip_127_396 599 | chip_252_597 600 | chip_050_168 601 | chip_236_335 602 | chip_252_583 603 | chip_232_333 604 | chip_253_447 605 | chip_252_592 606 | chip_236_375 607 | chip_156_387 608 | chip_225_335 609 | chip_066_060 610 | chip_262_413 611 | chip_234_334 612 | chip_186_408 613 | chip_227_277 614 | chip_189_360 615 | chip_201_384 616 | chip_234_289 617 | chip_129_499 618 | chip_328_498 619 | chip_199_425 620 | chip_217_429 621 | chip_057_195 622 | chip_173_350 623 | chip_194_345 624 | chip_301_342 625 | chip_130_479 626 | chip_110_417 627 | chip_217_335 628 | chip_184_356 629 | chip_163_526 630 | chip_218_292 631 | chip_065_120 632 | chip_243_450 633 | chip_191_495 634 | chip_094_337 635 | chip_131_428 636 | chip_326_316 637 | chip_167_586 638 | chip_218_314 639 | chip_124_499 640 | chip_185_410 641 | chip_149_497 642 | chip_183_407 643 | chip_077_300 644 | chip_330_516 645 | chip_269_263 646 | chip_256_271 647 | chip_159_461 648 | chip_171_352 649 | chip_204_480 650 | chip_166_406 651 | chip_150_486 652 | chip_110_450 653 | chip_182_240 654 | chip_094_358 655 | chip_243_263 656 | chip_233_297 657 | chip_236_593 658 | chip_246_334 659 | chip_271_440 660 | chip_170_402 661 | chip_141_487 662 | chip_191_510 663 | chip_143_488 664 | chip_240_447 665 | chip_131_422 666 | chip_141_409 667 | chip_192_490 668 | chip_369_348 669 | chip_135_484 670 | chip_247_333 671 | chip_122_504 672 | chip_088_617 673 | chip_207_388 674 | chip_049_232 675 | chip_228_466 676 | chip_242_597 677 | chip_165_523 678 | chip_227_432 679 | chip_218_427 680 | chip_183_605 681 | chip_051_176 682 | chip_255_275 683 | chip_100_357 684 | chip_236_336 685 | chip_257_277 686 | chip_213_332 687 | chip_196_360 688 | chip_151_469 689 | chip_077_612 690 | chip_127_502 691 | chip_369_341 692 | chip_159_405 693 | chip_252_595 694 | chip_168_274 695 | chip_096_341 696 | chip_215_425 697 | chip_080_334 698 | chip_121_502 699 | chip_319_513 700 | chip_325_500 701 | chip_184_236 702 | chip_253_285 703 | chip_185_458 704 | chip_364_321 705 | chip_201_433 706 | chip_124_301 707 | chip_194_400 708 | chip_143_497 709 | chip_241_469 710 | chip_317_344 711 | chip_222_289 712 | chip_231_279 713 | chip_060_086 714 | chip_209_433 715 | chip_119_466 716 | chip_224_298 717 | chip_109_421 718 | chip_115_440 719 | chip_232_285 720 | chip_275_475 721 | chip_281_251 722 | chip_100_326 723 | chip_243_270 724 | chip_261_443 725 | chip_116_432 726 | chip_123_509 727 | chip_376_349 728 | chip_266_439 729 | chip_191_331 730 | chip_052_247 731 | chip_239_363 732 | chip_220_266 733 | chip_313_341 734 | chip_181_607 735 | chip_193_366 736 | chip_358_323 737 | chip_121_499 738 | chip_131_438 739 | chip_200_456 
740 | chip_184_604 741 | chip_236_332 742 | chip_197_450 743 | chip_239_334 744 | chip_109_158 745 | chip_171_413 746 | chip_239_356 747 | chip_330_503 748 | chip_205_288 749 | chip_198_593 750 | chip_224_429 751 | chip_319_340 752 | chip_119_332 753 | chip_214_319 754 | chip_304_542 755 | chip_227_292 756 | chip_318_513 757 | chip_254_285 758 | chip_227_290 759 | chip_180_241 760 | chip_176_507 761 | chip_080_613 762 | chip_368_344 763 | chip_133_484 764 | chip_295_409 765 | chip_173_504 766 | chip_121_559 767 | chip_125_484 768 | chip_268_324 769 | chip_168_587 770 | chip_225_326 771 | chip_231_296 772 | -------------------------------------------------------------------------------- /data_splits/sen1floods11/bolivia_split.txt: -------------------------------------------------------------------------------- 1 | Bolivia_103757 2 | Bolivia_129334 3 | Bolivia_195474 4 | Bolivia_23014 5 | Bolivia_233925 6 | Bolivia_242570 7 | Bolivia_290290 8 | Bolivia_294583 9 | Bolivia_312675 10 | Bolivia_314919 11 | Bolivia_360519 12 | Bolivia_379434 13 | Bolivia_432776 14 | Bolivia_60373 15 | Bolivia_76104 -------------------------------------------------------------------------------- /data_splits/sen1floods11/test_split.txt: -------------------------------------------------------------------------------- 1 | Ghana_313799 2 | Ghana_1078550 3 | Ghana_97059 4 | Ghana_359826 5 | Ghana_319168 6 | Ghana_866994 7 | Ghana_406026 8 | Ghana_53713 9 | Ghana_83483 10 | Ghana_167233 11 | Ghana_141271 12 | India_900498 13 | India_591317 14 | India_747992 15 | India_79637 16 | India_952728 17 | India_828067 18 | India_570384 19 | India_44475 20 | India_80221 21 | India_1018327 22 | India_592446 23 | India_772630 24 | India_631692 25 | India_399883 26 | Mekong_333434 27 | Mekong_45934 28 | Mekong_1443339 29 | Mekong_382276 30 | Mekong_254910 31 | Mekong_424793 32 | Nigeria_417184 33 | Nigeria_225131 34 | Nigeria_812045 35 | Nigeria_22088 36 | Pakistan_849790 37 | Pakistan_664885 38 | Pakistan_694942 39 | Pakistan_70625 40 | Pakistan_528249 41 | Pakistan_167553 42 | Paraguay_913449 43 | Paraguay_280900 44 | Paraguay_790830 45 | Paraguay_232281 46 | Paraguay_271769 47 | Paraguay_40936 48 | Paraguay_1029191 49 | Paraguay_59731 50 | Paraguay_683296 51 | Paraguay_34417 52 | Paraguay_868895 53 | Paraguay_511199 54 | Paraguay_80102 55 | Paraguay_651904 56 | Somalia_699062 57 | Somalia_60129 58 | Somalia_685158 59 | Somalia_166342 60 | Somalia_94102 61 | Somalia_322855 62 | Spain_6860600 63 | Spain_7558720 64 | Spain_5650136 65 | Spain_7370579 66 | Spain_6095801 67 | Spain_7387658 68 | Sri-Lanka_117737 69 | Sri-Lanka_1049830 70 | Sri-Lanka_534068 71 | Sri-Lanka_922192 72 | Sri-Lanka_377277 73 | Sri-Lanka_649970 74 | Sri-Lanka_450918 75 | Sri-Lanka_849649 76 | Sri-Lanka_713926 77 | USA_905409 78 | USA_430764 79 | USA_350244 80 | USA_527077 81 | USA_933610 82 | USA_66511 83 | USA_519181 84 | USA_778194 85 | USA_67102 86 | USA_1049586 87 | USA_595451 88 | USA_670826 89 | USA_504150 90 | USA_758178 91 | -------------------------------------------------------------------------------- /data_splits/sen1floods11/train_split.txt: -------------------------------------------------------------------------------- 1 | Ghana_103272 2 | Ghana_24858 3 | Ghana_147015 4 | Ghana_953791 5 | Ghana_154838 6 | Ghana_134751 7 | Ghana_61925 8 | Ghana_156478 9 | Ghana_144050 10 | Ghana_49890 11 | Ghana_97516 12 | Ghana_168875 13 | Ghana_141910 14 | Ghana_146222 15 | Ghana_11745 16 | Ghana_161233 17 | Ghana_7496 18 | Ghana_128663 19 | Ghana_264787 20 | 
Ghana_597288 21 | Ghana_8090 22 | Ghana_187318 23 | Ghana_362274 24 | Ghana_1089161 25 | Ghana_887131 26 | Ghana_247288 27 | Ghana_194723 28 | Ghana_26376 29 | Ghana_45911 30 | Ghana_83200 31 | Ghana_135389 32 | India_285297 33 | India_1072277 34 | India_324254 35 | India_177319 36 | India_391908 37 | India_166709 38 | India_534470 39 | India_773682 40 | India_956930 41 | India_979278 42 | India_664410 43 | India_747970 44 | India_207862 45 | India_500266 46 | India_179238 47 | India_287642 48 | India_774689 49 | India_383430 50 | India_373039 51 | India_698338 52 | India_1017769 53 | India_981708 54 | India_91379 55 | India_59460 56 | India_624341 57 | India_566697 58 | India_804466 59 | India_103447 60 | India_136196 61 | India_842775 62 | India_25540 63 | India_56450 64 | India_135434 65 | India_652725 66 | India_943439 67 | India_73419 68 | India_591549 69 | India_273873 70 | India_707886 71 | India_265762 72 | Mekong_221789 73 | Mekong_52610 74 | Mekong_596495 75 | Mekong_1439641 76 | Mekong_1191208 77 | Mekong_774566 78 | Mekong_213094 79 | Mekong_119477 80 | Mekong_269835 81 | Mekong_846007 82 | Mekong_1111068 83 | Mekong_1282475 84 | Mekong_16233 85 | Mekong_342411 86 | Mekong_1395593 87 | Mekong_922373 88 | Mekong_1396181 89 | Mekong_1248200 90 | Nigeria_529525 91 | Nigeria_143329 92 | Nigeria_439488 93 | Nigeria_707067 94 | Nigeria_81933 95 | Nigeria_35845 96 | Nigeria_598959 97 | Nigeria_952958 98 | Nigeria_78061 99 | Nigeria_600295 100 | Pakistan_909806 101 | Pakistan_132143 102 | Pakistan_712873 103 | Pakistan_35915 104 | Pakistan_246510 105 | Pakistan_667363 106 | Pakistan_366265 107 | Pakistan_548910 108 | Pakistan_740461 109 | Pakistan_211386 110 | Pakistan_401863 111 | Pakistan_246718 112 | Pakistan_474121 113 | Pakistan_65724 114 | Pakistan_1036366 115 | Pakistan_760290 116 | Paraguay_721886 117 | Paraguay_62897 118 | Paraguay_36015 119 | Paraguay_12870 120 | Paraguay_126224 121 | Paraguay_215904 122 | Paraguay_54421 123 | Paraguay_247656 124 | Paraguay_179624 125 | Paraguay_1029042 126 | Paraguay_246154 127 | Paraguay_892933 128 | Paraguay_11869 129 | Paraguay_921838 130 | Paraguay_24341 131 | Paraguay_605682 132 | Paraguay_997480 133 | Paraguay_148318 134 | Paraguay_191503 135 | Paraguay_1076204 136 | Paraguay_198534 137 | Paraguay_470303 138 | Paraguay_822142 139 | Paraguay_795075 140 | Paraguay_339807 141 | Paraguay_1056717 142 | Paraguay_403081 143 | Paraguay_149787 144 | Paraguay_36146 145 | Paraguay_792268 146 | Paraguay_482517 147 | Paraguay_212687 148 | Paraguay_791364 149 | Paraguay_224845 150 | Paraguay_149830 151 | Paraguay_48673 152 | Paraguay_44682 153 | Paraguay_989230 154 | Paraguay_225187 155 | Somalia_989553 156 | Somalia_93023 157 | Somalia_371421 158 | Somalia_230192 159 | Somalia_886726 160 | Somalia_992457 161 | Somalia_7931 162 | Somalia_195014 163 | Somalia_32375 164 | Somalia_1087508 165 | Somalia_295782 166 | Somalia_970508 167 | Somalia_626316 168 | Somalia_1068756 169 | Somalia_205466 170 | Spain_5923267 171 | Spain_7786924 172 | Spain_7856615 173 | Spain_4915752 174 | Spain_2472849 175 | Spain_8104659 176 | Spain_5816638 177 | Spain_2523247 178 | Spain_6199994 179 | Spain_8154154 180 | Spain_1167260 181 | Spain_2938657 182 | Spain_5678382 183 | Spain_716716 184 | Spain_3285448 185 | Spain_8199661 186 | Spain_337094 187 | Spain_2594119 188 | Sri-Lanka_152185 189 | Sri-Lanka_55568 190 | Sri-Lanka_92824 191 | Sri-Lanka_847275 192 | Sri-Lanka_845821 193 | Sri-Lanka_14484 194 | Sri-Lanka_1038087 195 | Sri-Lanka_178753 196 | Sri-Lanka_163406 197 | 
Sri-Lanka_233609 198 | Sri-Lanka_135713 199 | Sri-Lanka_748447 200 | Sri-Lanka_579082 201 | Sri-Lanka_916628 202 | Sri-Lanka_883641 203 | Sri-Lanka_52223 204 | Sri-Lanka_400518 205 | Sri-Lanka_523539 206 | Sri-Lanka_49764 207 | Sri-Lanka_653336 208 | Sri-Lanka_956740 209 | Sri-Lanka_249079 210 | Sri-Lanka_120804 211 | Sri-Lanka_551926 212 | USA_994009 213 | USA_66026 214 | USA_11422 215 | USA_955053 216 | USA_114964 217 | USA_693819 218 | USA_86502 219 | USA_438959 220 | USA_181263 221 | USA_605492 222 | USA_115033 223 | USA_375183 224 | USA_347609 225 | USA_908474 226 | USA_211406 227 | USA_831672 228 | USA_664261 229 | USA_770353 230 | USA_217598 231 | USA_638521 232 | USA_387945 233 | USA_224165 234 | USA_231124 235 | USA_170264 236 | USA_58086 237 | USA_1010394 238 | USA_198411 239 | USA_486103 240 | USA_260929 241 | USA_179917 242 | USA_84195 243 | USA_604222 244 | USA_354981 245 | USA_232060 246 | USA_655230 247 | USA_652955 248 | USA_1068362 249 | USA_806273 250 | USA_788696 251 | USA_348639 252 | USA_181503 253 | -------------------------------------------------------------------------------- /data_splits/sen1floods11/val_split.txt: -------------------------------------------------------------------------------- 1 | Ghana_5079 2 | Ghana_895194 3 | Ghana_868803 4 | Ghana_142312 5 | Ghana_234935 6 | Ghana_132163 7 | Ghana_495107 8 | Ghana_124834 9 | Ghana_1033830 10 | Ghana_277 11 | Ghana_308249 12 | India_1050276 13 | India_764946 14 | India_118868 15 | India_533192 16 | India_180633 17 | India_244057 18 | India_691027 19 | India_769408 20 | India_1018317 21 | India_869358 22 | India_164336 23 | India_70352 24 | India_833266 25 | India_1068117 26 | Mekong_1149855 27 | Mekong_977338 28 | Mekong_474783 29 | Mekong_293769 30 | Mekong_1413877 31 | Mekong_98310 32 | Nigeria_31096 33 | Nigeria_984831 34 | Nigeria_1095404 35 | Nigeria_820924 36 | Pakistan_43105 37 | Pakistan_94095 38 | Pakistan_210595 39 | Pakistan_1027214 40 | Pakistan_336228 41 | Pakistan_9684 42 | Paraguay_305760 43 | Paraguay_648632 44 | Paraguay_172476 45 | Paraguay_581976 46 | Paraguay_284928 47 | Paraguay_1019808 48 | Paraguay_76868 49 | Paraguay_252217 50 | Paraguay_205585 51 | Paraguay_7894 52 | Paraguay_896458 53 | Paraguay_657443 54 | Paraguay_934240 55 | Paraguay_153941 56 | Somalia_12849 57 | Somalia_256539 58 | Somalia_61368 59 | Somalia_649376 60 | Somalia_167787 61 | Spain_1199913 62 | Spain_8372658 63 | Spain_7604243 64 | Spain_6537196 65 | Spain_4282030 66 | Spain_8565131 67 | Sri-Lanka_85652 68 | Sri-Lanka_63307 69 | Sri-Lanka_612594 70 | Sri-Lanka_132922 71 | Sri-Lanka_236030 72 | Sri-Lanka_31559 73 | Sri-Lanka_236628 74 | Sri-Lanka_101973 75 | Sri-Lanka_321316 76 | USA_826217 77 | USA_741073 78 | USA_275372 79 | USA_19225 80 | USA_366607 81 | USA_308150 82 | USA_1039203 83 | USA_251323 84 | USA_1082482 85 | USA_225017 86 | USA_986268 87 | USA_646878 88 | USA_761032 89 | USA_741178 90 | -------------------------------------------------------------------------------- /geospatial_fm/__init__.py: -------------------------------------------------------------------------------- 1 | from .geospatial_fm import ConvTransformerTokensToEmbeddingNeck, TemporalViTEncoder, GeospatialNeck 2 | from .geospatial_pipelines import ( 3 | TorchRandomCrop, 4 | LoadGeospatialAnnotations, 5 | LoadGeospatialImageFromFile, 6 | Reshape, 7 | CastTensor, 8 | CollectTestList, 9 | TorchPermute 10 | ) 11 | from .datasets import GeospatialDataset 12 | from .temporal_encoder_decoder import TemporalEncoderDecoder 13 | 14 | __all__ = [ 15 
| "GeospatialDataset", 16 | "TemporalViTEncoder", 17 | "ConvTransformerTokensToEmbeddingNeck", 18 | "LoadGeospatialAnnotations", 19 | "LoadGeospatialImageFromFile", 20 | "TorchRandomCrop", 21 | "TemporalEncoderDecoder", 22 | "Reshape", 23 | "CastTensor", 24 | "CollectTestList", 25 | "GeospatialNeck", 26 | "TorchPermute" 27 | ] 28 | -------------------------------------------------------------------------------- /geospatial_fm/datasets.py: -------------------------------------------------------------------------------- 1 | from mmseg.datasets.builder import DATASETS 2 | from mmseg.datasets.custom import CustomDataset 3 | from .geospatial_pipelines import LoadGeospatialAnnotations 4 | 5 | 6 | @DATASETS.register_module() 7 | class GeospatialDataset(CustomDataset): 8 | """GeospatialDataset dataset. 9 | """ 10 | 11 | def __init__(self, CLASSES=(0, 1), PALETTE=None, **kwargs): 12 | 13 | self.CLASSES = CLASSES 14 | 15 | self.PALETTE = PALETTE 16 | 17 | gt_seg_map_loader_cfg = kwargs.pop('gt_seg_map_loader_cfg') if 'gt_seg_map_loader_cfg' in kwargs else dict() 18 | reduce_zero_label = kwargs.pop('reduce_zero_label') if 'reduce_zero_label' in kwargs else False 19 | 20 | super(GeospatialDataset, self).__init__( 21 | reduce_zero_label=reduce_zero_label, 22 | # ignore_index=2, 23 | **kwargs) 24 | 25 | self.gt_seg_map_loader = LoadGeospatialAnnotations(reduce_zero_label=reduce_zero_label, **gt_seg_map_loader_cfg) -------------------------------------------------------------------------------- /geospatial_fm/geospatial_fm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | # -------------------------------------------------------- 7 | # References: 8 | # timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm 9 | # DeiT: https://github.com/facebookresearch/deit 10 | # -------------------------------------------------------- 11 | 12 | import numpy as np 13 | import torch 14 | import torch.nn as nn 15 | from einops import rearrange 16 | from mmcv.runner import load_checkpoint 17 | from mmseg.models.builder import BACKBONES, NECKS 18 | from timm.models.layers import to_2tuple 19 | from timm.models.vision_transformer import Block 20 | from typing import List 21 | 22 | 23 | def _convTranspose2dOutput( 24 | input_size: int, 25 | stride: int, 26 | padding: int, 27 | dilation: int, 28 | kernel_size: int, 29 | output_padding: int, 30 | ): 31 | """ 32 | Calculate the output size of a ConvTranspose2d. 
33 | Taken from: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html 34 | """ 35 | return ( 36 | (input_size - 1) * stride 37 | - 2 * padding 38 | + dilation * (kernel_size - 1) 39 | + output_padding 40 | + 1 41 | ) 42 | 43 | 44 | def get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos: torch.Tensor): 45 | """ 46 | embed_dim: output dimension for each position 47 | pos: a list of positions to be encoded: size (M,) 48 | out: (M, D) 49 | """ 50 | assert embed_dim % 2 == 0 51 | omega = np.arange(embed_dim // 2, dtype=np.float32) 52 | omega /= embed_dim / 2.0 53 | omega = 1.0 / 10000**omega # (D/2,) 54 | 55 | pos = pos.reshape(-1) # (M,) 56 | out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product 57 | 58 | emb_sin = np.sin(out) # (M, D/2) 59 | emb_cos = np.cos(out) # (M, D/2) 60 | 61 | emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) 62 | return emb 63 | 64 | 65 | def get_3d_sincos_pos_embed(embed_dim: int, grid_size: tuple, cls_token: bool = False): 66 | # Copyright (c) Meta Platforms, Inc. and affiliates. 67 | # All rights reserved. 68 | 69 | # This source code is licensed under the license found in the 70 | # LICENSE file in the root directory of this source tree. 71 | # -------------------------------------------------------- 72 | # Position embedding utils 73 | # -------------------------------------------------------- 74 | """ 75 | grid_size: 3d tuple of grid size: t, h, w 76 | return: 77 | pos_embed: L, D 78 | """ 79 | 80 | assert embed_dim % 16 == 0 81 | 82 | t_size, h_size, w_size = grid_size 83 | 84 | w_embed_dim = embed_dim // 16 * 6 85 | h_embed_dim = embed_dim // 16 * 6 86 | t_embed_dim = embed_dim // 16 * 4 87 | 88 | w_pos_embed = get_1d_sincos_pos_embed_from_grid(w_embed_dim, np.arange(w_size)) 89 | h_pos_embed = get_1d_sincos_pos_embed_from_grid(h_embed_dim, np.arange(h_size)) 90 | t_pos_embed = get_1d_sincos_pos_embed_from_grid(t_embed_dim, np.arange(t_size)) 91 | 92 | w_pos_embed = np.tile(w_pos_embed, (t_size * h_size, 1)) 93 | h_pos_embed = np.tile(np.repeat(h_pos_embed, w_size, axis=0), (t_size, 1)) 94 | t_pos_embed = np.repeat(t_pos_embed, h_size * w_size, axis=0) 95 | 96 | pos_embed = np.concatenate((w_pos_embed, h_pos_embed, t_pos_embed), axis=1) 97 | 98 | if cls_token: 99 | pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) 100 | return pos_embed 101 | 102 | 103 | class PatchEmbed(nn.Module): 104 | """Frames of 2D Images to Patch Embedding 105 | The 3D version of timm.models.vision_transformer.PatchEmbed 106 | """ 107 | 108 | def __init__( 109 | self, 110 | img_size: int = 224, 111 | patch_size: int = 16, 112 | num_frames: int = 3, 113 | tubelet_size: int = 1, 114 | in_chans: int = 3, 115 | embed_dim: int = 768, 116 | norm_layer: nn.Module = None, 117 | flatten: bool = True, 118 | bias: bool = True, 119 | ): 120 | super().__init__() 121 | img_size = to_2tuple(img_size) 122 | patch_size = to_2tuple(patch_size) 123 | self.img_size = img_size 124 | self.patch_size = patch_size 125 | self.num_frames = num_frames 126 | self.tubelet_size = tubelet_size 127 | self.grid_size = ( 128 | num_frames // tubelet_size, 129 | img_size[0] // patch_size[0], 130 | img_size[1] // patch_size[1], 131 | ) 132 | self.num_patches = self.grid_size[0] * self.grid_size[1] * self.grid_size[2] 133 | self.flatten = flatten 134 | 135 | self.proj = nn.Conv3d( 136 | in_chans, 137 | embed_dim, 138 | kernel_size=(tubelet_size, patch_size[0], patch_size[1]), 139 | stride=(tubelet_size, patch_size[0], patch_size[1]), 140 | bias=bias, 141 | ) 
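# With the defaults above (img_size=224, patch_size=16, num_frames=3, tubelet_size=1),
# this Conv3d maps (B, in_chans, 3, 224, 224) -> (B, embed_dim, 3, 14, 14),
# i.e. grid_size = (3, 14, 14) and num_patches = 3 * 14 * 14 = 588 tokens per sample.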
142 | self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() 143 | 144 | def forward(self, x): 145 | B, C, T, H, W = x.shape 146 | assert ( 147 | H == self.img_size[0] 148 | ), f"Input image height ({H}) doesn't match model ({self.img_size[0]})." 149 | assert ( 150 | W == self.img_size[1] 151 | ), f"Input image width ({W}) doesn't match model ({self.img_size[1]})." 152 | x = self.proj(x) 153 | Hp, Wp = x.shape[3], x.shape[4] 154 | if self.flatten: 155 | x = x.flatten(2).transpose(1, 2) # B,C,T,H,W -> B,C,L -> B,L,C 156 | x = self.norm(x) 157 | return x, Hp, Wp 158 | 159 | 160 | class Norm2d(nn.Module): 161 | def __init__(self, embed_dim: int): 162 | super().__init__() 163 | self.ln = nn.LayerNorm(embed_dim, eps=1e-6) 164 | 165 | def forward(self, x): 166 | x = x.permute(0, 2, 3, 1) 167 | x = self.ln(x) 168 | x = x.permute(0, 3, 1, 2).contiguous() 169 | return x 170 | 171 | @NECKS.register_module() 172 | class GeospatialNeck(nn.Module): 173 | """ 174 | Neck that transforms the token-based output of transformer into a single embedding suitable for processing with standard layers. 175 | Performs 4 ConvTranspose2d operations on the rearranged input with kernel_size=2 and stride=2 176 | """ 177 | 178 | def __init__( 179 | self, 180 | embed_dim: int, 181 | first_conv_channels: int, 182 | Hp: int = 14, 183 | Wp: int = 14, 184 | channel_reduction_factor: int = 2, 185 | num_convs: int = 4, 186 | num_convs_per_upscale: int = 1, 187 | dropout: bool = False, 188 | drop_cls_token: bool = True, 189 | ): 190 | """ 191 | 192 | Args: 193 | embed_dim (int): Input embedding dimension 194 | first_conv_channel (int): Number of channels for first dimension 195 | Hp (int, optional): Height (in patches) of embedding to be upscaled. Defaults to 14. 196 | Wp (int, optional): Width (in patches) of embedding to be upscaled. Defaults to 14. 197 | channel_reduction_factor (int): Factor that each convolutional block reduces number of channels by. 198 | num_convs (int): Number of convolutional upscaling blocks. Each upscales 2x. 199 | drop_cls_token (bool, optional): Whether there is a cls_token, which should be dropped. This assumes the cls token is the first token. Defaults to True. 
200 | """ 201 | super().__init__() 202 | self.drop_cls_token = drop_cls_token 203 | self.Hp = Hp 204 | self.Wp = Wp 205 | self.H_out = Hp 206 | self.W_out = Wp 207 | self.dropout = dropout 208 | 209 | conv_kernel_size = 3 210 | conv_padding = 1 211 | 212 | kernel_size = 2 213 | stride = 2 214 | dilation = 1 215 | padding = 0 216 | output_padding = 0 217 | 218 | self.embed_dim = embed_dim 219 | self.channels = [first_conv_channels // (channel_reduction_factor ** i) for i in range(num_convs)] 220 | self.channels = [embed_dim] + self.channels 221 | 222 | for _ in range(len(self.channels) - 1): 223 | self.H_out = _convTranspose2dOutput( 224 | self.H_out, stride, padding, dilation, kernel_size, output_padding 225 | ) 226 | self.W_out = _convTranspose2dOutput( 227 | self.W_out, stride, padding, dilation, kernel_size, output_padding 228 | ) 229 | 230 | def _build_upscale_block(channels_in, channels_out): 231 | layers = [] 232 | layers.append(nn.ConvTranspose2d( 233 | channels_in, 234 | channels_out, 235 | kernel_size=kernel_size, 236 | stride=stride, 237 | dilation=dilation, 238 | padding=padding, 239 | output_padding=output_padding, 240 | )) 241 | 242 | layers += [nn.Sequential( 243 | nn.Conv2d(channels_out, 244 | channels_out, 245 | kernel_size=conv_kernel_size, 246 | padding=conv_padding), 247 | nn.BatchNorm2d(channels_out), 248 | nn.Dropout() if self.dropout else nn.Identity(), 249 | nn.ReLU()) for _ in range(num_convs_per_upscale)] 250 | 251 | return nn.Sequential(*layers) 252 | 253 | self.layers = nn.ModuleList([ 254 | _build_upscale_block(self.channels[i], self.channels[i+1]) 255 | for i in range(len(self.channels) - 1) 256 | ]) 257 | 258 | def forward(self, x): 259 | x = x[0] 260 | if self.drop_cls_token: 261 | x = x[:, 1:, :] 262 | x = x.permute(0, 2, 1).reshape(x.shape[0], -1, self.Hp, self.Wp) 263 | 264 | for layer in self.layers: 265 | x = layer(x) 266 | 267 | x = x.reshape((x.shape[0], self.channels[-1], self.H_out, self.W_out)) 268 | 269 | out = tuple([x]) 270 | 271 | return out 272 | 273 | @NECKS.register_module() 274 | class ConvTransformerTokensToEmbeddingNeck(nn.Module): 275 | """ 276 | Neck that transforms the token-based output of transformer into a single embedding suitable for processing with standard layers. 277 | Performs 4 ConvTranspose2d operations on the rearranged input with kernel_size=2 and stride=2 278 | """ 279 | 280 | def __init__( 281 | self, 282 | embed_dim: int, 283 | output_embed_dim: int, 284 | # num_frames: int = 1, 285 | Hp: int = 14, 286 | Wp: int = 14, 287 | drop_cls_token: bool = True, 288 | ): 289 | """ 290 | 291 | Args: 292 | embed_dim (int): Input embedding dimension 293 | output_embed_dim (int): Output embedding dimension 294 | Hp (int, optional): Height (in patches) of embedding to be upscaled. Defaults to 14. 295 | Wp (int, optional): Width (in patches) of embedding to be upscaled. Defaults to 14. 296 | drop_cls_token (bool, optional): Whether there is a cls_token, which should be dropped. This assumes the cls token is the first token. Defaults to True. 
297 | """ 298 | super().__init__() 299 | self.drop_cls_token = drop_cls_token 300 | self.Hp = Hp 301 | self.Wp = Wp 302 | self.H_out = Hp 303 | self.W_out = Wp 304 | # self.num_frames = num_frames 305 | 306 | kernel_size = 2 307 | stride = 2 308 | dilation = 1 309 | padding = 0 310 | output_padding = 0 311 | for _ in range(4): 312 | self.H_out = _convTranspose2dOutput( 313 | self.H_out, stride, padding, dilation, kernel_size, output_padding 314 | ) 315 | self.W_out = _convTranspose2dOutput( 316 | self.W_out, stride, padding, dilation, kernel_size, output_padding 317 | ) 318 | 319 | self.embed_dim = embed_dim 320 | self.output_embed_dim = output_embed_dim 321 | self.fpn1 = nn.Sequential( 322 | nn.ConvTranspose2d( 323 | self.embed_dim, 324 | self.output_embed_dim, 325 | kernel_size=kernel_size, 326 | stride=stride, 327 | dilation=dilation, 328 | padding=padding, 329 | output_padding=output_padding, 330 | ), 331 | Norm2d(self.output_embed_dim), 332 | nn.GELU(), 333 | nn.ConvTranspose2d( 334 | self.output_embed_dim, 335 | self.output_embed_dim, 336 | kernel_size=kernel_size, 337 | stride=stride, 338 | dilation=dilation, 339 | padding=padding, 340 | output_padding=output_padding, 341 | ), 342 | ) 343 | self.fpn2 = nn.Sequential( 344 | nn.ConvTranspose2d( 345 | self.output_embed_dim, 346 | self.output_embed_dim, 347 | kernel_size=kernel_size, 348 | stride=stride, 349 | dilation=dilation, 350 | padding=padding, 351 | output_padding=output_padding, 352 | ), 353 | Norm2d(self.output_embed_dim), 354 | nn.GELU(), 355 | nn.ConvTranspose2d( 356 | self.output_embed_dim, 357 | self.output_embed_dim, 358 | kernel_size=kernel_size, 359 | stride=stride, 360 | dilation=dilation, 361 | padding=padding, 362 | output_padding=output_padding, 363 | ), 364 | ) 365 | 366 | def forward(self, x): 367 | x = x[0] 368 | if self.drop_cls_token: 369 | x = x[:, 1:, :] 370 | x = x.permute(0, 2, 1).reshape(x.shape[0], -1, self.Hp, self.Wp) 371 | 372 | x = self.fpn1(x) 373 | x = self.fpn2(x) 374 | 375 | x = x.reshape((-1, self.output_embed_dim, self.H_out, self.W_out)) 376 | 377 | out = tuple([x]) 378 | 379 | return out 380 | 381 | 382 | @BACKBONES.register_module() 383 | class TemporalViTEncoder(nn.Module): 384 | """Encoder from an ViT with capability to take in temporal input. 385 | 386 | This class defines an encoder taken from a ViT architecture. 387 | """ 388 | 389 | def __init__( 390 | self, 391 | img_size: int = 224, 392 | patch_size: int = 16, 393 | num_frames: int = 1, 394 | tubelet_size: int = 1, 395 | in_chans: int = 3, 396 | embed_dim: int = 1024, 397 | depth: int = 24, 398 | num_heads: int = 16, 399 | mlp_ratio: float = 4.0, 400 | norm_layer: nn.Module = nn.LayerNorm, 401 | norm_pix_loss: bool = False, 402 | pretrained: str = None 403 | ): 404 | """ 405 | 406 | Args: 407 | img_size (int, optional): Input image size. Defaults to 224. 408 | patch_size (int, optional): Patch size to be used by the transformer. Defaults to 16. 409 | num_frames (int, optional): Number of frames (temporal dimension) to be input to the encoder. Defaults to 1. 410 | tubelet_size (int, optional): Tubelet size used in patch embedding. Defaults to 1. 411 | in_chans (int, optional): Number of input channels. Defaults to 3. 412 | embed_dim (int, optional): Embedding dimension. Defaults to 1024. 413 | depth (int, optional): Encoder depth. Defaults to 24. 414 | num_heads (int, optional): Number of heads used in the encoder blocks. Defaults to 16. 415 | mlp_ratio (float, optional): Ratio to be used for the size of the MLP in encoder blocks. 
Defaults to 4.0. 416 | norm_layer (nn.Module, optional): Norm layer to be used. Defaults to nn.LayerNorm. 417 | norm_pix_loss (bool, optional): Whether to use Norm Pix Loss. Defaults to False. 418 | pretrained (str, optional): Path to pretrained encoder weights. Defaults to None. 419 | """ 420 | super().__init__() 421 | 422 | # -------------------------------------------------------------------------- 423 | # MAE encoder specifics 424 | self.embed_dim = embed_dim 425 | self.patch_embed = PatchEmbed( 426 | img_size, patch_size, num_frames, tubelet_size, in_chans, embed_dim 427 | ) 428 | num_patches = self.patch_embed.num_patches 429 | self.num_frames = num_frames 430 | 431 | self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) 432 | self.pos_embed = nn.Parameter( 433 | torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False 434 | ) # fixed sin-cos embedding 435 | 436 | self.blocks = nn.ModuleList( 437 | [ 438 | Block( 439 | embed_dim, 440 | num_heads, 441 | mlp_ratio, 442 | qkv_bias=True, 443 | norm_layer=norm_layer, 444 | ) 445 | for _ in range(depth) 446 | ] 447 | ) 448 | self.norm = norm_layer(embed_dim) 449 | 450 | self.norm_pix_loss = norm_pix_loss 451 | self.pretrained = pretrained 452 | 453 | self.initialize_weights() 454 | 455 | def initialize_weights(self): 456 | # initialization 457 | # initialize (and freeze) pos_embed by sin-cos embedding 458 | pos_embed = get_3d_sincos_pos_embed( 459 | self.pos_embed.shape[-1], self.patch_embed.grid_size, cls_token=True 460 | ) 461 | self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0)) 462 | 463 | # initialize patch_embed like nn.Linear (instead of nn.Conv2d) 464 | w = self.patch_embed.proj.weight.data 465 | torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) 466 | 467 | if isinstance(self.pretrained, str): 468 | self.apply(self._init_weights) 469 | print(f"load from {self.pretrained}") 470 | load_checkpoint(self, self.pretrained, strict=False, map_location="cpu") 471 | elif self.pretrained is None: 472 | # # initialize nn.Linear and nn.LayerNorm 473 | self.apply(self._init_weights) 474 | 475 | 476 | def _init_weights(self, m): 477 | if isinstance(m, nn.Linear): 478 | # we use xavier_uniform following official JAX ViT: 479 | torch.nn.init.xavier_uniform_(m.weight) 480 | if isinstance(m, nn.Linear) and m.bias is not None: 481 | nn.init.constant_(m.bias, 0) 482 | elif isinstance(m, nn.LayerNorm): 483 | nn.init.constant_(m.bias, 0) 484 | nn.init.constant_(m.weight, 1.0) 485 | 486 | def forward(self, x): 487 | # embed patches 488 | x, _, _ = self.patch_embed(x) 489 | 490 | # add pos embed w/o cls token 491 | x = x + self.pos_embed[:, 1:, :] 492 | 493 | # append cls token 494 | cls_token = self.cls_token + self.pos_embed[:, :1, :] 495 | cls_tokens = cls_token.expand(x.shape[0], -1, -1) 496 | x = torch.cat((cls_tokens, x), dim=1) 497 | 498 | # apply Transformer blocks 499 | for blk in self.blocks: 500 | x = blk(x) 501 | 502 | x = self.norm(x) 503 | 504 | return tuple([x]) 505 | -------------------------------------------------------------------------------- /geospatial_fm/geospatial_pipelines.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file holds pipeline components useful for loading remote sensing images and annotations. 
3 | """ 4 | import os.path as osp 5 | 6 | import numpy as np 7 | import rioxarray 8 | import torchvision.transforms.functional as F 9 | from mmcv.parallel import DataContainer as DC 10 | from mmseg.datasets.builder import PIPELINES 11 | from torchvision import transforms 12 | 13 | 14 | def open_tiff(fname): 15 | data = rioxarray.open_rasterio(fname) 16 | return data.to_numpy() 17 | 18 | 19 | @PIPELINES.register_module() 20 | class ConstantMultiply(object): 21 | """Multiply image by constant. 22 | 23 | It multiplies an image by a constant 24 | 25 | Args: 26 | constant (float, optional): The constant to multiply by. 1.0 (e.g. no alteration if not specified) 27 | """ 28 | 29 | def __init__(self, constant=1.0): 30 | self.constant = constant 31 | 32 | def __call__(self, results): 33 | """Call function to multiply by constant input img 34 | 35 | Args: 36 | results (dict): Result dict from loading pipeline. 37 | 38 | Returns: 39 | dict: Results with image multiplied by constant 40 | """ 41 | 42 | results["img"] = results["img"] * self.constant 43 | 44 | return results 45 | 46 | 47 | @PIPELINES.register_module() 48 | class BandsExtract(object): 49 | 50 | """Extract bands from image. Assumes channels last 51 | 52 | It extracts bands from an image. Assumes channels last. 53 | 54 | Args: 55 | bands (list, optional): The list of indexes to use for extraction. If not provided nothing will happen. 56 | """ 57 | 58 | def __init__(self, bands=None): 59 | self.bands = bands 60 | 61 | def __call__(self, results): 62 | """Call function to multiply extract bands 63 | 64 | Args: 65 | results (dict): Result dict from loading pipeline. 66 | 67 | Returns: 68 | dict: Results with extracted bands 69 | """ 70 | 71 | if self.bands is not None: 72 | results["img"] = results["img"][..., self.bands] 73 | 74 | return results 75 | 76 | 77 | @PIPELINES.register_module() 78 | class TorchRandomCrop(object): 79 | 80 | """ 81 | 82 | It randomly crops a multichannel tensor. 83 | 84 | Args: 85 | crop_size (tuple): the size to use to crop 86 | """ 87 | 88 | def __init__(self, crop_size=(224, 224)): 89 | self.crop_size = crop_size 90 | 91 | def __call__(self, results): 92 | i, j, h, w = transforms.RandomCrop.get_params(results["img"], self.crop_size) 93 | results["img"] = F.crop(results["img"], i, j, h, w).float() 94 | results["gt_semantic_seg"] = F.crop(results["gt_semantic_seg"], i, j, h, w) 95 | 96 | return results 97 | 98 | 99 | @PIPELINES.register_module() 100 | class TorchNormalize(object): 101 | """Normalize the image. 102 | 103 | It normalises a multichannel image using torch 104 | 105 | Args: 106 | mean (sequence): Mean values . 107 | std (sequence): Std values of 3 channels. 108 | """ 109 | 110 | def __init__(self, means, stds): 111 | self.means = means 112 | self.stds = stds 113 | 114 | def __call__(self, results): 115 | """Call function to normalize images. 116 | 117 | Args: 118 | results (dict): Result dict from loading pipeline. 119 | 120 | Returns: 121 | dict: Normalized results, 'img_norm_cfg' key is added into 122 | result dict. 123 | """ 124 | results["img"] = F.normalize(results["img"], self.means, self.stds, False) 125 | results["img_norm_cfg"] = dict(mean=self.means, std=self.stds) 126 | return results 127 | 128 | 129 | @PIPELINES.register_module() 130 | class Reshape(object): 131 | """ 132 | It reshapes a tensor. 
133 | Args: 134 | new_shape (tuple): tuple with new shape 135 | keys (list): list with keys to apply reshape to 136 | look_up (dict): dictionary to use to look up dimensions when more than one is to be inferred from the original image, which have to be inputed as -1s in the new_shape argument. eg {'2': 1, '3': 2} would infer the new 3rd and 4th dimensions from the 2nd and 3rd from the original image. 137 | """ 138 | 139 | def __init__(self, new_shape, keys, look_up=None): 140 | self.new_shape = new_shape 141 | self.keys = keys 142 | self.look_up = look_up 143 | 144 | def __call__(self, results): 145 | dim_to_infer = np.where(np.array(self.new_shape) == -1)[0] 146 | 147 | for key in self.keys: 148 | if (len(dim_to_infer) > 1) & (self.look_up is not None): 149 | old_shape = results[key].shape 150 | tmp = np.array(self.new_shape) 151 | for i in range(len(dim_to_infer)): 152 | tmp[dim_to_infer[i]] = old_shape[self.look_up[str(dim_to_infer[i])]] 153 | self.new_shape = tuple(tmp) 154 | results[key] = results[key].reshape(self.new_shape) 155 | 156 | return results 157 | 158 | 159 | @PIPELINES.register_module() 160 | class CastTensor(object): 161 | """ 162 | 163 | It casts a tensor. 164 | 165 | Args: 166 | new_type (str): torch type 167 | keys (list): list with keys to apply reshape to 168 | """ 169 | 170 | def __init__(self, new_type, keys): 171 | self.new_type = new_type 172 | self.keys = keys 173 | 174 | def __call__(self, results): 175 | for key in self.keys: 176 | results[key] = results[key].type(self.new_type) 177 | 178 | return results 179 | 180 | 181 | @PIPELINES.register_module() 182 | class CollectTestList(object): 183 | """ 184 | 185 | It processes the data in a way that conforms with inference and test pipelines. 186 | 187 | Args: 188 | 189 | keys (list): keys to collect (eg img/gt_semantic_seg) 190 | meta_keys (list): additional meta to collect and add to img_metas 191 | 192 | """ 193 | 194 | def __init__( 195 | self, 196 | keys, 197 | meta_keys=( 198 | "filename", 199 | "ori_filename", 200 | "ori_shape", 201 | "img_shape", 202 | "pad_shape", 203 | "scale_factor", 204 | "flip", 205 | "flip_direction", 206 | "img_norm_cfg", 207 | ), 208 | ): 209 | self.keys = keys 210 | self.meta_keys = meta_keys 211 | 212 | def __call__(self, results): 213 | data = {} 214 | img_meta = {} 215 | for key in self.meta_keys: 216 | img_meta[key] = results[key] 217 | img_meta = [img_meta] 218 | data["img_metas"] = DC(img_meta, cpu_only=True) 219 | for key in self.keys: 220 | data[key] = [results[key]] 221 | return data 222 | 223 | def __repr__(self): 224 | return ( 225 | self.__class__.__name__ + f"(keys={self.keys}, meta_keys={self.meta_keys})" 226 | ) 227 | 228 | 229 | @PIPELINES.register_module() 230 | class TorchPermute(object): 231 | """Permute dimensions. 232 | 233 | Particularly useful in going from channels_last to channels_first 234 | 235 | Args: 236 | keys (Sequence[str]): Keys of results to be permuted. 237 | order (Sequence[int]): New order of dimensions. 238 | """ 239 | 240 | def __init__(self, keys, order): 241 | self.keys = keys 242 | self.order = order 243 | 244 | def __call__(self, results): 245 | for key in self.keys: 246 | results[key] = results[key].permute(self.order) 247 | 248 | return results 249 | 250 | def __repr__(self): 251 | return self.__class__.__name__ + f"(keys={self.keys}, order={self.order})" 252 | 253 | 254 | @PIPELINES.register_module() 255 | class LoadGeospatialImageFromFile(object): 256 | """ 257 | 258 | It loads a tiff image. Returns in channels last format. 
259 | 260 | Args: 261 | to_float32 (bool): Whether to convert the loaded image to a float32 262 | numpy array. If set to False, the loaded image is an uint8 array. 263 | Defaults to False. 264 | nodata (float/int): no data value to substitute to nodata_replace 265 | nodata_replace (float/int): value to use to replace no data 266 | """ 267 | 268 | def __init__(self, to_float32=False, nodata=None, nodata_replace=0.0): 269 | self.to_float32 = to_float32 270 | self.nodata = nodata 271 | self.nodata_replace = nodata_replace 272 | 273 | def __call__(self, results): 274 | if results.get("img_prefix") is not None: 275 | filename = osp.join(results["img_prefix"], results["img_info"]["filename"]) 276 | else: 277 | filename = results["img_info"]["filename"] 278 | img = open_tiff(filename) 279 | # to channels last format 280 | img = np.transpose(img, (1, 2, 0)) 281 | 282 | if self.to_float32: 283 | img = img.astype(np.float32) 284 | 285 | if self.nodata is not None: 286 | img = np.where(img == self.nodata, self.nodata_replace, img) 287 | 288 | results["filename"] = filename 289 | results["ori_filename"] = results["img_info"]["filename"] 290 | results["img"] = img 291 | results["img_shape"] = img.shape 292 | results["ori_shape"] = img.shape 293 | # Set initial values for default meta_keys 294 | results["pad_shape"] = img.shape 295 | results["scale_factor"] = 1.0 296 | results["flip"] = False 297 | num_channels = 1 if len(img.shape) < 3 else img.shape[2] 298 | results["img_norm_cfg"] = dict( 299 | mean=np.zeros(num_channels, dtype=np.float32), 300 | std=np.ones(num_channels, dtype=np.float32), 301 | to_rgb=False, 302 | ) 303 | return results 304 | 305 | def __repr__(self): 306 | repr_str = self.__class__.__name__ 307 | repr_str += f"(to_float32={self.to_float32}" 308 | return repr_str 309 | 310 | 311 | @PIPELINES.register_module() 312 | class LoadGeospatialAnnotations(object): 313 | """Load annotations for semantic segmentation. 314 | 315 | Args: 316 | to_uint8 (bool): Whether to convert the loaded label to a uint8 317 | reduce_zero_label (bool): Whether reduce all label value by 1. 318 | Usually used for datasets where 0 is background label. 319 | Default: False. 
320 | nodata (float/int): no data value to substitute to nodata_replace 321 | nodata_replace (float/int): value to use to replace no data 322 | 323 | 324 | """ 325 | 326 | def __init__( 327 | self, 328 | reduce_zero_label=False, 329 | nodata=None, 330 | nodata_replace=-1, 331 | ): 332 | self.reduce_zero_label = reduce_zero_label 333 | self.nodata = nodata 334 | self.nodata_replace = nodata_replace 335 | 336 | def __call__(self, results): 337 | if results.get("seg_prefix", None) is not None: 338 | filename = osp.join(results["seg_prefix"], results["ann_info"]["seg_map"]) 339 | else: 340 | filename = results["ann_info"]["seg_map"] 341 | 342 | gt_semantic_seg = open_tiff(filename).squeeze() 343 | 344 | if self.nodata is not None: 345 | gt_semantic_seg = np.where( 346 | gt_semantic_seg == self.nodata, self.nodata_replace, gt_semantic_seg 347 | ) 348 | # reduce zero_label 349 | if self.reduce_zero_label: 350 | # avoid using underflow conversion 351 | gt_semantic_seg[gt_semantic_seg == 0] = 255 352 | gt_semantic_seg = gt_semantic_seg - 1 353 | gt_semantic_seg[gt_semantic_seg == 254] = 255 354 | if results.get("label_map", None) is not None: 355 | # Add deep copy to solve bug of repeatedly 356 | # replace `gt_semantic_seg`, which is reported in 357 | # https://github.com/open-mmlab/mmsegmentation/pull/1445/ 358 | gt_semantic_seg_copy = gt_semantic_seg.copy() 359 | for old_id, new_id in results["label_map"].items(): 360 | gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id 361 | 362 | results["gt_semantic_seg"] = gt_semantic_seg 363 | results["seg_fields"].append("gt_semantic_seg") 364 | return results 365 | -------------------------------------------------------------------------------- /geospatial_fm/temporal_encoder_decoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | from mmseg.core import add_prefix 7 | from mmseg.ops import resize 8 | from mmseg.models import builder 9 | from mmseg.models.builder import SEGMENTORS 10 | from mmseg.models.segmentors.base import BaseSegmentor 11 | from mmseg.models.segmentors.encoder_decoder import EncoderDecoder 12 | 13 | 14 | @SEGMENTORS.register_module() 15 | class TemporalEncoderDecoder(EncoderDecoder): 16 | """Encoder Decoder segmentors. 17 | 18 | EncoderDecoder typically consists of backbone, neck, decode_head, auxiliary_head. 19 | Note that auxiliary_head is only used for deep supervision during training, 20 | which could be dumped during inference. 21 | 22 | The backbone should return plain embeddings. 23 | The neck can process these to make them suitable for the chosen heads. 24 | The heads perform the final processing that will return the output. 
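At test time, `test_cfg.mode` selects between sliding-window inference ('slide', handled by `slide_inference`) and full-image inference ('whole', handled by `whole_inference`).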
25 | """ 26 | 27 | def __init__(self, 28 | backbone, 29 | decode_head, 30 | neck=None, 31 | auxiliary_head=None, 32 | train_cfg=None, 33 | test_cfg=None, 34 | pretrained=None, 35 | init_cfg=None, 36 | frozen_backbone=False): 37 | super(EncoderDecoder, self).__init__(init_cfg) 38 | if pretrained is not None: 39 | assert backbone.get('pretrained') is None, \ 40 | 'both backbone and segmentor set pretrained weight' 41 | backbone.pretrained = pretrained 42 | self.backbone = builder.build_backbone(backbone) 43 | 44 | if frozen_backbone: 45 | for param in self.backbone.parameters(): 46 | param.requires_grad = False 47 | 48 | if neck is not None: 49 | self.neck = builder.build_neck(neck) 50 | self._init_decode_head(decode_head) 51 | self._init_auxiliary_head(auxiliary_head) 52 | 53 | self.train_cfg = train_cfg 54 | self.test_cfg = test_cfg 55 | assert self.with_decode_head 56 | 57 | def encode_decode(self, img, img_metas): 58 | """Encode images with backbone and decode into a semantic segmentation 59 | map of the same size as input.""" 60 | x = self.extract_feat(img) 61 | out = self._decode_head_forward_test(x, img_metas) 62 | 63 | #### size calculated over last two dimensions ### 64 | size = img.shape[-2:] 65 | 66 | out = resize( 67 | input=out, 68 | size=size, 69 | mode='bilinear', 70 | align_corners=self.align_corners) 71 | return out 72 | 73 | def slide_inference(self, img, img_meta, rescale): 74 | """Inference by sliding-window with overlap. 75 | 76 | If h_crop > h_img or w_crop > w_img, the small patch will be used to 77 | decode without padding. 78 | """ 79 | 80 | h_stride, w_stride = self.test_cfg.stride 81 | h_crop, w_crop = self.test_cfg.crop_size 82 | 83 | #### size and bactch size over last two dimensions ### 84 | img_size = img.size() 85 | batch_size = img_size[0] 86 | h_img = img_size[-2] 87 | w_img = img_size[-1] 88 | out_channels = self.out_channels 89 | h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 90 | w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 91 | preds = img.new_zeros((batch_size, out_channels, h_img, w_img)) 92 | count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) 93 | for h_idx in range(h_grids): 94 | for w_idx in range(w_grids): 95 | y1 = h_idx * h_stride 96 | x1 = w_idx * w_stride 97 | y2 = min(y1 + h_crop, h_img) 98 | x2 = min(x1 + w_crop, w_img) 99 | y1 = max(y2 - h_crop, 0) 100 | x1 = max(x2 - w_crop, 0) 101 | 102 | if len(img_size) == 4: 103 | 104 | crop_img = img[:, :, y1:y2, x1:x2] 105 | 106 | elif len(img_size) == 5: 107 | 108 | crop_img = img[:, :, :, y1:y2, x1:x2] 109 | 110 | 111 | 112 | crop_seg_logit = self.encode_decode(crop_img, img_meta) 113 | preds += F.pad(crop_seg_logit, 114 | (int(x1), int(preds.shape[3] - x2), int(y1), 115 | int(preds.shape[2] - y2))) 116 | 117 | count_mat[:, :, y1:y2, x1:x2] += 1 118 | assert (count_mat == 0).sum() == 0 119 | if torch.onnx.is_in_onnx_export(): 120 | # cast count_mat to constant while exporting to ONNX 121 | count_mat = torch.from_numpy( 122 | count_mat.cpu().detach().numpy()).to(device=img.device) 123 | preds = preds / count_mat 124 | 125 | if rescale: 126 | # remove padding area 127 | #### size over last two dimensions ### 128 | resize_shape = img_meta[0]['img_shape'][:2] 129 | preds = preds[:, :, :resize_shape[0], :resize_shape[1]] 130 | preds = resize( 131 | preds, 132 | size=img_meta[0]['ori_shape'][:2], 133 | mode='bilinear', 134 | align_corners=self.align_corners, 135 | warning=False) 136 | return preds 137 | 138 | def whole_inference(self, img, img_meta, rescale): 139 
| """Inference with full image.""" 140 | 141 | seg_logit = self.encode_decode(img, img_meta) 142 | if rescale: 143 | # support dynamic shape for onnx 144 | if torch.onnx.is_in_onnx_export(): 145 | size = img.shape[-2:] 146 | else: 147 | # remove padding area 148 | resize_shape = img_meta[0]['img_shape'][:2] 149 | seg_logit = seg_logit[:, :, :resize_shape[0], :resize_shape[1]] 150 | size = img_meta[0]['ori_shape'][:2] 151 | seg_logit = resize( 152 | seg_logit, 153 | size=size, 154 | mode='bilinear', 155 | align_corners=self.align_corners, 156 | warning=False) 157 | 158 | return seg_logit 159 | 160 | def inference(self, img, img_meta, rescale): 161 | """Inference with slide/whole style. 162 | 163 | Args: 164 | img (Tensor): The input image of shape (N, 3, H, W). 165 | img_meta (dict): Image info dict where each dict has: 'img_shape', 166 | 'scale_factor', 'flip', and may also contain 167 | 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 168 | For details on the values of these keys see 169 | `mmseg/datasets/pipelines/formatting.py:Collect`. 170 | rescale (bool): Whether rescale back to original shape. 171 | 172 | Returns: 173 | Tensor: The output segmentation map. 174 | """ 175 | 176 | assert self.test_cfg.mode in ['slide', 'whole'] 177 | ori_shape = img_meta[0]['ori_shape'] 178 | assert all(_['ori_shape'] == ori_shape for _ in img_meta) 179 | if self.test_cfg.mode == 'slide': 180 | seg_logit = self.slide_inference(img, img_meta, rescale) 181 | else: 182 | seg_logit = self.whole_inference(img, img_meta, rescale) 183 | 184 | if self.out_channels == 1: 185 | output = F.sigmoid(seg_logit) 186 | else: 187 | output = F.softmax(seg_logit, dim=1) 188 | 189 | flip = ( 190 | img_meta[0]["flip"] if "flip" in img_meta[0] else False 191 | ) ##### if flip key is not there d not apply it 192 | if flip: 193 | flip_direction = img_meta[0]["flip_direction"] 194 | assert flip_direction in ["horizontal", "vertical"] 195 | if flip_direction == "horizontal": 196 | output = output.flip(dims=(3,)) 197 | elif flip_direction == "vertical": 198 | output = output.flip(dims=(2,)) 199 | return output 200 | 201 | def simple_test(self, img, img_meta, rescale=True): 202 | """Simple test with single image.""" 203 | seg_logit = self.inference(img, img_meta, rescale) 204 | if self.out_channels == 1: 205 | seg_pred = (seg_logit > self.decode_head.threshold).to(seg_logit).squeeze(1) 206 | else: 207 | seg_pred = seg_logit.argmax(dim=1) 208 | if torch.onnx.is_in_onnx_export(): 209 | 210 | seg_pred = seg_pred.unsqueeze(0) 211 | return seg_pred 212 | seg_pred = seg_pred.cpu().numpy() 213 | # unravel batch dim 214 | seg_pred = list(seg_pred) 215 | return seg_pred 216 | -------------------------------------------------------------------------------- /hls-gfm/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Model and Inputs 3 | Prithvi is a first-of-its-kind temporal Vision transformer (ViT) pretrained by the IBM and NASA team on continental US Harmonised Landsat Sentinel 2 (HLS) data. Particularly, the model adopts a self-supervised encoder developed with a ViT architecture and Masked AutoEncoder learning strategy, with a Mean Squared Error (MSE) as a loss function. The model includes spatial attention across multiple patchies and also temporal attention for each patch. 4 | 5 | ![](Prithvi_training.png) 6 | 7 | The model expects remote sensing data in a video format (B, C, T, H, W). 
Note that the temporal dimension is very important here and not present in most 8 | other works around remote sensing modeling. Being able to handle a time series of remote sensing images is helpful for a variety of downstream tasks. The model can also handle static imagery, which can simply be fed in with T=1. 9 | 10 | ### Pre-training 11 | The model was pre-trained on NASA's HLS2 L30 product (30 m resolution) over the continental United States for the year 2017. The following bands were used: 12 | 13 | * Blue 14 | * Green 15 | * Red 16 | * Narrow NIR 17 | * SWIR 1 18 | * SWIR 2 19 | 20 | ### Code 21 | The model follows the [original mae repo](https://github.com/facebookresearch/mae) with some modifications, including: 22 | 1. replacing the 2D patch embed with a 3D patch embed; 23 | 2. replacing the 2D positional embed with a 3D positional embed; 24 | 3. replacing 2D patchify and unpatchify with their 3D counterparts. 25 | 26 | ### Model weights 27 | The model weights are available [here](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M/blob/main/Prithvi_100M.pt). 28 | -------------------------------------------------------------------------------- /model_inference.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import glob 3 | import os 4 | import time 5 | 6 | import numpy as np 7 | import rasterio 8 | import torch 9 | from mmcv import Config 10 | from mmcv.parallel import collate, scatter 11 | from mmseg.apis import init_segmentor 12 | from mmseg.datasets.pipelines import Compose, LoadImageFromFile 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser( 17 | description="Inference on flood detection fine-tuned model" 18 | ) 19 | parser.add_argument("-config", help="path to model configuration file") 20 | parser.add_argument("-ckpt", help="path to model checkpoint") 21 | parser.add_argument("-input", help="path to input images folder for inference") 22 | parser.add_argument("-output", help="path to save output image") 23 | parser.add_argument("-input_type", help="file type of input images", default="tif") 24 | parser.add_argument( 25 | "-bands", 26 | help="bands in the file where to find the relevant data", 27 | type=int, 28 | nargs="+", 29 | ) 30 | parser.add_argument("-device", help="device", default="cuda", type=str) 31 | 32 | args = parser.parse_args() 33 | 34 | return args 35 | 36 | 37 | def open_tiff(fname): 38 | with rasterio.open(fname, "r") as src: 39 | data = src.read() 40 | 41 | return data 42 | 43 | 44 | def write_tiff(img_wrt, filename, metadata): 45 | """ 46 | Writes a raster image to file. 47 | 48 | :param img_wrt: numpy array containing the data (can be 2D for single band or 3D for multiple bands) 49 | :param filename: file path to the output file 50 | :param metadata: metadata to use to write the raster to disk 51 | :return: 52 | """ 53 | 54 | with rasterio.open(filename, "w", **metadata) as dest: 55 | if len(img_wrt.shape) == 2: 56 | img_wrt = img_wrt[None] 57 | 58 | for i in range(img_wrt.shape[0]): 59 | dest.write(img_wrt[i, :, :], i + 1) 60 | 61 | return filename 62 | 63 | 64 | def get_meta(fname): 65 | with rasterio.open(fname, "r") as src: 66 | meta = src.meta 67 | 68 | return meta 69 | 70 | 71 | def inference_segmentor(model, imgs, custom_test_pipeline=None): 72 | """Inference image(s) with the segmentor. 73 | 74 | Args: 75 | model (nn.Module): The loaded segmentor. 76 | imgs (str/ndarray or list[str/ndarray]): Either image files or loaded 77 | images.
78 | 79 | Returns: 80 | (list[Tensor]): The segmentation result. 81 | """ 82 | cfg = model.cfg 83 | device = next(model.parameters()).device # model device 84 | # build the data pipeline 85 | test_pipeline = ( 86 | [LoadImageFromFile()] + cfg.data.test.pipeline[1:] 87 | if custom_test_pipeline is None 88 | else custom_test_pipeline 89 | ) 90 | test_pipeline = Compose(test_pipeline) 91 | # prepare data 92 | data = [] 93 | imgs = imgs if isinstance(imgs, list) else [imgs] 94 | for img in imgs: 95 | img_data = {"img_info": {"filename": img}} 96 | img_data = test_pipeline(img_data) 97 | data.append(img_data) 98 | # print(data.shape) 99 | 100 | data = collate(data, samples_per_gpu=len(imgs)) 101 | if next(model.parameters()).is_cuda: 102 | # data = collate(data, samples_per_gpu=len(imgs)) 103 | # scatter to specified GPU 104 | data = scatter(data, [device])[0] 105 | else: 106 | # img_metas = scatter(data['img_metas'],'cpu') 107 | # data['img_metas'] = [i.data[0] for i in data['img_metas']] 108 | 109 | img_metas = data["img_metas"].data[0] 110 | img = data["img"] 111 | data = {"img": img, "img_metas": img_metas} 112 | 113 | with torch.no_grad(): 114 | result = model(return_loss=False, rescale=True, **data) 115 | return result 116 | 117 | 118 | def inference_on_file(model, target_image, output_image, custom_test_pipeline): 119 | time_taken = -1 120 | try: 121 | st = time.time() 122 | print("Running inference...") 123 | result = inference_segmentor(model, target_image, custom_test_pipeline) 124 | print("Output has shape: " + str(result[0].shape)) 125 | 126 | ##### build a nodata mask from the image metadata 127 | mask = open_tiff(target_image) 128 | meta = get_meta(target_image) 129 | mask = np.where(mask == meta["nodata"], 1, 0) 130 | mask = np.max(mask, axis=0)[None] 131 | 132 | result[0] = np.where(mask == 1, -1, result[0]) 133 | 134 | ##### Save file to disk 135 | meta["count"] = 1 136 | meta["dtype"] = "int16" 137 | meta["compress"] = "lzw" 138 | meta["nodata"] = -1 139 | print("Saving output...") 140 | write_tiff(result[0], output_image, meta) 141 | et = time.time() 142 | time_taken = np.round(et - st, 1) 143 | print( 144 | f"Inference completed in {str(time_taken)} seconds.
Output available at: " 145 | + output_image 146 | ) 147 | 148 | except: 149 | print(f"Error on image {target_image} \nContinue to next input") 150 | 151 | return time_taken 152 | 153 | 154 | def process_test_pipeline(custom_test_pipeline, bands=None): 155 | # change extracted bands if necessary 156 | if bands is not None: 157 | extract_index = [ 158 | i for i, x in enumerate(custom_test_pipeline) if x["type"] == "BandsExtract" 159 | ] 160 | 161 | if len(extract_index) > 0: 162 | custom_test_pipeline[extract_index[0]]["bands"] = bands 163 | 164 | collect_index = [ 165 | i for i, x in enumerate(custom_test_pipeline) if x["type"].find("Collect") > -1 166 | ] 167 | 168 | # adapt collected keys if necessary 169 | if len(collect_index) > 0: 170 | keys = [ 171 | "img_info", 172 | "filename", 173 | "ori_filename", 174 | "img", 175 | "img_shape", 176 | "ori_shape", 177 | "pad_shape", 178 | "scale_factor", 179 | "img_norm_cfg", 180 | ] 181 | custom_test_pipeline[collect_index[0]]["meta_keys"] = keys 182 | 183 | return custom_test_pipeline 184 | 185 | 186 | def inference_on_files( 187 | config_path, ckpt, input_type, input_path, output_path, bands, device 188 | ): 189 | # load model 190 | config = Config.fromfile(config_path) 191 | config.model.backbone.pretrained = None 192 | model = init_segmentor(config, ckpt, device) 193 | 194 | # identify images to predict on 195 | target_images = glob.glob(os.path.join(input_path, "*." + input_type)) 196 | 197 | print("Identified images to predict on: " + str(len(target_images))) 198 | 199 | # check if output folder available 200 | if not os.path.isdir(output_path): 201 | os.mkdir(output_path) 202 | 203 | # modify test pipeline if necessary 204 | custom_test_pipeline = process_test_pipeline(model.cfg.data.test.pipeline, bands) 205 | 206 | # for each image predict and save to disk 207 | for i, target_image in enumerate(target_images): 208 | print(f"Working on Image {i}") 209 | output_image = os.path.join( 210 | output_path, 211 | target_image.split("/")[-1].replace( 212 | "." + input_type, "_pred." + input_type 213 | ), 214 | ) 215 | 216 | inference_on_file(model, target_image, output_image, custom_test_pipeline) 217 | 218 | 219 | def main(): 220 | # unpack args 221 | args = parse_args() 222 | config_path = args.config 223 | ckpt = args.ckpt 224 | input_type = args.input_type 225 | input_path = args.input 226 | output_path = args.output 227 | bands = args.bands 228 | device = args.device 229 | 230 | inference_on_files( 231 | config_path, ckpt, input_type, input_path, output_path, bands, device 232 | ) 233 | 234 | 235 | if __name__ == "__main__": 236 | main() 237 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name="geospatial_fm", 5 | version="0.1.0", 6 | description="MMSegmentation classes for geospatial-fm finetuning", 7 | author="Paolo Fraccaro, Carlos Gomes, Johannes Jakubik", 8 | packages=["geospatial_fm"], 9 | license="Apache 2", 10 | long_description=open("README.md").read(), 11 | install_requires=[ 12 | "mmsegmentation @ git+https://github.com/open-mmlab/mmsegmentation.git@186572a3ce64ac9b6b37e66d58c76515000c3280", 13 | "rasterio", 14 | "rioxarray", 15 | "einops", 16 | "timm==0.4.12", 17 | "tensorboard", 18 | "imagecodecs", 19 | "yapf==0.40.1", 20 | ], 21 | ) 22 | --------------------------------------------------------------------------------