├── .idea
│   ├── .gitignore
│   ├── SLM.iml
│   ├── dictionaries
│   │   └── XiaoYuan.xml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── LICENSE
├── README.md
├── README_v2.md
├── SECURITY.md
├── evaluations
│   └── SLM.py
├── figure
│   ├── SeLo.jpg
│   ├── compare.jpg
│   ├── csm3.jpg
│   ├── demo.gif
│   ├── fig1.jpg
│   ├── indicator.jpg
│   ├── indicator_verify.jpg
│   ├── sample.jpg
│   ├── selo_with_subtask.jpg
│   ├── stage1.jpg
│   ├── stage2.jpg
│   ├── time_analse.png
│   └── visual_data2.jpg
├── predict
│   ├── __pycache__
│   │   ├── model_init.cpython-37.pyc
│   │   └── vocabs.cpython-37.pyc
│   ├── cache
│   │   └── README.md
│   ├── checkpoints
│   │   └── README.md
│   ├── data
│   │   └── rsitmd_precomp
│   │       ├── test_caps.txt
│   │       ├── test_filename.txt
│   │       ├── train_caps.txt
│   │       └── train_filename.txt
│   ├── evaluate_selo.py
│   ├── generate_selo.py
│   ├── generate_selo_v2.py
│   ├── layers
│   │   ├── AMFMN.py
│   │   ├── AMFMN_utils.py
│   │   ├── LW_MCR.py
│   │   ├── MCR_utils.py
│   │   ├── __pycache__
│   │   │   ├── AMFMN.cpython-37.pyc
│   │   │   ├── AMFMN_utils.cpython-37.pyc
│   │   │   └── seq2vec.cpython-37.pyc
│   │   └── seq2vec.py
│   ├── model_encoder.py
│   ├── model_init.py
│   ├── option
│   │   └── RSITMD
│   │       ├── RSITMD_AMFMN.yaml
│   │       └── RSITMD_LW_MCR.yaml
│   ├── test_data
│   │   └── sparseresidential_3814.tif
│   ├── util
│   │   └── convert_data.py
│   ├── utils.py
│   ├── vocab
│   │   └── rsitmd_splits_vocab.json
│   └── vocabs.py
├── requirements.txt
└── test_data
    ├── annotations
    │   └── anno.json
    ├── imgs
    │   └── README.md
    └── visual_testdata.py
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Govern
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## The official code for the paper ["Learning to Evaluate Performance of Multi-modal Semantic Localization", TGRS 2022](https://arxiv.org/abs/2209.06515)
2 | ##### Authors: Zhiqiang Yuan, Chongyang Li, Zhuoying Pan, et al.
3 |
4 |
5 | 
6 | 
7 | 
8 |
9 |
10 | ### -------------------------------------------------------------------------------------
11 | ### Welcome :+1:_`Fork and Star`_:+1:, and we'll let you know when we update
12 |
13 | ### -------------------------------------------------------------------------------------
14 | ### We recently released SeLo v2 [\[link\]](https://github.com/xiaoyuan1996/SemanticLocalizationMetrics/blob/master/README_v2.md), which improves SeLo in both speed and accuracy.
15 |
16 | ### -------------------------------------------------------------------------------------
17 |
18 | #### Contents
19 |
20 | * [Introduction](#introduction)
21 | * [Dataset And Metrics](#dataset-and-metrics)
22 | * [Testdata](#testdata)
23 | * [Metrics](#metrics)
24 | * [Baselines](#baselines)
25 | * [Comparison of SeLo Performance on Different Trainsets](#comparison-of-selo-performance-on-different-trainsets)
26 | * [Comparison of SeLo Performance on Different Scales](#comparison-of-selo-performance-on-different-scales)
27 | * [Comparison of SeLo Performance on Different Retrieval Models](#comparison-of-selo-performance-on-different-retrieval-models)
28 | * [Analysis of Time Consumption](#analysis-of-time-consumption)
29 | * [Implementation](#implementation)
30 | * [Environment](#environment)
31 | * [Run The Demo](#run-the-demo)
32 | * [Customize Model](#customize-model)
33 | * [Epilogue](#epilogue)
34 | * [Citation](#citation)
35 | ### -------------------------------------------------------------------------------------
36 |
37 |
38 |
39 | ## INTRODUCTION
40 | An official evaluation metric for semantic localization.
41 |
42 |
43 | 

44 | **Fig.1.** (a) Results of airplane detection. (b) Results of semantic localization with the query "white planes parked in the open space of the white airport". Compared with tasks such as detection, SeLo achieves semantic-level retrieval with only caption-level annotation during training, which can adapt to higher-level retrieval tasks.
45 |
46 |
47 |
48 |
49 |
50 | **Fig.2.** Visualization of SeLo with the query "the red rails where the grey train is located run through the residential area".
51 |
52 |
53 | The semantic localization (SeLo) task uses cross-modal information such as text to quickly localize regions of RS images at the semantic level [\[link\]](https://ieeexplore.ieee.org/document/9437331).
54 | This task achieves semantic-level detection using only caption-level supervision.
55 | In our opinion, it is meaningful and interesting work, which unifies sub-tasks such as detection and segmentation.
56 |
57 | 
58 |
59 | **Fig.3.** Framework of Semantic Localization. After multi-scale segmentation of large RS images, we perform cross-modal similarity calculation between the query and multiple slices. The calculated regional probabilities are then fused by pixel-level averaging, which generates the SeLo map after further noise suppression.
60 |
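As a rough illustration of the pixel-level averaging step, here is a minimal sketch (the actual pipeline lives in predict/generate_selo.py; the final normalization here is an assumption):

```
import numpy as np

def selo_map(img_hw, boxes, sims):
    """Fuse per-slice similarities into a pixel-level SeLo probability map.
    boxes: (x0, y0, x1, y1) slice coordinates; sims: slice-to-query similarities."""
    H, W = img_hw
    acc = np.zeros((H, W))   # summed similarity per pixel
    cnt = np.zeros((H, W))   # number of slices covering each pixel
    for (x0, y0, x1, y1), s in zip(boxes, sims):
        acc[y0:y1, x0:x1] += s
        cnt[y0:y1, x0:x1] += 1
    prob = acc / np.maximum(cnt, 1)  # pixel-level averaging across overlapping scales
    return (prob - prob.min()) / (prob.max() - prob.min() + 1e-12)
```
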
61 | We contribute test sets, evaluation metrics, and baselines for semantic localization, and provide a detailed demo for using this evaluation framework.
62 | If you have any questions, please open a GitHub [issue](https://github.com/xiaoyuan1996/SemanticLocalizationMetrics/issues).
63 | Start and enjoy!
64 |
65 |
66 |
67 | ### -------------------------------------------------------------------------------------
68 |
69 | ## DATASET AND METRICS
70 |
71 | #### TESTDATA
72 |
73 | We contribute a semantic localization testset (SLT) to provide systematic evaluation for the SeLo task.
74 | The images in the SLT come from Google Earth, and Fig. 4 exhibits several samples from the testset.
75 | Every sample includes a large RS image whose size ranges from 3k × 2k to 10k × 10k pixels, a query sentence, and one or more corresponding semantic bounding boxes.
76 |
77 |
78 |
79 | 

80 | **Fig.4.** Four samples of the Semantic Localization Testset. (a) Query: "ships without cargo floating on the black sea are docked in the port". (b) Query: "a white airplane ready to take off on a grayblack runway". (c) Query: "some cars are parked in a parking lot surrounded by green woods". (d) Query: "the green football field is surrounded by a red track".
83 |
84 | **TABLE I** Quantitative Statistics of Semantic Localization Testset.
85 |
86 | | Parameter | Value | Parameter | Value |
87 | | ------------- | ------| ---------------------| -------|
88 | | Word Number | 160 | Caption Ave Length | 11.2 |
89 | | Sample Number | 59 | Ave Resolution Ratio (m) | 0.3245 |
90 | | Channel Number| 3 | Ave Region Number | 1.75 |
91 | | Image Number | 22 | Ave Attention Ratio | 0.068 |
92 |
93 |
94 |
95 |
96 | #### METRICS
97 |
98 | We systematically model and study semantic localization in detail, and propose multiple discriminative evaluation metrics to quantify this task based on salient area proportion, attention shift distance, and discrete attention distance.
99 |
100 |
101 | 

102 | **Fig.5.** Three proposed evaluation metrics for semantic localization. (a) Rsu aims to calculate the attention ratio of the ground-truth area to the useless area. (b) Ras attempts to quantify the shift distance of the attention from the GT center. (c) Rda evaluates the discreteness of the generated attention from probability divergence distance and candidate attention number.
105 |
106 | **TABLE II** Explanation of the indicators.
107 |
108 | | Indicator | Range | Meaning |
109 | | --------- | ------| ---------|
110 | | Rsu | ↑ [ 0 ~ 1 ] | Measures the salient area proportion |
111 | | Ras | ↓ [ 0 ~ 1 ] | Measures how far the attention center shifts from the annotation center |
112 | | Rda | ↑ [ 0 ~ 1 ] | Measures how well the attention concentrates on one point |
113 | | Rmi | ↑ [ 0 ~ 1 ] | Weighted mean indicator of the SeLo task |
114 |
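Rmi combines the three indicators with the weights hard-coded in evaluations/SLM.py (0.4, 0.35, 0.25); since Ras is a lower-is-better indicator, it enters as (1 - Ras):

```
def rmi(rsu, rda, ras):
    # weights taken from evaluations/SLM.py (rmi_wsu, rmi_was, rmi_wda)
    return 0.4 * rsu + 0.35 * (1 - ras) + 0.25 * rda

# e.g. with the values from the demo log below: rmi(0.9281, 0.4689, 0.0633) ≈ 0.8163
```
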
115 |
116 |
117 |
118 | 

119 | **Fig.6.** Qualitative analysis of SeLo indicator validity. (a) Query: "eight large white oil storage tanks built on grey concrete floor". (b) Query: "a white plane parked in a tawny clearing inside the airport". (c) Query: "lots of white and black planes parked inside the grey and white airport".
121 |
122 | ## BASELINES
123 |
124 | All experiments are carried out on an Intel(R) Xeon(R) Gold 6226R CPU @ 2.90GHz and a single NVIDIA RTX 3090 GPU.
125 |
126 | #### Comparison of SeLo Performance on Different Trainsets
127 |
128 | | Trainset | ↑ Rsu | ↑ Rda | ↓ Ras | ↑ Rmi |
129 | | ------------- | ------| -------------- | -------| -------|
130 | | Sydney | 0.5844 | 0.5670 | 0.5026 | 0.5496 |
131 | | UCM | 0.5821 | 0.4715 | 0.5277 | 0.5160 |
132 | | RSITMD| **0.6920** | **0.6667** | **0.3323** | **0.6772** |
133 | | RSICD | 0.6661 | 0.5773 | 0.3875 | 0.6251 |
134 |
135 |
136 | #### Comparison of SeLo Performance on Different Scales
137 |
138 | | | Scale-128 | Scale-256 | Scale-512 | Scale-768 | ↑ Rsu | ↑ Rda | ↓ Ras | ↑ Rmi | Time (m) |
139 | | --- | ----------| ----------| ----------| -----------| ----- | ----- | ----- | ----- | ----- |
140 | | s1 | √ | √ | | | 0.6389 | 0.6488 | 0.2878 | 0.6670 | 33.81 |
141 | | s2 | | √ | √ | | 0.6839 | 0.6030 | 0.3326 | 0.6579 | 14.25 |
142 | | s3 | | | √ | √ | 0.6897 | 0.6371 | 0.3933 | 0.6475 | **11.23** |
143 | | s4 | √ | √ | √ | | 0.6682 | **0.7072** | **0.2694** | **0.6998** | 34.60 |
144 | | s5 | | √ | √ | √ | **0.6920** | 0.6667 | 0.3323 | 0.6772 | 16.92 |
145 | | s6 | √ | √ | √ | √ | 0.6809 | 0.6884 | 0.3025 | 0.6886 | 36.28 |
146 |
147 |
148 | #### Comparison of SeLo Performance on Different Retrieval Models
149 |
150 | | Model | ↑ Rsu | ↑ Rda | ↓ Ras | ↑ Rmi | Time (m) |
151 | | ------------- | ------| -------------- | -------| -------| -------- |
152 | | VSE++ | 0.6364 | 0.5829 | 0.4166 | 0.6045 | 15.61 |
153 | | LW-MCR | 0.6698 | 0.6021 | 0.4335 | 0.6167 | **15.47** |
154 | | SCAN | 0.6421 | 0.6132 | 0.3871 | 0.6247 | 16.32 |
155 | | CAMP | 0.6819 | 0.6314 | 0.3912 | 0.6437 | 18.24 |
156 | | AMFMN | **0.6920** | **0.6667** | **0.3323** | **0.6772** | 16.92 |
157 |
158 | #### Analysis of Time Consumption
159 |
160 | | Scale (128, 256) | Cut | Sim | Gnt | Flt | Total |
161 | | ------------- | ------| -------------- | -------| -------| ------|
162 | | Time (m) | 2.85 | 20.60 | 7.40 | 0.73 | 33.81 |
163 | | Rate (%) | 8.42 | 60.94 | 21.88 | 2.16 | - |
164 |
165 | | Scale (512, 768) | Cut | Sim | Gnt | Flt | Total |
166 | | ------------- | ------| -------------- | -------| -------| ------|
167 | | Time (m) | 0.46 | 1.17 | 6.96 | 0.67 | 11.23 |
168 | | Rate (%) | 4.06 | 10.42 | 61.98 | 5.97 | - |
169 |
170 | | Scale (256, 512, 768) | Cut | Sim | Gnt | Flt | Total |
171 | | ------------- | ------| -------------- | -------| -------| ------|
172 | | Time (m) | 0.93 | 5.72 | 7.38 | 0.74 | 16.92 |
173 | | Rate (%) | 5.52 | 33.82 | 43.60 | 4.37 | - |
174 |
175 | ## IMPLEMENTATION
176 |
177 | #### ENVIRONMENT
178 |
179 | 1. Clone our project and install the requirements; make sure the code path contains only English characters:
180 |
181 | ```
182 | $ apt-get install python3
183 | $ git clone git@github.com:xiaoyuan1996/SemanticLocalizationMetrics.git
184 | $ cd SemanticLocalizationMetrics
185 | $ pip install -r requirements.txt
186 | ```
187 |
188 | 2. Prepare checkpoints and test images:
189 |
190 |
191 | * Download the pretrained checkpoint **AMFMN.tar** to **./predict/checkpoints/** from [Google Drive](https://drive.google.com/drive/folders/1LISJHiLVxPCiry1i7xJtOvuQ77nbEZD1?usp=sharing), and make sure:
192 |
193 | + ./predict/checkpoints/
194 | + xxx.pth
195 |
196 | * Download the test images **SLM_testimgs.zip** to **./test_data/imgs/** from [BaiduYun (passwd: NIST)](https://pan.baidu.com/s/1DyRbY7s3jx-ZCWbcC_XHlw) or [Google Drive](https://drive.google.com/drive/folders/1LISJHiLVxPCiry1i7xJtOvuQ77nbEZD1?usp=sharing), and make sure:
197 |
198 | + ./test_data/imgs/
199 | + xxx.tif
200 |
201 | 3. Download the SkipThought files to **/data** from [seq2vec (Password: NIST)](https://pan.baidu.com/s/1FOPldSGO6ctETiXMlPGC8g) (or another path, in which case you should change **option['model']['seq2vec']['dir_st']** accordingly, as sketched below).
202 |
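For example, a minimal sketch of overriding that key (the key path is grounded in the note above; how generate_selo.py actually loads its config may differ):

```
import yaml

# load the model config and point seq2vec at the downloaded SkipThought files
with open("option/RSITMD/RSITMD_AMFMN.yaml") as f:
    options = yaml.safe_load(f)
options['model']['seq2vec']['dir_st'] = "/data"
```
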
203 | 4. Check the environment:
204 |
205 | ```
206 | $ cd predict
207 | $ python model_encoder.py
208 |
209 | visual_vector: (512,)
210 | text_vector: (512,)
211 | Encoder test successful!
212 | Calc sim successful!
213 | ```
214 |
215 |
216 | #### RUN THE DEMO
217 | Run the following command; the generated SeLo maps will be saved in **cache/**.
218 | ```
219 | $ cd predict
220 | $ nohup python generate_selo.py --cache_path cache/RSITMD_AMFMN
221 | $ tail -f cache/RSITMD_AMFMN/log.txt
222 |
223 | 2022-05-05 22:01:58,180 - __main__ - INFO - Processing 31/59: 20.jpg
224 | 2022-05-05 22:01:58,180 - __main__ - INFO - Corresponding text: lots of white and black planes parked inside the grey and white airport.
225 |
226 | 2022-05-05 22:01:59,518 - __main__ - INFO - img size:10000x10001
227 | 2022-05-05 22:01:59,518 - __main__ - INFO - Start split images with step 256
228 | 2022-05-05 22:02:02,657 - __main__ - INFO - Start split images with step 512
229 | 2022-05-05 22:02:04,077 - __main__ - INFO - Start split images with step 768
230 | 2022-05-05 22:02:04,818 - __main__ - INFO - Image ../test_data/imgs/20.jpg has been split successfully.
231 | 2022-05-05 22:02:04,819 - __main__ - INFO - Start calculate similarities ...
232 | 2022-05-05 22:02:48,182 - __main__ - INFO - Calculate similarities in 43.36335849761963s
233 | 2022-05-05 22:02:48,182 - __main__ - INFO - Start generate heatmap ...
234 | 2022-05-05 22:03:40,673 - __main__ - INFO - Generate finished, start optim ...
235 | 2022-05-05 22:03:45,500 - __main__ - INFO - Generate heatmap in 57.31790471076965s
236 | 2022-05-05 22:03:45,500 - __main__ - INFO - Saving heatmap in cache/heatmap_31.jpg ...
237 | 2022-05-05 22:03:45,501 - __main__ - INFO - Saving heatmap in cache/addmap_31.jpg ...
238 | 2022-05-05 22:03:45,501 - __main__ - INFO - Saving heatmap in cache/probmap_31.jpg ...
239 | 2022-05-05 22:03:48,540 - __main__ - INFO - Saved ok.
240 | 2022-05-05 22:03:59,562 - root - INFO - Eval cache/probmap_31.jpg
241 | 2022-05-05 22:03:59,562 - root - INFO - +++++++ Calc the SLM METRICS +++++++
242 | 2022-05-05 22:03:59,562 - root - INFO - ++++ ↑ Rsu [0 ~ 1]:0.9281 ++++
243 | 2022-05-05 22:03:59,562 - root - INFO - ++++ ↑ Rda [0 ~ 1]:0.4689 ++++
244 | 2022-05-05 22:03:59,562 - root - INFO - ++++ ↓ Ras [0 ~ 1]:0.0633 ++++
245 | 2022-05-05 22:03:59,562 - root - INFO - ++++ ↑ Rmi [0 ~ 1]:0.8163 ++++
246 | 2022-05-05 22:03:59,562 - root - INFO - ++++++++++++++++++++++++++++++++++++
247 | ...
248 |
249 | $ ls cache/RSITMD_AMFMN
250 | ```
251 |
252 |
253 |
254 | #### CUSTOMIZE MODEL
255 |
256 | 1. Put the pretrained checkpoint file in **checkpoints/**.
257 | 2. Add your own model to **layers/** and the corresponding config yaml to **option/**.
258 | 3. Change **model_init.model_init** to initialize your own model.
259 | 4. Add the corresponding **EncoderSpecModel** class to **model_encoder.py** (see the sketch after the run command below).
260 | 5. Run:
261 | ```
262 | python generate_selo.py --yaml_path option/xxx.yaml --cache_path cache/xxx
263 | ```
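The contract implied by the environment check in step 4 of ENVIRONMENT is 512-d visual and text vectors compared by a similarity score. A minimal sketch of the shape a custom encoder wrapper might take (class internals and method names here are illustrative, not the actual model_encoder.py interface):

```
import numpy as np

class EncoderSpecModel:
    """Illustrative wrapper: adapt your own model to emit comparable 512-d vectors."""
    def __init__(self, model):
        self.model = model  # your pretrained cross-modal retrieval model

    def encode_image(self, img):
        v = self.model.image_branch(img)   # assumed to return an np.ndarray of shape (512,)
        return v / (np.linalg.norm(v) + 1e-12)

    def encode_text(self, text):
        t = self.model.text_branch(text)   # assumed to return an np.ndarray of shape (512,)
        return t / (np.linalg.norm(t) + 1e-12)

    def similarity(self, img, text):
        # cosine similarity between the normalized embeddings
        return float(np.dot(self.encode_image(img), self.encode_text(text)))
```
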
264 |
265 | ## EPILOGUE
266 | So far, our attitude towards the semantic localization task is positive and optimistic: it realizes detection at the semantic level with only annotation at the caption level.
267 | We sincerely hope that this project will facilitate the development of semantic localization tasks.
268 | We welcome researchers to look into this direction, which may make refined object-level semantic detection possible.
269 |
270 | 
271 |
272 | **Fig.7.** Combining SeLo with other tasks. The top of the figure shows the detection results after adding the SeLo map with the query "two parallel green playgrounds". The bottom of the figure shows the road extraction results after adding the SeLo map with the query "the red rails where the grey train is located run through the residential area". (a) Source images. (b) Results of specific tasks. (c) Results of specific SeLo maps. (d) Fusion results of specific tasks and SeLo maps.
276 |
277 | ## CITATION
278 | ```
279 | Z. Yuan et al., "Learning to Evaluate Performance of Multi-modal Semantic Localization," in IEEE Transactions on Geoscience and Remote Sensing, doi: 10.1109/TGRS.2022.3207171.
280 | ```
281 |
282 | ## OTHER CITATION
283 | ```
284 | Z. Yuan et al., "Exploring a Fine-Grained Multiscale Method for Cross-Modal Remote Sensing Image Retrieval," in IEEE Transactions on Geoscience and Remote Sensing, doi: 10.1109/TGRS.2021.3078451.
285 |
286 | Z. Yuan et al., "A Lightweight Multi-scale Crossmodal Text-Image Retrieval Method In Remote Sensing," in IEEE Transactions on Geoscience and Remote Sensing, doi: 10.1109/TGRS.2021.3124252.
287 |
288 | Z. Yuan et al., "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information," in IEEE Transactions on Geoscience and Remote Sensing, doi: 10.1109/TGRS.2022.3163706.
289 | ```
290 |
--------------------------------------------------------------------------------
/README_v2.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | Based on SeLo, we conduct a more in-depth exploration and propose SeLo v2. Compared with the initial SeLo framework, SeLo v2 achieves better semantic localization performance with only half the time consumption, which shows the great advantages of SeLo v2 in terms of both time and accuracy.
4 |
5 | 
6 |
7 | (a) Caption. (b) SeLo. (c) SeLo v2. SeLo refers to the task of obtaining the most relevant locations in large-scale RS images using semantic information such as text. Compared with SeLo, SeLo v2 weakens the attention on non-target pixels and achieves more detailed semantic-level retrieval.
8 |
9 | # Method
10 |
11 | ## Multilevel Likelihood Expansion
12 |
13 | 
14 |
15 | In the first stage, the RS image is cropped at a larger scale with a single ratio. The purpose of this cropping method is to make each large slice contain more semantic information and to roughly lock onto the slices where the target object may be located.
16 |
17 | 
18 |
19 | In the second stage, we use the pixel continuity of the target object to perform multi-scale expansion inward and outward on the initially determined large slices, as sketched below.
20 |
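To make the two stages concrete, here is a minimal sketch (not the released implementation; the `encode_image` helper, the query vector, the scales, and the keep ratio are all assumptions):

```
import numpy as np

def stage1_candidates(img, encode_image, query_vec, scale=768, keep_ratio=0.2):
    """Stage 1: crop at a single large scale and keep the slices most similar to the query."""
    H, W = img.shape[:2]
    scored = []
    for y in range(0, H - scale + 1, scale):
        for x in range(0, W - scale + 1, scale):
            patch = img[y:y + scale, x:x + scale]
            sim = float(np.dot(encode_image(patch), query_vec))  # cosine sim if L2-normalized
            scored.append((sim, x, y))
    scored.sort(reverse=True)
    return scored[:max(1, int(len(scored) * keep_ratio))]  # roughly lock the likely slices

def stage2_expand(img, candidates, scale=768, sub_scales=(256, 512)):
    """Stage 2: multi-scale expansion inward and outward around each candidate slice."""
    H, W = img.shape[:2]
    regions = []
    for _, x, y in candidates:
        for s in sub_scales:
            # expand outward by half a sub-scale and clamp to the image bounds
            x0, y0 = max(0, x - s // 2), max(0, y - s // 2)
            x1, y1 = min(W, x + scale + s // 2), min(H, y + scale + s // 2)
            regions.append((x0, y0, x1, y1, s))  # re-scored at finer scales downstream
    return regions
```
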
21 | ## Chaotic Self-feeding Mechanism
22 |
23 | 
24 |
25 | Fig. 3. Schematic diagram of the proposed chaotic self-feeding mechanism. With a small number of samples, the features generated by the text encoder are not stable. For this reason, CSM regards them as high-frequency features and generates a fundamental-frequency representation through confusion to obtain a more stable representation (one possible reading is sketched below).
26 |
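A heavily hedged sketch of the confusion step (an interpretation, not the released code: it treats a single encoding as the high-frequency feature and averages the encodings of several word-order-confused views of the query):

```
import numpy as np

def csm_text_feature(query, encode_text, n_views=8, seed=0):
    """Average the encodings of several confused views of the query to
    obtain a stabler (fundamental-frequency) text representation."""
    rng = np.random.default_rng(seed)
    words = query.split()
    feats = [encode_text(query)]          # original, high-frequency feature
    for _ in range(n_views - 1):
        shuffled = list(words)
        rng.shuffle(shuffled)             # "confusion": perturb the word order
        feats.append(encode_text(" ".join(shuffled)))
    fused = np.mean(feats, axis=0)        # fundamental-frequency representation
    return fused / (np.linalg.norm(fused) + 1e-12)
```
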
27 | # Results
28 |
29 | 
30 |
31 | Quantitative Comparison of SeLo Performance on Different Retrieval Models
32 |
33 | | Model | ↑Rsu | ↑Rda | ↓Ras | ↑Rmi | Time(m) |
34 | | :------------: | :--------: | :--------: | :--------: | :--------: | :-------: |
35 | | VSE++ | 0.6364 | 0.5829 | 0.4166 | 0.6045 | 25.23 |
36 | | LW-MCR | 0.6698 | 0.6021 | 0.4335 | 0.6167 | 25.01 |
37 | | SCAN | 0.6421 | 0.6132 | 0.3871 | 0.6247 | 26.38 |
38 | | CAMP | 0.6819 | 0.6314 | 0.3912 | 0.6437 | 29.48 |
39 | | AMFMN | 0.6920 | 0.6667 | 0.3323 | 0.6772 | 27.35 |
40 | | **AMFMN-ours** | **0.7199** | **0.6658** | **0.2925** | **0.7021** | **13.34** |
41 |
42 | Experimental Results of SeLo V2 Method with Different Structures
43 |
44 | | Ablation Method | VS | MLE | STE | CSM | Med | Gass | ↑Rsu | ↑Rda | ↓Ras | ↑Rmi | Time(m) |
45 | | :-------------: | ---- | :--: | ---- | ---- | ---- | :--: | ------ | ------ | ------ | ------ | ------- |
46 | | m1 | √ | | √ | | √ | | 0.6920 | 0.6667 | 0.3323 | 0.6772 | 27.3510 |
47 | | m2 | | √ | √ | | √ | | 0.6976 | 0.6381 | 0.3302 | 0.6730 | 22.7770 |
48 | | m3 | √ | | √ | | | √ | 0.6942 | 0.6792 | 0.3164 | 0.6867 | 23.2008 |
49 | | m4 | | √ | √ | | | √ | 0.7002 | 0.6575 | 0.3330 | 0.6780 | 14.3155 |
50 | | m5 | | √ | | √ | | √ | 0.7199 | 0.6658 | 0.2975 | 0.7021 | 13.3447 |
51 |
52 | This paper proposes a multilevel likelihood expansion and a chaotic self-feeding mechanism for a faster and more accurate SeLo task. Qualitative and quantitative experiments show that the proposed SeLo v2 effectively promotes the development of RS image SeLo tasks.
53 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | Use this section to tell people about which versions of your project are
6 | currently being supported with security updates.
7 |
8 | | Version | Supported |
9 | | ------- | ------------------ |
10 | | 5.1.x | :white_check_mark: |
11 | | 5.0.x | :x: |
12 | | 4.0.x | :white_check_mark: |
13 | | < 4.0 | :x: |
14 |
15 | ## Reporting a Vulnerability
16 |
17 | Use this section to tell people how to report a vulnerability.
18 |
19 | Tell them where to go, how often they can expect to get an update on a
20 | reported vulnerability, what to expect if the vulnerability is accepted or
21 | declined, etc.
22 |
--------------------------------------------------------------------------------
/evaluations/SLM.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/03/08
6 |
7 | import logging
8 | from functools import reduce
9 |
10 | import cv2
11 | import numpy as np
12 | from scipy.ndimage import maximum_filter
13 | from skimage import measure
14 |
15 |
16 | class SLM(object):
17 | def __init__(self):
18 | # logging
19 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
20 | self.logger = logging.getLogger()
21 |
22 | # parameters
23 | self.rsu_beta = 0.707
24 | self.rsu_eps = 1e-7
25 |
26 | self.ras_expand_factor = 1.5
27 | self.ras_filter_times = 5
28 | self.ras_scala_beta = 3
29 |
30 | self.rda_eta = 0.5
31 |
32 | self.rmi_wsu = 0.4
33 | self.rmi_was = 0.35
34 | self.rmi_wda = 0.25
35 |
36 | # visual settings
37 | self.visual_ras = False
38 | self.src_addmap_path = None
39 |
40 | # sum indicator
41 | self.all_metrics = self._format_output_dict()
42 |
43 | def _format_output_dict(self, *params):
44 | """
45 | format output dict
46 | :param params: keys
47 | :return: format dict
48 | """
49 | len_params = len(params)
50 | if len_params == 0: init_param = [[] for i in range(4)]
51 | elif len_params == 4: init_param = params
52 | else: raise NotImplementedError
53 |
54 | return {
55 | "↑ Rsu [0 ~ 1]": init_param[0],
56 | "↑ Rda [0 ~ 1]": init_param[1],
57 | "↓ Ras [0 ~ 1]": init_param[2],
58 | "↑ Rmi [0 ~ 1]": init_param[3]
59 | }
60 |
61 | def logging_acc(self, metrics_dict, prob_path = None, ave = False):
62 | """
63 | logging the metrics
64 | :param metrics_dict: dict of metrics
65 | :param prob_path: path
66 | :return: 0
67 | """
68 |
69 | if not ave:
70 | self.logger.info("Eval {}".format(prob_path))
71 | else:
72 | self.logger.info("+++++++++++++++Average++++++++++++++")
73 |
74 | self.logger.info("+++++++ Calc the SLM METRICS +++++++")
75 | for metric, value in metrics_dict.items():
76 | self.logger.info("++++ {}:{:.4f} ++++".format(metric, value))
77 | self.logger.info("++++++++++++++++++++++++++++++++++++\n")
78 |
79 | def set_visual_options(self, visual_ras, src_addmap_path):
80 | """
81 | set visual options
82 | :param visual_ras: flag
83 | :param src_addmap_path: set src addmap path
84 | """
85 | self.visual_ras = visual_ras
86 | self.src_addmap_path = src_addmap_path
87 | return True
88 |
89 | def read_gray_to_prob(self, probmap_path):
90 | """
91 |         Read the prob map and convert it to a probability map
92 |         :param probmap_path: probmap path
93 | :return: probability
94 | """
95 | gray_image = cv2.imread(probmap_path, cv2.IMREAD_GRAYSCALE)
96 | prob = gray_image / 255.0
97 | return prob
98 |
99 | def generate_mask_by_points(self, prob, points_list):
100 | """
101 | Generate mask by regions
102 | :param prob: probability
103 | :param points_list: regions
104 | :return: mask
105 | """
106 | H, W = prob.shape
107 |
108 | mask = np.zeros((H, W))
109 |
110 | points_list = [np.array(i, np.int32) for i in points_list]
111 |
112 | # fill
113 | cv2.fillPoly(mask, points_list, 1)
114 |
115 | return mask
116 |
117 | def _get_region_center_radius(self, region_point):
118 | """
119 | get the region center and radius
120 | :param region_point: regions
121 | :return: mid_x, mid_y, radius
122 | """
123 | mid_x = int(reduce(lambda x, y: x+y, np.array(region_point)[:, 0]) / len(region_point))
124 | mid_y = int(reduce(lambda x, y: x+y, np.array(region_point)[:, 1]) / len(region_point))
125 | radius = int(np.mean([np.linalg.norm(np.array(point) - np.array([mid_x, mid_y])) for point in region_point]) * self.ras_expand_factor)
126 | return mid_x, mid_y, radius
127 |
128 | def _get_prob_center_in_gray(self, prob):
129 | """
130 | get the top point with the highest probability from the probability map
131 | :param prob: probability
132 | :return: centers
133 | """
134 |
135 | # recover the prob
136 | gray_img = np.asarray(prob * 255.0, dtype=np.uint8)
137 |
138 | # construct continuous area
139 | continuous_area = np.asarray(gray_img > 150, np.uint8) * 255
140 | continuous_area = np.uint8(measure.label(continuous_area, connectivity=2))
141 |
142 | # soften
143 | for i in range(self.ras_filter_times):
144 | gray_img = cv2.boxFilter(gray_img, ddepth=-1, ksize=(50, 50))
145 |
146 | # get probability binary map
147 | mx = maximum_filter(gray_img, size=1000)
148 | gray_img = np.where(mx == gray_img, gray_img, 0)
149 | gray_img = np.asarray(gray_img > 0, np.uint8) * 255
150 |
151 | # get probability area information
152 | labels = measure.label(gray_img, connectivity=2)
153 | all_region_infos = measure.regionprops(labels)
154 | centers = [[int(i) for i in prop.centroid][::-1] for prop in all_region_infos]
155 |
156 | # construct v-center list and sort
157 | v_center = [[c[0], c[1], prob[c[1]][c[0]]] for c in centers]
158 | v_center.sort(key= lambda x: x[2], reverse=True)
159 | centers = list(map(lambda x: x[:2], v_center))
160 |
161 | # filter centers
162 | centers = [i for i in centers if prob[i[1]][i[0]] >= 0.5]
163 |
164 | return centers, continuous_area
165 |
166 | def _get_offset_between_real_and_synthetic(self, real_center_radius, prob_centers, bina_img):
167 | """
168 | calculate true center offset from result center
169 | :param real_center_radius: real_center_radius
170 | :param prob_centers: prob_centers
171 | :return: offsets
172 | """
173 |
174 | # check prob_centers is not None
175 | if len(prob_centers) == 0 : return [real_center_radius[0][2]]
176 |
177 | offsets = []
178 | for center_radius in real_center_radius:
179 | x, y, r = center_radius
180 |
181 | # calc the l2 dis
182 |             dises = list(map(lambda p: np.linalg.norm(np.array([x, y]) - np.array(p)), prob_centers))
183 |
184 |             # filter the dis in circle
185 | dises = list(filter(lambda d: d <= r, dises))
186 |
187 | # if no prob center set it to radius
188 | offsets.append(np.mean(dises) if len(dises) != 0 else r)
189 |
190 | return offsets
191 |
192 | def _trans_ras_offset_to_scalable_ras(self, offsets, centers_and_radius):
193 | """
194 | convert distance offset to ras value
195 | :param offsets: offsets
196 |         :return: ras value
197 | """
198 |
199 | # granular transformation
200 |         granular_offset = np.mean([off / v[2] for off, v in zip(offsets, centers_and_radius)])
201 |
202 |         # scale transformation
203 |         granular_offset = (np.exp(self.ras_scala_beta * granular_offset) - 1) / (np.exp(self.ras_scala_beta) - 1)
204 |
205 |         return granular_offset
206 |
207 | def ras(self, region_lists, prob, visual=True, src_img=None):
208 | """
209 |         calc the metric of ras: makes the attention center close to the annotation center
210 | :param region_lists: regions
211 | :param prob: probability
212 | :return: ras
213 | """
214 |
215 | # get the annotation center and radius
216 | centers_and_radius = [self._get_region_center_radius(i) for i in region_lists]
217 |
218 | # get the point with the highest probability from the probability map
219 | prob_centers, bina_img = self._get_prob_center_in_gray(prob)
220 |
221 | # calculate true center offset from result center
222 | offsets = self._get_offset_between_real_and_synthetic(centers_and_radius, prob_centers, bina_img)
223 |
224 |         # convert distance offset to ras value
225 | ras = self._trans_ras_offset_to_scalable_ras(offsets, centers_and_radius)
226 |
227 | # visual
228 |         if visual and (src_img is not None):
229 | src_img = cv2.imread(src_img)
230 |
231 | # logging something
232 | print("centers_and_radius: ", centers_and_radius)
233 | print("prob_centers: ", prob_centers)
234 | print("offsets: ", offsets)
235 |
236 | # backup area
237 | for c_r in centers_and_radius:
238 | cv2.circle(src_img, (c_r[0], c_r[1]), c_r[2], 2, 3)
239 |
240 | # candidate points
241 | for idx, point in enumerate(prob_centers):
242 | cv2.circle(src_img, tuple(point), 6*(idx+1), 1, 4)
243 | cv2.putText(src_img, str(idx+1), tuple(point), cv2.FONT_HERSHEY_COMPLEX, 6, (0, 0, 0), 25)
244 |
245 | cv2.imwrite("./img_circle.jpg", src_img)
246 |
247 | print(prob_centers)
248 |
249 | return ras
250 |
251 | def rsu(self, prob, mask):
252 | """
253 | calc the salient area proportion
254 | :param prob: probability
255 | :param mask: mask
256 | :return: rsu
257 | """
258 |
259 | all_mask_value = np.sum(np.multiply(prob, mask))
260 | all_value = np.sum(prob)
261 | H, W = np.shape(mask)
262 | all_mask = np.sum(mask)
263 |
264 | left_frac = all_mask_value / (all_value - all_mask_value + self.rsu_eps)
265 |
266 | right_frac = (H * W - all_mask) / all_mask
267 |
268 | rsu = -np.exp(-1 * self.rsu_beta * left_frac * right_frac) + 1
269 |
270 | return rsu
271 |
272 | def rda(self, region_lists, prob):
273 | """
274 |         calc the metric of rda: makes the attention center focus on one point
275 | :param region_lists: regions
276 | :param prob: probability
277 | :return: rda
278 | """
279 |
280 | # get the annotation center and radius
281 | centers_and_radius = [self._get_region_center_radius(i) for i in region_lists]
282 |
283 | # get the point with the highest probability from the probability map
284 | prob_centers, bina_img = self._get_prob_center_in_gray(prob)
285 |
286 | # set value
287 | rda = []
288 | for c_r in centers_and_radius:
289 | x, y, r = c_r
290 |
291 | # calc the backup points
292 |             backup_points = list(filter(lambda p: np.linalg.norm(np.array([x, y]) - np.array(p)) <= r, prob_centers))
293 |
294 | # margin condition
295 | len_backup_points = len(backup_points)
296 | if len_backup_points <= 1 :
297 | rda.append(float(len_backup_points))
298 | continue
299 |
300 | # if len_backup_points >= 2, calc the attention discrete
301 | centers_attention = np.average(backup_points, axis=0)
302 |             dises = list(map(lambda p: np.linalg.norm(centers_attention - np.array(p)), backup_points))
303 | meas_dis = np.mean(dises) / r
304 |
305 | rda_single = 0.5 * (1 - meas_dis) + np.exp(- self.rda_eta * (len_backup_points + 2))
306 |
307 | rda.append(rda_single)
308 |
309 | return np.mean(rda)
310 |
311 |
312 | def rmi(self, rsu, rda, ras):
313 | """
314 | calculate the mean indicator
315 | :param rsu: rsu
316 | :param rda: rda
317 | :param ras: ras
318 | :return: rmi
319 | """
320 | return self.rmi_wsu * rsu + self.rmi_was * (1 - ras) + self.rmi_wda * rda
321 |
322 | def evaluate(self, prob_path, region_list):
323 | """
324 | evaluate the slm task
325 | :param prob_path: probability map path
326 | :param region_list: region points
327 | :return: slm metrics
328 | """
329 | # read prob
330 | prob = self.read_gray_to_prob(prob_path)
331 |
332 | # generate mask
333 | mask = self.generate_mask_by_points(prob, region_list)
334 |
335 | # rsu
336 | rsu = self.rsu(prob, mask)
337 |
338 | # ras
339 | ras = self.ras(region_list, prob, visual=self.visual_ras, src_img=self.src_addmap_path)
340 |
341 | # rda
342 | rda = self.rda(region_list, prob)
343 |
344 | # mi
345 | rmi = self.rmi(rsu, rda, ras)
346 |
347 | # sort metrics
348 | metrics = self._format_output_dict(rsu, rda, ras, rmi)
349 | self.logging_acc(metrics, prob_path)
350 |
351 | return metrics
352 |
353 | def append_metric(self, metric):
354 | """
355 | append metric to calc ave indicator
356 | :param metric: sort metrics
357 | """
358 | for k in metric.keys():
359 | self.all_metrics[k].append(metric[k])
360 |
361 | def get_the_mean_metric(self):
362 | """
363 | get the mean metric
364 | """
365 | mean_metric = {}
366 | for k in self.all_metrics:
367 | mean_metric[k] = np.mean(self.all_metrics[k])
368 |
369 | self.logging_acc(mean_metric, ave=True)
370 | return mean_metric
371 |
372 |
373 | if __name__ == "__main__":
374 |
375 | points = [
376 | [
377 | [
378 | 1163.84851074219,
379 | 948.244812011719
380 | ],
381 | [
382 | 1360.29187011719,
383 | 883.699096679688
384 | ],
385 | [
386 | 1497.80224609375,
387 | 993.146118164063
388 | ],
389 | [
390 | 1649.34436035156,
391 | 1299.03637695313
392 | ],
393 | [
394 | 1660.56970214844,
395 | 2067.9716796875
396 | ],
397 | [
398 | 1691.43933105469,
399 | 2132.51733398438
400 | ],
401 | [
402 | 1635.31262207031,
403 | 2292.478515625
404 | ],
405 | [
406 | 1090.88391113281,
407 | 2289.67211914063
408 | ],
409 | [
410 | 1147.01062011719,
411 | 1481.44812011719
412 | ],
413 | [
414 | 1278.908203125,
415 | 1374.80749511719
416 | ],
417 | [
418 | 1166.65490722656,
419 | 1209.23376464844
420 | ],
421 | [
422 | 1169.46118164063,
423 | 937.019470214844
424 | ],
425 | [
426 | 1169.46118164063,
427 | 937.019470214844
428 | ],
429 | [
430 | 1169.46118164063,
431 | 937.019470214844
432 | ]
433 | ]
434 | ]
435 |
436 | prob_path = "../predict/cache/probmap_9.jpg"
437 | add_path = "../predict/cache/addmap_9.jpg"
438 |
439 | # eval
440 | slm = SLM()
441 | # slm.set_visual_options(visual_ras=True, src_addmap_path=add_path)
442 | metrics = slm.evaluate(prob_path, region_list=points)
443 |
--------------------------------------------------------------------------------
/figure/SeLo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/SeLo.jpg
--------------------------------------------------------------------------------
/figure/compare.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/compare.jpg
--------------------------------------------------------------------------------
/figure/csm3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/csm3.jpg
--------------------------------------------------------------------------------
/figure/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/demo.gif
--------------------------------------------------------------------------------
/figure/fig1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/fig1.jpg
--------------------------------------------------------------------------------
/figure/indicator.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/indicator.jpg
--------------------------------------------------------------------------------
/figure/indicator_verify.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/indicator_verify.jpg
--------------------------------------------------------------------------------
/figure/sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/sample.jpg
--------------------------------------------------------------------------------
/figure/selo_with_subtask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/selo_with_subtask.jpg
--------------------------------------------------------------------------------
/figure/stage1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/stage1.jpg
--------------------------------------------------------------------------------
/figure/stage2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/stage2.jpg
--------------------------------------------------------------------------------
/figure/time_analse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/time_analse.png
--------------------------------------------------------------------------------
/figure/visual_data2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/figure/visual_data2.jpg
--------------------------------------------------------------------------------
/predict/__pycache__/model_init.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/__pycache__/model_init.cpython-37.pyc
--------------------------------------------------------------------------------
/predict/__pycache__/vocabs.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/__pycache__/vocabs.cpython-37.pyc
--------------------------------------------------------------------------------
/predict/cache/README.md:
--------------------------------------------------------------------------------
1 | Save SeLo map here.
--------------------------------------------------------------------------------
/predict/checkpoints/README.md:
--------------------------------------------------------------------------------
1 | Download SLM_checkpoints.zip, unzip it and put it here.
2 |
3 | ```
4 | -SLM
5 | ---...
6 | ---predict
7 | -----...
8 | -----checkpoints
9 | -------AMFMN.tar
10 | -------RSITMD_LW_MCR.tar
11 |
12 | ```
13 |
--------------------------------------------------------------------------------
/predict/data/rsitmd_precomp/test_filename.txt:
--------------------------------------------------------------------------------
1 | boat_0.tif
2 | boat_0.tif
3 | boat_0.tif
4 | boat_0.tif
5 | boat_0.tif
6 | playground_1.tif
7 | playground_1.tif
8 | playground_1.tif
9 | playground_1.tif
10 | playground_1.tif
11 | airport_2.tif
12 | airport_2.tif
13 | airport_2.tif
14 | airport_2.tif
15 | airport_2.tif
16 | airport_3.tif
17 | airport_3.tif
18 | airport_3.tif
19 | airport_3.tif
20 | airport_3.tif
21 | airport_4.tif
22 | airport_4.tif
23 | airport_4.tif
24 | airport_4.tif
25 | airport_4.tif
26 | airport_5.tif
27 | airport_5.tif
28 | airport_5.tif
29 | airport_5.tif
30 | airport_5.tif
31 | airport_6.tif
32 | airport_6.tif
33 | airport_6.tif
34 | airport_6.tif
35 | airport_6.tif
36 | airport_7.tif
37 | airport_7.tif
38 | airport_7.tif
39 | airport_7.tif
40 | airport_7.tif
41 | airport_8.tif
42 | airport_8.tif
43 | airport_8.tif
44 | airport_8.tif
45 | airport_8.tif
46 | airport_9.tif
47 | airport_9.tif
48 | airport_9.tif
49 | airport_9.tif
50 | airport_9.tif
51 | airport_10.tif
52 | airport_10.tif
53 | airport_10.tif
54 | airport_10.tif
55 | airport_10.tif
56 | airport_11.tif
57 | airport_11.tif
58 | airport_11.tif
59 | airport_11.tif
60 | airport_11.tif
61 | airport_12.tif
62 | airport_12.tif
63 | airport_12.tif
64 | airport_12.tif
65 | airport_12.tif
66 | bareland_13.tif
67 | bareland_13.tif
68 | bareland_13.tif
69 | bareland_13.tif
70 | bareland_13.tif
71 | bareland_14.tif
72 | bareland_14.tif
73 | bareland_14.tif
74 | bareland_14.tif
75 | bareland_14.tif
76 | bareland_15.tif
77 | bareland_15.tif
78 | bareland_15.tif
79 | bareland_15.tif
80 | bareland_15.tif
81 | bareland_16.tif
82 | bareland_16.tif
83 | bareland_16.tif
84 | bareland_16.tif
85 | bareland_16.tif
86 | bareland_17.tif
87 | bareland_17.tif
88 | bareland_17.tif
89 | bareland_17.tif
90 | bareland_17.tif
91 | baseballfield_18.tif
92 | baseballfield_18.tif
93 | baseballfield_18.tif
94 | baseballfield_18.tif
95 | baseballfield_18.tif
96 | baseballfield_19.tif
97 | baseballfield_19.tif
98 | baseballfield_19.tif
99 | baseballfield_19.tif
100 | baseballfield_19.tif
101 | baseballfield_20.tif
102 | baseballfield_20.tif
103 | baseballfield_20.tif
104 | baseballfield_20.tif
105 | baseballfield_20.tif
106 | baseballfield_21.tif
107 | baseballfield_21.tif
108 | baseballfield_21.tif
109 | baseballfield_21.tif
110 | baseballfield_21.tif
111 | baseballfield_22.tif
112 | baseballfield_22.tif
113 | baseballfield_22.tif
114 | baseballfield_22.tif
115 | baseballfield_22.tif
116 | baseballfield_23.tif
117 | baseballfield_23.tif
118 | baseballfield_23.tif
119 | baseballfield_23.tif
120 | baseballfield_23.tif
121 | baseballfield_24.tif
122 | baseballfield_24.tif
123 | baseballfield_24.tif
124 | baseballfield_24.tif
125 | baseballfield_24.tif
126 | baseballfield_25.tif
127 | baseballfield_25.tif
128 | baseballfield_25.tif
129 | baseballfield_25.tif
130 | baseballfield_25.tif
131 | baseballfield_26.tif
132 | baseballfield_26.tif
133 | baseballfield_26.tif
134 | baseballfield_26.tif
135 | baseballfield_26.tif
136 | beach_27.tif
137 | beach_27.tif
138 | beach_27.tif
139 | beach_27.tif
140 | beach_27.tif
141 | beach_28.tif
142 | beach_28.tif
143 | beach_28.tif
144 | beach_28.tif
145 | beach_28.tif
146 | beach_29.tif
147 | beach_29.tif
148 | beach_29.tif
149 | beach_29.tif
150 | beach_29.tif
151 | beach_30.tif
152 | beach_30.tif
153 | beach_30.tif
154 | beach_30.tif
155 | beach_30.tif
156 | beach_31.tif
157 | beach_31.tif
158 | beach_31.tif
159 | beach_31.tif
160 | beach_31.tif
161 | beach_32.tif
162 | beach_32.tif
163 | beach_32.tif
164 | beach_32.tif
165 | beach_32.tif
166 | beach_33.tif
167 | beach_33.tif
168 | beach_33.tif
169 | beach_33.tif
170 | beach_33.tif
171 | beach_34.tif
172 | beach_34.tif
173 | beach_34.tif
174 | beach_34.tif
175 | beach_34.tif
176 | beach_35.tif
177 | beach_35.tif
178 | beach_35.tif
179 | beach_35.tif
180 | beach_35.tif
181 | beach_36.tif
182 | beach_36.tif
183 | beach_36.tif
184 | beach_36.tif
185 | beach_36.tif
186 | beach_37.tif
187 | beach_37.tif
188 | beach_37.tif
189 | beach_37.tif
190 | beach_37.tif
191 | beach_38.tif
192 | beach_38.tif
193 | beach_38.tif
194 | beach_38.tif
195 | beach_38.tif
196 | beach_39.tif
197 | beach_39.tif
198 | beach_39.tif
199 | beach_39.tif
200 | beach_39.tif
201 | beach_40.tif
202 | beach_40.tif
203 | beach_40.tif
204 | beach_40.tif
205 | beach_40.tif
206 | beach_41.tif
207 | beach_41.tif
208 | beach_41.tif
209 | beach_41.tif
210 | beach_41.tif
211 | bridge_42.tif
212 | bridge_42.tif
213 | bridge_42.tif
214 | bridge_42.tif
215 | bridge_42.tif
216 | bridge_43.tif
217 | bridge_43.tif
218 | bridge_43.tif
219 | bridge_43.tif
220 | bridge_43.tif
221 | bridge_44.tif
222 | bridge_44.tif
223 | bridge_44.tif
224 | bridge_44.tif
225 | bridge_44.tif
226 | bridge_45.tif
227 | bridge_45.tif
228 | bridge_45.tif
229 | bridge_45.tif
230 | bridge_45.tif
231 | bridge_46.tif
232 | bridge_46.tif
233 | bridge_46.tif
234 | bridge_46.tif
235 | bridge_46.tif
236 | bridge_47.tif
237 | bridge_47.tif
238 | bridge_47.tif
239 | bridge_47.tif
240 | bridge_47.tif
241 | bridge_48.tif
242 | bridge_48.tif
243 | bridge_48.tif
244 | bridge_48.tif
245 | bridge_48.tif
246 | bridge_49.tif
247 | bridge_49.tif
248 | bridge_49.tif
249 | bridge_49.tif
250 | bridge_49.tif
251 | bridge_50.tif
252 | bridge_50.tif
253 | bridge_50.tif
254 | bridge_50.tif
255 | bridge_50.tif
256 | bridge_51.tif
257 | bridge_51.tif
258 | bridge_51.tif
259 | bridge_51.tif
260 | bridge_51.tif
261 | bridge_52.tif
262 | bridge_52.tif
263 | bridge_52.tif
264 | bridge_52.tif
265 | bridge_52.tif
266 | bridge_53.tif
267 | bridge_53.tif
268 | bridge_53.tif
269 | bridge_53.tif
270 | bridge_53.tif
271 | bridge_54.tif
272 | bridge_54.tif
273 | bridge_54.tif
274 | bridge_54.tif
275 | bridge_54.tif
276 | bridge_55.tif
277 | bridge_55.tif
278 | bridge_55.tif
279 | bridge_55.tif
280 | bridge_55.tif
281 | bridge_56.tif
282 | bridge_56.tif
283 | bridge_56.tif
284 | bridge_56.tif
285 | bridge_56.tif
286 | center_57.tif
287 | center_57.tif
288 | center_57.tif
289 | center_57.tif
290 | center_57.tif
291 | center_58.tif
292 | center_58.tif
293 | center_58.tif
294 | center_58.tif
295 | center_58.tif
296 | center_59.tif
297 | center_59.tif
298 | center_59.tif
299 | center_59.tif
300 | center_59.tif
301 | center_60.tif
302 | center_60.tif
303 | center_60.tif
304 | center_60.tif
305 | center_60.tif
306 | center_61.tif
307 | center_61.tif
308 | center_61.tif
309 | center_61.tif
310 | center_61.tif
311 | center_62.tif
312 | center_62.tif
313 | center_62.tif
314 | center_62.tif
315 | center_62.tif
316 | center_63.tif
317 | center_63.tif
318 | center_63.tif
319 | center_63.tif
320 | center_63.tif
321 | center_64.tif
322 | center_64.tif
323 | center_64.tif
324 | center_64.tif
325 | center_64.tif
326 | center_65.tif
327 | center_65.tif
328 | center_65.tif
329 | center_65.tif
330 | center_65.tif
331 | center_66.tif
332 | center_66.tif
333 | center_66.tif
334 | center_66.tif
335 | center_66.tif
336 | center_67.tif
337 | center_67.tif
338 | center_67.tif
339 | center_67.tif
340 | center_67.tif
341 | center_68.tif
342 | center_68.tif
343 | center_68.tif
344 | center_68.tif
345 | center_68.tif
346 | center_69.tif
347 | center_69.tif
348 | center_69.tif
349 | center_69.tif
350 | center_69.tif
351 | center_70.tif
352 | center_70.tif
353 | center_70.tif
354 | center_70.tif
355 | center_70.tif
356 | church_71.tif
357 | church_71.tif
358 | church_71.tif
359 | church_71.tif
360 | church_71.tif
361 | church_72.tif
362 | church_72.tif
363 | church_72.tif
364 | church_72.tif
365 | church_72.tif
366 | church_73.tif
367 | church_73.tif
368 | church_73.tif
369 | church_73.tif
370 | church_73.tif
371 | church_74.tif
372 | church_74.tif
373 | church_74.tif
374 | church_74.tif
375 | church_74.tif
376 | church_75.tif
377 | church_75.tif
378 | church_75.tif
379 | church_75.tif
380 | church_75.tif
381 | church_76.tif
382 | church_76.tif
383 | church_76.tif
384 | church_76.tif
385 | church_76.tif
386 | church_77.tif
387 | church_77.tif
388 | church_77.tif
389 | church_77.tif
390 | church_77.tif
391 | church_78.tif
392 | church_78.tif
393 | church_78.tif
394 | church_78.tif
395 | church_78.tif
396 | church_79.tif
397 | church_79.tif
398 | church_79.tif
399 | church_79.tif
400 | church_79.tif
401 | church_80.tif
402 | church_80.tif
403 | church_80.tif
404 | church_80.tif
405 | church_80.tif
406 | church_81.tif
407 | church_81.tif
408 | church_81.tif
409 | church_81.tif
410 | church_81.tif
411 | church_82.tif
412 | church_82.tif
413 | church_82.tif
414 | church_82.tif
415 | church_82.tif
416 | church_83.tif
417 | church_83.tif
418 | church_83.tif
419 | church_83.tif
420 | church_83.tif
421 | commercial_84.tif
422 | commercial_84.tif
423 | commercial_84.tif
424 | commercial_84.tif
425 | commercial_84.tif
426 | commercial_85.tif
427 | commercial_85.tif
428 | commercial_85.tif
429 | commercial_85.tif
430 | commercial_85.tif
431 | commercial_86.tif
432 | commercial_86.tif
433 | commercial_86.tif
434 | commercial_86.tif
435 | commercial_86.tif
436 | commercial_87.tif
437 | commercial_87.tif
438 | commercial_87.tif
439 | commercial_87.tif
440 | commercial_87.tif
441 | commercial_88.tif
442 | commercial_88.tif
443 | commercial_88.tif
444 | commercial_88.tif
445 | commercial_88.tif
446 | commercial_89.tif
447 | commercial_89.tif
448 | commercial_89.tif
449 | commercial_89.tif
450 | commercial_89.tif
451 | commercial_90.tif
452 | commercial_90.tif
453 | commercial_90.tif
454 | commercial_90.tif
455 | commercial_90.tif
456 | commercial_91.tif
457 | commercial_91.tif
458 | commercial_91.tif
459 | commercial_91.tif
460 | commercial_91.tif
461 | commercial_92.tif
462 | commercial_92.tif
463 | commercial_92.tif
464 | commercial_92.tif
465 | commercial_92.tif
466 | commercial_93.tif
467 | commercial_93.tif
468 | commercial_93.tif
469 | commercial_93.tif
470 | commercial_93.tif
471 | commercial_94.tif
472 | commercial_94.tif
473 | commercial_94.tif
474 | commercial_94.tif
475 | commercial_94.tif
476 | commercial_95.tif
477 | commercial_95.tif
478 | commercial_95.tif
479 | commercial_95.tif
480 | commercial_95.tif
481 | commercial_96.tif
482 | commercial_96.tif
483 | commercial_96.tif
484 | commercial_96.tif
485 | commercial_96.tif
486 | commercial_97.tif
487 | commercial_97.tif
488 | commercial_97.tif
489 | commercial_97.tif
490 | commercial_97.tif
491 | denseresidential_98.tif
492 | denseresidential_98.tif
493 | denseresidential_98.tif
494 | denseresidential_98.tif
495 | denseresidential_98.tif
496 | denseresidential_99.tif
497 | denseresidential_99.tif
498 | denseresidential_99.tif
499 | denseresidential_99.tif
500 | denseresidential_99.tif
501 | denseresidential_100.tif
502 | denseresidential_100.tif
503 | denseresidential_100.tif
504 | denseresidential_100.tif
505 | denseresidential_100.tif
506 | denseresidential_101.tif
507 | denseresidential_101.tif
508 | denseresidential_101.tif
509 | denseresidential_101.tif
510 | denseresidential_101.tif
511 | denseresidential_102.tif
512 | denseresidential_102.tif
513 | denseresidential_102.tif
514 | denseresidential_102.tif
515 | denseresidential_102.tif
516 | denseresidential_103.tif
517 | denseresidential_103.tif
518 | denseresidential_103.tif
519 | denseresidential_103.tif
520 | denseresidential_103.tif
521 | denseresidential_104.tif
522 | denseresidential_104.tif
523 | denseresidential_104.tif
524 | denseresidential_104.tif
525 | denseresidential_104.tif
526 | denseresidential_105.tif
527 | denseresidential_105.tif
528 | denseresidential_105.tif
529 | denseresidential_105.tif
530 | denseresidential_105.tif
531 | denseresidential_106.tif
532 | denseresidential_106.tif
533 | denseresidential_106.tif
534 | denseresidential_106.tif
535 | denseresidential_106.tif
536 | denseresidential_107.tif
537 | denseresidential_107.tif
538 | denseresidential_107.tif
539 | denseresidential_107.tif
540 | denseresidential_107.tif
541 | denseresidential_108.tif
542 | denseresidential_108.tif
543 | denseresidential_108.tif
544 | denseresidential_108.tif
545 | denseresidential_108.tif
546 | denseresidential_109.tif
547 | denseresidential_109.tif
548 | denseresidential_109.tif
549 | denseresidential_109.tif
550 | denseresidential_109.tif
551 | denseresidential_110.tif
552 | denseresidential_110.tif
553 | denseresidential_110.tif
554 | denseresidential_110.tif
555 | denseresidential_110.tif
556 | denseresidential_111.tif
557 | denseresidential_111.tif
558 | denseresidential_111.tif
559 | denseresidential_111.tif
560 | denseresidential_111.tif
561 | desert_112.tif
562 | desert_112.tif
563 | desert_112.tif
564 | desert_112.tif
565 | desert_112.tif
566 | desert_113.tif
567 | desert_113.tif
568 | desert_113.tif
569 | desert_113.tif
570 | desert_113.tif
571 | desert_114.tif
572 | desert_114.tif
573 | desert_114.tif
574 | desert_114.tif
575 | desert_114.tif
576 | desert_115.tif
577 | desert_115.tif
578 | desert_115.tif
579 | desert_115.tif
580 | desert_115.tif
581 | desert_116.tif
582 | desert_116.tif
583 | desert_116.tif
584 | desert_116.tif
585 | desert_116.tif
586 | desert_117.tif
587 | desert_117.tif
588 | desert_117.tif
589 | desert_117.tif
590 | desert_117.tif
591 | desert_118.tif
592 | desert_118.tif
593 | desert_118.tif
594 | desert_118.tif
595 | desert_118.tif
596 | desert_119.tif
597 | desert_119.tif
598 | desert_119.tif
599 | desert_119.tif
600 | desert_119.tif
601 | desert_120.tif
602 | desert_120.tif
603 | desert_120.tif
604 | desert_120.tif
605 | desert_120.tif
606 | desert_121.tif
607 | desert_121.tif
608 | desert_121.tif
609 | desert_121.tif
610 | desert_121.tif
611 | desert_122.tif
612 | desert_122.tif
613 | desert_122.tif
614 | desert_122.tif
615 | desert_122.tif
616 | desert_123.tif
617 | desert_123.tif
618 | desert_123.tif
619 | desert_123.tif
620 | desert_123.tif
621 | farmland_124.tif
622 | farmland_124.tif
623 | farmland_124.tif
624 | farmland_124.tif
625 | farmland_124.tif
626 | farmland_125.tif
627 | farmland_125.tif
628 | farmland_125.tif
629 | farmland_125.tif
630 | farmland_125.tif
631 | farmland_126.tif
632 | farmland_126.tif
633 | farmland_126.tif
634 | farmland_126.tif
635 | farmland_126.tif
636 | farmland_127.tif
637 | farmland_127.tif
638 | farmland_127.tif
639 | farmland_127.tif
640 | farmland_127.tif
641 | farmland_128.tif
642 | farmland_128.tif
643 | farmland_128.tif
644 | farmland_128.tif
645 | farmland_128.tif
646 | farmland_129.tif
647 | farmland_129.tif
648 | farmland_129.tif
649 | farmland_129.tif
650 | farmland_129.tif
651 | farmland_130.tif
652 | farmland_130.tif
653 | farmland_130.tif
654 | farmland_130.tif
655 | farmland_130.tif
656 | farmland_131.tif
657 | farmland_131.tif
658 | farmland_131.tif
659 | farmland_131.tif
660 | farmland_131.tif
661 | farmland_132.tif
662 | farmland_132.tif
663 | farmland_132.tif
664 | farmland_132.tif
665 | farmland_132.tif
666 | farmland_133.tif
667 | farmland_133.tif
668 | farmland_133.tif
669 | farmland_133.tif
670 | farmland_133.tif
671 | farmland_134.tif
672 | farmland_134.tif
673 | farmland_134.tif
674 | farmland_134.tif
675 | farmland_134.tif
676 | farmland_135.tif
677 | farmland_135.tif
678 | farmland_135.tif
679 | farmland_135.tif
680 | farmland_135.tif
681 | farmland_136.tif
682 | farmland_136.tif
683 | farmland_136.tif
684 | farmland_136.tif
685 | farmland_136.tif
686 | farmland_137.tif
687 | farmland_137.tif
688 | farmland_137.tif
689 | farmland_137.tif
690 | farmland_137.tif
691 | farmland_138.tif
692 | farmland_138.tif
693 | farmland_138.tif
694 | farmland_138.tif
695 | farmland_138.tif
696 | farmland_139.tif
697 | farmland_139.tif
698 | farmland_139.tif
699 | farmland_139.tif
700 | farmland_139.tif
701 | farmland_140.tif
702 | farmland_140.tif
703 | farmland_140.tif
704 | farmland_140.tif
705 | farmland_140.tif
706 | forest_141.tif
707 | forest_141.tif
708 | forest_141.tif
709 | forest_141.tif
710 | forest_141.tif
711 | forest_142.tif
712 | forest_142.tif
713 | forest_142.tif
714 | forest_142.tif
715 | forest_142.tif
716 | forest_143.tif
717 | forest_143.tif
718 | forest_143.tif
719 | forest_143.tif
720 | forest_143.tif
721 | forest_144.tif
722 | forest_144.tif
723 | forest_144.tif
724 | forest_144.tif
725 | forest_144.tif
726 | forest_145.tif
727 | forest_145.tif
728 | forest_145.tif
729 | forest_145.tif
730 | forest_145.tif
731 | forest_146.tif
732 | forest_146.tif
733 | forest_146.tif
734 | forest_146.tif
735 | forest_146.tif
736 | forest_147.tif
737 | forest_147.tif
738 | forest_147.tif
739 | forest_147.tif
740 | forest_147.tif
741 | forest_148.tif
742 | forest_148.tif
743 | forest_148.tif
744 | forest_148.tif
745 | forest_148.tif
746 | forest_149.tif
747 | forest_149.tif
748 | forest_149.tif
749 | forest_149.tif
750 | forest_149.tif
751 | forest_150.tif
752 | forest_150.tif
753 | forest_150.tif
754 | forest_150.tif
755 | forest_150.tif
756 | forest_151.tif
757 | forest_151.tif
758 | forest_151.tif
759 | forest_151.tif
760 | forest_151.tif
761 | industrial_152.tif
762 | industrial_152.tif
763 | industrial_152.tif
764 | industrial_152.tif
765 | industrial_152.tif
766 | industrial_153.tif
767 | industrial_153.tif
768 | industrial_153.tif
769 | industrial_153.tif
770 | industrial_153.tif
771 | industrial_154.tif
772 | industrial_154.tif
773 | industrial_154.tif
774 | industrial_154.tif
775 | industrial_154.tif
776 | industrial_155.tif
777 | industrial_155.tif
778 | industrial_155.tif
779 | industrial_155.tif
780 | industrial_155.tif
781 | industrial_156.tif
782 | industrial_156.tif
783 | industrial_156.tif
784 | industrial_156.tif
785 | industrial_156.tif
786 | industrial_157.tif
787 | industrial_157.tif
788 | industrial_157.tif
789 | industrial_157.tif
790 | industrial_157.tif
791 | industrial_158.tif
792 | industrial_158.tif
793 | industrial_158.tif
794 | industrial_158.tif
795 | industrial_158.tif
796 | industrial_159.tif
797 | industrial_159.tif
798 | industrial_159.tif
799 | industrial_159.tif
800 | industrial_159.tif
801 | industrial_160.tif
802 | industrial_160.tif
803 | industrial_160.tif
804 | industrial_160.tif
805 | industrial_160.tif
806 | industrial_161.tif
807 | industrial_161.tif
808 | industrial_161.tif
809 | industrial_161.tif
810 | industrial_161.tif
811 | industrial_162.tif
812 | industrial_162.tif
813 | industrial_162.tif
814 | industrial_162.tif
815 | industrial_162.tif
816 | industrial_163.tif
817 | industrial_163.tif
818 | industrial_163.tif
819 | industrial_163.tif
820 | industrial_163.tif
821 | industrial_164.tif
822 | industrial_164.tif
823 | industrial_164.tif
824 | industrial_164.tif
825 | industrial_164.tif
826 | industrial_165.tif
827 | industrial_165.tif
828 | industrial_165.tif
829 | industrial_165.tif
830 | industrial_165.tif
831 | industrial_166.tif
832 | industrial_166.tif
833 | industrial_166.tif
834 | industrial_166.tif
835 | industrial_166.tif
836 | industrial_167.tif
837 | industrial_167.tif
838 | industrial_167.tif
839 | industrial_167.tif
840 | industrial_167.tif
841 | industrial_168.tif
842 | industrial_168.tif
843 | industrial_168.tif
844 | industrial_168.tif
845 | industrial_168.tif
846 | industrial_169.tif
847 | industrial_169.tif
848 | industrial_169.tif
849 | industrial_169.tif
850 | industrial_169.tif
851 | industrial_170.tif
852 | industrial_170.tif
853 | industrial_170.tif
854 | industrial_170.tif
855 | industrial_170.tif
856 | industrial_171.tif
857 | industrial_171.tif
858 | industrial_171.tif
859 | industrial_171.tif
860 | industrial_171.tif
861 | meadow_172.tif
862 | meadow_172.tif
863 | meadow_172.tif
864 | meadow_172.tif
865 | meadow_172.tif
866 | meadow_173.tif
867 | meadow_173.tif
868 | meadow_173.tif
869 | meadow_173.tif
870 | meadow_173.tif
871 | meadow_174.tif
872 | meadow_174.tif
873 | meadow_174.tif
874 | meadow_174.tif
875 | meadow_174.tif
876 | meadow_175.tif
877 | meadow_175.tif
878 | meadow_175.tif
879 | meadow_175.tif
880 | meadow_175.tif
881 | meadow_176.tif
882 | meadow_176.tif
883 | meadow_176.tif
884 | meadow_176.tif
885 | meadow_176.tif
886 | meadow_177.tif
887 | meadow_177.tif
888 | meadow_177.tif
889 | meadow_177.tif
890 | meadow_177.tif
891 | meadow_178.tif
892 | meadow_178.tif
893 | meadow_178.tif
894 | meadow_178.tif
895 | meadow_178.tif
896 | meadow_179.tif
897 | meadow_179.tif
898 | meadow_179.tif
899 | meadow_179.tif
900 | meadow_179.tif
901 | meadow_180.tif
902 | meadow_180.tif
903 | meadow_180.tif
904 | meadow_180.tif
905 | meadow_180.tif
906 | meadow_181.tif
907 | meadow_181.tif
908 | meadow_181.tif
909 | meadow_181.tif
910 | meadow_181.tif
911 | meadow_182.tif
912 | meadow_182.tif
913 | meadow_182.tif
914 | meadow_182.tif
915 | meadow_182.tif
916 | meadow_183.tif
917 | meadow_183.tif
918 | meadow_183.tif
919 | meadow_183.tif
920 | meadow_183.tif
921 | mediumresidential_184.tif
922 | mediumresidential_184.tif
923 | mediumresidential_184.tif
924 | mediumresidential_184.tif
925 | mediumresidential_184.tif
926 | mediumresidential_185.tif
927 | mediumresidential_185.tif
928 | mediumresidential_185.tif
929 | mediumresidential_185.tif
930 | mediumresidential_185.tif
931 | mediumresidential_186.tif
932 | mediumresidential_186.tif
933 | mediumresidential_186.tif
934 | mediumresidential_186.tif
935 | mediumresidential_186.tif
936 | mediumresidential_187.tif
937 | mediumresidential_187.tif
938 | mediumresidential_187.tif
939 | mediumresidential_187.tif
940 | mediumresidential_187.tif
941 | mediumresidential_188.tif
942 | mediumresidential_188.tif
943 | mediumresidential_188.tif
944 | mediumresidential_188.tif
945 | mediumresidential_188.tif
946 | mediumresidential_189.tif
947 | mediumresidential_189.tif
948 | mediumresidential_189.tif
949 | mediumresidential_189.tif
950 | mediumresidential_189.tif
951 | mediumresidential_190.tif
952 | mediumresidential_190.tif
953 | mediumresidential_190.tif
954 | mediumresidential_190.tif
955 | mediumresidential_190.tif
956 | mediumresidential_191.tif
957 | mediumresidential_191.tif
958 | mediumresidential_191.tif
959 | mediumresidential_191.tif
960 | mediumresidential_191.tif
961 | mediumresidential_192.tif
962 | mediumresidential_192.tif
963 | mediumresidential_192.tif
964 | mediumresidential_192.tif
965 | mediumresidential_192.tif
966 | mediumresidential_193.tif
967 | mediumresidential_193.tif
968 | mediumresidential_193.tif
969 | mediumresidential_193.tif
970 | mediumresidential_193.tif
971 | mediumresidential_194.tif
972 | mediumresidential_194.tif
973 | mediumresidential_194.tif
974 | mediumresidential_194.tif
975 | mediumresidential_194.tif
976 | mediumresidential_195.tif
977 | mediumresidential_195.tif
978 | mediumresidential_195.tif
979 | mediumresidential_195.tif
980 | mediumresidential_195.tif
981 | mediumresidential_196.tif
982 | mediumresidential_196.tif
983 | mediumresidential_196.tif
984 | mediumresidential_196.tif
985 | mediumresidential_196.tif
986 | mountain_197.tif
987 | mountain_197.tif
988 | mountain_197.tif
989 | mountain_197.tif
990 | mountain_197.tif
991 | mountain_198.tif
992 | mountain_198.tif
993 | mountain_198.tif
994 | mountain_198.tif
995 | mountain_198.tif
996 | mountain_199.tif
997 | mountain_199.tif
998 | mountain_199.tif
999 | mountain_199.tif
1000 | mountain_199.tif
1001 | mountain_200.tif
1002 | mountain_200.tif
1003 | mountain_200.tif
1004 | mountain_200.tif
1005 | mountain_200.tif
1006 | mountain_201.tif
1007 | mountain_201.tif
1008 | mountain_201.tif
1009 | mountain_201.tif
1010 | mountain_201.tif
1011 | mountain_202.tif
1012 | mountain_202.tif
1013 | mountain_202.tif
1014 | mountain_202.tif
1015 | mountain_202.tif
1016 | mountain_203.tif
1017 | mountain_203.tif
1018 | mountain_203.tif
1019 | mountain_203.tif
1020 | mountain_203.tif
1021 | mountain_204.tif
1022 | mountain_204.tif
1023 | mountain_204.tif
1024 | mountain_204.tif
1025 | mountain_204.tif
1026 | mountain_205.tif
1027 | mountain_205.tif
1028 | mountain_205.tif
1029 | mountain_205.tif
1030 | mountain_205.tif
1031 | mountain_206.tif
1032 | mountain_206.tif
1033 | mountain_206.tif
1034 | mountain_206.tif
1035 | mountain_206.tif
1036 | park_207.tif
1037 | park_207.tif
1038 | park_207.tif
1039 | park_207.tif
1040 | park_207.tif
1041 | park_208.tif
1042 | park_208.tif
1043 | park_208.tif
1044 | park_208.tif
1045 | park_208.tif
1046 | park_209.tif
1047 | park_209.tif
1048 | park_209.tif
1049 | park_209.tif
1050 | park_209.tif
1051 | park_210.tif
1052 | park_210.tif
1053 | park_210.tif
1054 | park_210.tif
1055 | park_210.tif
1056 | park_211.tif
1057 | park_211.tif
1058 | park_211.tif
1059 | park_211.tif
1060 | park_211.tif
1061 | park_212.tif
1062 | park_212.tif
1063 | park_212.tif
1064 | park_212.tif
1065 | park_212.tif
1066 | park_213.tif
1067 | park_213.tif
1068 | park_213.tif
1069 | park_213.tif
1070 | park_213.tif
1071 | park_214.tif
1072 | park_214.tif
1073 | park_214.tif
1074 | park_214.tif
1075 | park_214.tif
1076 | park_215.tif
1077 | park_215.tif
1078 | park_215.tif
1079 | park_215.tif
1080 | park_215.tif
1081 | park_216.tif
1082 | park_216.tif
1083 | park_216.tif
1084 | park_216.tif
1085 | park_216.tif
1086 | park_217.tif
1087 | park_217.tif
1088 | park_217.tif
1089 | park_217.tif
1090 | park_217.tif
1091 | park_218.tif
1092 | park_218.tif
1093 | park_218.tif
1094 | park_218.tif
1095 | park_218.tif
1096 | park_219.tif
1097 | park_219.tif
1098 | park_219.tif
1099 | park_219.tif
1100 | park_219.tif
1101 | park_220.tif
1102 | park_220.tif
1103 | park_220.tif
1104 | park_220.tif
1105 | park_220.tif
1106 | park_221.tif
1107 | park_221.tif
1108 | park_221.tif
1109 | park_221.tif
1110 | park_221.tif
1111 | park_222.tif
1112 | park_222.tif
1113 | park_222.tif
1114 | park_222.tif
1115 | park_222.tif
1116 | parking_223.tif
1117 | parking_223.tif
1118 | parking_223.tif
1119 | parking_223.tif
1120 | parking_223.tif
1121 | parking_224.tif
1122 | parking_224.tif
1123 | parking_224.tif
1124 | parking_224.tif
1125 | parking_224.tif
1126 | parking_225.tif
1127 | parking_225.tif
1128 | parking_225.tif
1129 | parking_225.tif
1130 | parking_225.tif
1131 | parking_226.tif
1132 | parking_226.tif
1133 | parking_226.tif
1134 | parking_226.tif
1135 | parking_226.tif
1136 | parking_227.tif
1137 | parking_227.tif
1138 | parking_227.tif
1139 | parking_227.tif
1140 | parking_227.tif
1141 | parking_228.tif
1142 | parking_228.tif
1143 | parking_228.tif
1144 | parking_228.tif
1145 | parking_228.tif
1146 | parking_229.tif
1147 | parking_229.tif
1148 | parking_229.tif
1149 | parking_229.tif
1150 | parking_229.tif
1151 | parking_230.tif
1152 | parking_230.tif
1153 | parking_230.tif
1154 | parking_230.tif
1155 | parking_230.tif
1156 | parking_231.tif
1157 | parking_231.tif
1158 | parking_231.tif
1159 | parking_231.tif
1160 | parking_231.tif
1161 | parking_232.tif
1162 | parking_232.tif
1163 | parking_232.tif
1164 | parking_232.tif
1165 | parking_232.tif
1166 | parking_233.tif
1167 | parking_233.tif
1168 | parking_233.tif
1169 | parking_233.tif
1170 | parking_233.tif
1171 | parking_234.tif
1172 | parking_234.tif
1173 | parking_234.tif
1174 | parking_234.tif
1175 | parking_234.tif
1176 | parking_235.tif
1177 | parking_235.tif
1178 | parking_235.tif
1179 | parking_235.tif
1180 | parking_235.tif
1181 | parking_236.tif
1182 | parking_236.tif
1183 | parking_236.tif
1184 | parking_236.tif
1185 | parking_236.tif
1186 | playground_237.tif
1187 | playground_237.tif
1188 | playground_237.tif
1189 | playground_237.tif
1190 | playground_237.tif
1191 | playground_238.tif
1192 | playground_238.tif
1193 | playground_238.tif
1194 | playground_238.tif
1195 | playground_238.tif
1196 | playground_239.tif
1197 | playground_239.tif
1198 | playground_239.tif
1199 | playground_239.tif
1200 | playground_239.tif
1201 | playground_240.tif
1202 | playground_240.tif
1203 | playground_240.tif
1204 | playground_240.tif
1205 | playground_240.tif
1206 | playground_241.tif
1207 | playground_241.tif
1208 | playground_241.tif
1209 | playground_241.tif
1210 | playground_241.tif
1211 | playground_242.tif
1212 | playground_242.tif
1213 | playground_242.tif
1214 | playground_242.tif
1215 | playground_242.tif
1216 | playground_243.tif
1217 | playground_243.tif
1218 | playground_243.tif
1219 | playground_243.tif
1220 | playground_243.tif
1221 | playground_244.tif
1222 | playground_244.tif
1223 | playground_244.tif
1224 | playground_244.tif
1225 | playground_244.tif
1226 | playground_245.tif
1227 | playground_245.tif
1228 | playground_245.tif
1229 | playground_245.tif
1230 | playground_245.tif
1231 | playground_246.tif
1232 | playground_246.tif
1233 | playground_246.tif
1234 | playground_246.tif
1235 | playground_246.tif
1236 | playground_247.tif
1237 | playground_247.tif
1238 | playground_247.tif
1239 | playground_247.tif
1240 | playground_247.tif
1241 | playground_248.tif
1242 | playground_248.tif
1243 | playground_248.tif
1244 | playground_248.tif
1245 | playground_248.tif
1246 | playground_249.tif
1247 | playground_249.tif
1248 | playground_249.tif
1249 | playground_249.tif
1250 | playground_249.tif
1251 | playground_250.tif
1252 | playground_250.tif
1253 | playground_250.tif
1254 | playground_250.tif
1255 | playground_250.tif
1256 | playground_251.tif
1257 | playground_251.tif
1258 | playground_251.tif
1259 | playground_251.tif
1260 | playground_251.tif
1261 | playground_252.tif
1262 | playground_252.tif
1263 | playground_252.tif
1264 | playground_252.tif
1265 | playground_252.tif
1266 | pond_253.tif
1267 | pond_253.tif
1268 | pond_253.tif
1269 | pond_253.tif
1270 | pond_253.tif
1271 | pond_254.tif
1272 | pond_254.tif
1273 | pond_254.tif
1274 | pond_254.tif
1275 | pond_254.tif
1276 | pond_255.tif
1277 | pond_255.tif
1278 | pond_255.tif
1279 | pond_255.tif
1280 | pond_255.tif
1281 | pond_256.tif
1282 | pond_256.tif
1283 | pond_256.tif
1284 | pond_256.tif
1285 | pond_256.tif
1286 | pond_257.tif
1287 | pond_257.tif
1288 | pond_257.tif
1289 | pond_257.tif
1290 | pond_257.tif
1291 | pond_258.tif
1292 | pond_258.tif
1293 | pond_258.tif
1294 | pond_258.tif
1295 | pond_258.tif
1296 | pond_259.tif
1297 | pond_259.tif
1298 | pond_259.tif
1299 | pond_259.tif
1300 | pond_259.tif
1301 | pond_260.tif
1302 | pond_260.tif
1303 | pond_260.tif
1304 | pond_260.tif
1305 | pond_260.tif
1306 | pond_261.tif
1307 | pond_261.tif
1308 | pond_261.tif
1309 | pond_261.tif
1310 | pond_261.tif
1311 | pond_262.tif
1312 | pond_262.tif
1313 | pond_262.tif
1314 | pond_262.tif
1315 | pond_262.tif
1316 | pond_263.tif
1317 | pond_263.tif
1318 | pond_263.tif
1319 | pond_263.tif
1320 | pond_263.tif
1321 | pond_264.tif
1322 | pond_264.tif
1323 | pond_264.tif
1324 | pond_264.tif
1325 | pond_264.tif
1326 | pond_265.tif
1327 | pond_265.tif
1328 | pond_265.tif
1329 | pond_265.tif
1330 | pond_265.tif
1331 | pond_266.tif
1332 | pond_266.tif
1333 | pond_266.tif
1334 | pond_266.tif
1335 | pond_266.tif
1336 | pond_267.tif
1337 | pond_267.tif
1338 | pond_267.tif
1339 | pond_267.tif
1340 | pond_267.tif
1341 | pond_268.tif
1342 | pond_268.tif
1343 | pond_268.tif
1344 | pond_268.tif
1345 | pond_268.tif
1346 | pond_269.tif
1347 | pond_269.tif
1348 | pond_269.tif
1349 | pond_269.tif
1350 | pond_269.tif
1351 | pond_270.tif
1352 | pond_270.tif
1353 | pond_270.tif
1354 | pond_270.tif
1355 | pond_270.tif
1356 | pond_271.tif
1357 | pond_271.tif
1358 | pond_271.tif
1359 | pond_271.tif
1360 | pond_271.tif
1361 | pond_272.tif
1362 | pond_272.tif
1363 | pond_272.tif
1364 | pond_272.tif
1365 | pond_272.tif
1366 | pond_273.tif
1367 | pond_273.tif
1368 | pond_273.tif
1369 | pond_273.tif
1370 | pond_273.tif
1371 | pond_274.tif
1372 | pond_274.tif
1373 | pond_274.tif
1374 | pond_274.tif
1375 | pond_274.tif
1376 | port_275.tif
1377 | port_275.tif
1378 | port_275.tif
1379 | port_275.tif
1380 | port_275.tif
1381 | port_276.tif
1382 | port_276.tif
1383 | port_276.tif
1384 | port_276.tif
1385 | port_276.tif
1386 | port_277.tif
1387 | port_277.tif
1388 | port_277.tif
1389 | port_277.tif
1390 | port_277.tif
1391 | port_278.tif
1392 | port_278.tif
1393 | port_278.tif
1394 | port_278.tif
1395 | port_278.tif
1396 | port_279.tif
1397 | port_279.tif
1398 | port_279.tif
1399 | port_279.tif
1400 | port_279.tif
1401 | port_280.tif
1402 | port_280.tif
1403 | port_280.tif
1404 | port_280.tif
1405 | port_280.tif
1406 | port_281.tif
1407 | port_281.tif
1408 | port_281.tif
1409 | port_281.tif
1410 | port_281.tif
1411 | port_282.tif
1412 | port_282.tif
1413 | port_282.tif
1414 | port_282.tif
1415 | port_282.tif
1416 | port_283.tif
1417 | port_283.tif
1418 | port_283.tif
1419 | port_283.tif
1420 | port_283.tif
1421 | port_284.tif
1422 | port_284.tif
1423 | port_284.tif
1424 | port_284.tif
1425 | port_284.tif
1426 | port_285.tif
1427 | port_285.tif
1428 | port_285.tif
1429 | port_285.tif
1430 | port_285.tif
1431 | port_286.tif
1432 | port_286.tif
1433 | port_286.tif
1434 | port_286.tif
1435 | port_286.tif
1436 | port_287.tif
1437 | port_287.tif
1438 | port_287.tif
1439 | port_287.tif
1440 | port_287.tif
1441 | port_288.tif
1442 | port_288.tif
1443 | port_288.tif
1444 | port_288.tif
1445 | port_288.tif
1446 | port_289.tif
1447 | port_289.tif
1448 | port_289.tif
1449 | port_289.tif
1450 | port_289.tif
1451 | port_290.tif
1452 | port_290.tif
1453 | port_290.tif
1454 | port_290.tif
1455 | port_290.tif
1456 | port_291.tif
1457 | port_291.tif
1458 | port_291.tif
1459 | port_291.tif
1460 | port_291.tif
1461 | railwaystation_292.tif
1462 | railwaystation_292.tif
1463 | railwaystation_292.tif
1464 | railwaystation_292.tif
1465 | railwaystation_292.tif
1466 | railwaystation_293.tif
1467 | railwaystation_293.tif
1468 | railwaystation_293.tif
1469 | railwaystation_293.tif
1470 | railwaystation_293.tif
1471 | railwaystation_294.tif
1472 | railwaystation_294.tif
1473 | railwaystation_294.tif
1474 | railwaystation_294.tif
1475 | railwaystation_294.tif
1476 | railwaystation_295.tif
1477 | railwaystation_295.tif
1478 | railwaystation_295.tif
1479 | railwaystation_295.tif
1480 | railwaystation_295.tif
1481 | railwaystation_296.tif
1482 | railwaystation_296.tif
1483 | railwaystation_296.tif
1484 | railwaystation_296.tif
1485 | railwaystation_296.tif
1486 | railwaystation_297.tif
1487 | railwaystation_297.tif
1488 | railwaystation_297.tif
1489 | railwaystation_297.tif
1490 | railwaystation_297.tif
1491 | railwaystation_298.tif
1492 | railwaystation_298.tif
1493 | railwaystation_298.tif
1494 | railwaystation_298.tif
1495 | railwaystation_298.tif
1496 | railwaystation_299.tif
1497 | railwaystation_299.tif
1498 | railwaystation_299.tif
1499 | railwaystation_299.tif
1500 | railwaystation_299.tif
1501 | railwaystation_300.tif
1502 | railwaystation_300.tif
1503 | railwaystation_300.tif
1504 | railwaystation_300.tif
1505 | railwaystation_300.tif
1506 | railwaystation_301.tif
1507 | railwaystation_301.tif
1508 | railwaystation_301.tif
1509 | railwaystation_301.tif
1510 | railwaystation_301.tif
1511 | railwaystation_302.tif
1512 | railwaystation_302.tif
1513 | railwaystation_302.tif
1514 | railwaystation_302.tif
1515 | railwaystation_302.tif
1516 | resort_303.tif
1517 | resort_303.tif
1518 | resort_303.tif
1519 | resort_303.tif
1520 | resort_303.tif
1521 | resort_304.tif
1522 | resort_304.tif
1523 | resort_304.tif
1524 | resort_304.tif
1525 | resort_304.tif
1526 | resort_305.tif
1527 | resort_305.tif
1528 | resort_305.tif
1529 | resort_305.tif
1530 | resort_305.tif
1531 | resort_306.tif
1532 | resort_306.tif
1533 | resort_306.tif
1534 | resort_306.tif
1535 | resort_306.tif
1536 | resort_307.tif
1537 | resort_307.tif
1538 | resort_307.tif
1539 | resort_307.tif
1540 | resort_307.tif
1541 | resort_308.tif
1542 | resort_308.tif
1543 | resort_308.tif
1544 | resort_308.tif
1545 | resort_308.tif
1546 | resort_309.tif
1547 | resort_309.tif
1548 | resort_309.tif
1549 | resort_309.tif
1550 | resort_309.tif
1551 | resort_310.tif
1552 | resort_310.tif
1553 | resort_310.tif
1554 | resort_310.tif
1555 | resort_310.tif
1556 | resort_311.tif
1557 | resort_311.tif
1558 | resort_311.tif
1559 | resort_311.tif
1560 | resort_311.tif
1561 | resort_312.tif
1562 | resort_312.tif
1563 | resort_312.tif
1564 | resort_312.tif
1565 | resort_312.tif
1566 | resort_313.tif
1567 | resort_313.tif
1568 | resort_313.tif
1569 | resort_313.tif
1570 | resort_313.tif
1571 | resort_314.tif
1572 | resort_314.tif
1573 | resort_314.tif
1574 | resort_314.tif
1575 | resort_314.tif
1576 | resort_315.tif
1577 | resort_315.tif
1578 | resort_315.tif
1579 | resort_315.tif
1580 | resort_315.tif
1581 | resort_316.tif
1582 | resort_316.tif
1583 | resort_316.tif
1584 | resort_316.tif
1585 | resort_316.tif
1586 | resort_317.tif
1587 | resort_317.tif
1588 | resort_317.tif
1589 | resort_317.tif
1590 | resort_317.tif
1591 | resort_318.tif
1592 | resort_318.tif
1593 | resort_318.tif
1594 | resort_318.tif
1595 | resort_318.tif
1596 | river_319.tif
1597 | river_319.tif
1598 | river_319.tif
1599 | river_319.tif
1600 | river_319.tif
1601 | river_320.tif
1602 | river_320.tif
1603 | river_320.tif
1604 | river_320.tif
1605 | river_320.tif
1606 | river_321.tif
1607 | river_321.tif
1608 | river_321.tif
1609 | river_321.tif
1610 | river_321.tif
1611 | river_322.tif
1612 | river_322.tif
1613 | river_322.tif
1614 | river_322.tif
1615 | river_322.tif
1616 | river_323.tif
1617 | river_323.tif
1618 | river_323.tif
1619 | river_323.tif
1620 | river_323.tif
1621 | river_324.tif
1622 | river_324.tif
1623 | river_324.tif
1624 | river_324.tif
1625 | river_324.tif
1626 | river_325.tif
1627 | river_325.tif
1628 | river_325.tif
1629 | river_325.tif
1630 | river_325.tif
1631 | river_326.tif
1632 | river_326.tif
1633 | river_326.tif
1634 | river_326.tif
1635 | river_326.tif
1636 | river_327.tif
1637 | river_327.tif
1638 | river_327.tif
1639 | river_327.tif
1640 | river_327.tif
1641 | river_328.tif
1642 | river_328.tif
1643 | river_328.tif
1644 | river_328.tif
1645 | river_328.tif
1646 | river_329.tif
1647 | river_329.tif
1648 | river_329.tif
1649 | river_329.tif
1650 | river_329.tif
1651 | river_330.tif
1652 | river_330.tif
1653 | river_330.tif
1654 | river_330.tif
1655 | river_330.tif
1656 | river_331.tif
1657 | river_331.tif
1658 | river_331.tif
1659 | river_331.tif
1660 | river_331.tif
1661 | river_332.tif
1662 | river_332.tif
1663 | river_332.tif
1664 | river_332.tif
1665 | river_332.tif
1666 | river_333.tif
1667 | river_333.tif
1668 | river_333.tif
1669 | river_333.tif
1670 | river_333.tif
1671 | river_334.tif
1672 | river_334.tif
1673 | river_334.tif
1674 | river_334.tif
1675 | river_334.tif
1676 | river_335.tif
1677 | river_335.tif
1678 | river_335.tif
1679 | river_335.tif
1680 | river_335.tif
1681 | river_336.tif
1682 | river_336.tif
1683 | river_336.tif
1684 | river_336.tif
1685 | river_336.tif
1686 | school_337.tif
1687 | school_337.tif
1688 | school_337.tif
1689 | school_337.tif
1690 | school_337.tif
1691 | school_338.tif
1692 | school_338.tif
1693 | school_338.tif
1694 | school_338.tif
1695 | school_338.tif
1696 | school_339.tif
1697 | school_339.tif
1698 | school_339.tif
1699 | school_339.tif
1700 | school_339.tif
1701 | school_340.tif
1702 | school_340.tif
1703 | school_340.tif
1704 | school_340.tif
1705 | school_340.tif
1706 | school_341.tif
1707 | school_341.tif
1708 | school_341.tif
1709 | school_341.tif
1710 | school_341.tif
1711 | school_342.tif
1712 | school_342.tif
1713 | school_342.tif
1714 | school_342.tif
1715 | school_342.tif
1716 | school_343.tif
1717 | school_343.tif
1718 | school_343.tif
1719 | school_343.tif
1720 | school_343.tif
1721 | school_344.tif
1722 | school_344.tif
1723 | school_344.tif
1724 | school_344.tif
1725 | school_344.tif
1726 | school_345.tif
1727 | school_345.tif
1728 | school_345.tif
1729 | school_345.tif
1730 | school_345.tif
1731 | school_346.tif
1732 | school_346.tif
1733 | school_346.tif
1734 | school_346.tif
1735 | school_346.tif
1736 | school_347.tif
1737 | school_347.tif
1738 | school_347.tif
1739 | school_347.tif
1740 | school_347.tif
1741 | school_348.tif
1742 | school_348.tif
1743 | school_348.tif
1744 | school_348.tif
1745 | school_348.tif
1746 | school_349.tif
1747 | school_349.tif
1748 | school_349.tif
1749 | school_349.tif
1750 | school_349.tif
1751 | school_350.tif
1752 | school_350.tif
1753 | school_350.tif
1754 | school_350.tif
1755 | school_350.tif
1756 | school_351.tif
1757 | school_351.tif
1758 | school_351.tif
1759 | school_351.tif
1760 | school_351.tif
1761 | school_352.tif
1762 | school_352.tif
1763 | school_352.tif
1764 | school_352.tif
1765 | school_352.tif
1766 | sparseresidential_353.tif
1767 | sparseresidential_353.tif
1768 | sparseresidential_353.tif
1769 | sparseresidential_353.tif
1770 | sparseresidential_353.tif
1771 | sparseresidential_354.tif
1772 | sparseresidential_354.tif
1773 | sparseresidential_354.tif
1774 | sparseresidential_354.tif
1775 | sparseresidential_354.tif
1776 | sparseresidential_355.tif
1777 | sparseresidential_355.tif
1778 | sparseresidential_355.tif
1779 | sparseresidential_355.tif
1780 | sparseresidential_355.tif
1781 | sparseresidential_356.tif
1782 | sparseresidential_356.tif
1783 | sparseresidential_356.tif
1784 | sparseresidential_356.tif
1785 | sparseresidential_356.tif
1786 | sparseresidential_357.tif
1787 | sparseresidential_357.tif
1788 | sparseresidential_357.tif
1789 | sparseresidential_357.tif
1790 | sparseresidential_357.tif
1791 | sparseresidential_358.tif
1792 | sparseresidential_358.tif
1793 | sparseresidential_358.tif
1794 | sparseresidential_358.tif
1795 | sparseresidential_358.tif
1796 | sparseresidential_359.tif
1797 | sparseresidential_359.tif
1798 | sparseresidential_359.tif
1799 | sparseresidential_359.tif
1800 | sparseresidential_359.tif
1801 | sparseresidential_360.tif
1802 | sparseresidential_360.tif
1803 | sparseresidential_360.tif
1804 | sparseresidential_360.tif
1805 | sparseresidential_360.tif
1806 | sparseresidential_361.tif
1807 | sparseresidential_361.tif
1808 | sparseresidential_361.tif
1809 | sparseresidential_361.tif
1810 | sparseresidential_361.tif
1811 | sparseresidential_362.tif
1812 | sparseresidential_362.tif
1813 | sparseresidential_362.tif
1814 | sparseresidential_362.tif
1815 | sparseresidential_362.tif
1816 | sparseresidential_363.tif
1817 | sparseresidential_363.tif
1818 | sparseresidential_363.tif
1819 | sparseresidential_363.tif
1820 | sparseresidential_363.tif
1821 | sparseresidential_364.tif
1822 | sparseresidential_364.tif
1823 | sparseresidential_364.tif
1824 | sparseresidential_364.tif
1825 | sparseresidential_364.tif
1826 | sparseresidential_365.tif
1827 | sparseresidential_365.tif
1828 | sparseresidential_365.tif
1829 | sparseresidential_365.tif
1830 | sparseresidential_365.tif
1831 | sparseresidential_366.tif
1832 | sparseresidential_366.tif
1833 | sparseresidential_366.tif
1834 | sparseresidential_366.tif
1835 | sparseresidential_366.tif
1836 | sparseresidential_367.tif
1837 | sparseresidential_367.tif
1838 | sparseresidential_367.tif
1839 | sparseresidential_367.tif
1840 | sparseresidential_367.tif
1841 | square_368.tif
1842 | square_368.tif
1843 | square_368.tif
1844 | square_368.tif
1845 | square_368.tif
1846 | square_369.tif
1847 | square_369.tif
1848 | square_369.tif
1849 | square_369.tif
1850 | square_369.tif
1851 | square_370.tif
1852 | square_370.tif
1853 | square_370.tif
1854 | square_370.tif
1855 | square_370.tif
1856 | square_371.tif
1857 | square_371.tif
1858 | square_371.tif
1859 | square_371.tif
1860 | square_371.tif
1861 | square_372.tif
1862 | square_372.tif
1863 | square_372.tif
1864 | square_372.tif
1865 | square_372.tif
1866 | square_373.tif
1867 | square_373.tif
1868 | square_373.tif
1869 | square_373.tif
1870 | square_373.tif
1871 | square_374.tif
1872 | square_374.tif
1873 | square_374.tif
1874 | square_374.tif
1875 | square_374.tif
1876 | square_375.tif
1877 | square_375.tif
1878 | square_375.tif
1879 | square_375.tif
1880 | square_375.tif
1881 | square_376.tif
1882 | square_376.tif
1883 | square_376.tif
1884 | square_376.tif
1885 | square_376.tif
1886 | square_377.tif
1887 | square_377.tif
1888 | square_377.tif
1889 | square_377.tif
1890 | square_377.tif
1891 | square_378.tif
1892 | square_378.tif
1893 | square_378.tif
1894 | square_378.tif
1895 | square_378.tif
1896 | square_379.tif
1897 | square_379.tif
1898 | square_379.tif
1899 | square_379.tif
1900 | square_379.tif
1901 | square_380.tif
1902 | square_380.tif
1903 | square_380.tif
1904 | square_380.tif
1905 | square_380.tif
1906 | square_381.tif
1907 | square_381.tif
1908 | square_381.tif
1909 | square_381.tif
1910 | square_381.tif
1911 | square_382.tif
1912 | square_382.tif
1913 | square_382.tif
1914 | square_382.tif
1915 | square_382.tif
1916 | square_383.tif
1917 | square_383.tif
1918 | square_383.tif
1919 | square_383.tif
1920 | square_383.tif
1921 | square_384.tif
1922 | square_384.tif
1923 | square_384.tif
1924 | square_384.tif
1925 | square_384.tif
1926 | square_385.tif
1927 | square_385.tif
1928 | square_385.tif
1929 | square_385.tif
1930 | square_385.tif
1931 | square_386.tif
1932 | square_386.tif
1933 | square_386.tif
1934 | square_386.tif
1935 | square_386.tif
1936 | stadium_387.tif
1937 | stadium_387.tif
1938 | stadium_387.tif
1939 | stadium_387.tif
1940 | stadium_387.tif
1941 | stadium_388.tif
1942 | stadium_388.tif
1943 | stadium_388.tif
1944 | stadium_388.tif
1945 | stadium_388.tif
1946 | stadium_389.tif
1947 | stadium_389.tif
1948 | stadium_389.tif
1949 | stadium_389.tif
1950 | stadium_389.tif
1951 | stadium_390.tif
1952 | stadium_390.tif
1953 | stadium_390.tif
1954 | stadium_390.tif
1955 | stadium_390.tif
1956 | stadium_391.tif
1957 | stadium_391.tif
1958 | stadium_391.tif
1959 | stadium_391.tif
1960 | stadium_391.tif
1961 | stadium_392.tif
1962 | stadium_392.tif
1963 | stadium_392.tif
1964 | stadium_392.tif
1965 | stadium_392.tif
1966 | stadium_393.tif
1967 | stadium_393.tif
1968 | stadium_393.tif
1969 | stadium_393.tif
1970 | stadium_393.tif
1971 | stadium_394.tif
1972 | stadium_394.tif
1973 | stadium_394.tif
1974 | stadium_394.tif
1975 | stadium_394.tif
1976 | stadium_395.tif
1977 | stadium_395.tif
1978 | stadium_395.tif
1979 | stadium_395.tif
1980 | stadium_395.tif
1981 | stadium_396.tif
1982 | stadium_396.tif
1983 | stadium_396.tif
1984 | stadium_396.tif
1985 | stadium_396.tif
1986 | stadium_397.tif
1987 | stadium_397.tif
1988 | stadium_397.tif
1989 | stadium_397.tif
1990 | stadium_397.tif
1991 | stadium_398.tif
1992 | stadium_398.tif
1993 | stadium_398.tif
1994 | stadium_398.tif
1995 | stadium_398.tif
1996 | stadium_399.tif
1997 | stadium_399.tif
1998 | stadium_399.tif
1999 | stadium_399.tif
2000 | stadium_399.tif
2001 | stadium_400.tif
2002 | stadium_400.tif
2003 | stadium_400.tif
2004 | stadium_400.tif
2005 | stadium_400.tif
2006 | stadium_401.tif
2007 | stadium_401.tif
2008 | stadium_401.tif
2009 | stadium_401.tif
2010 | stadium_401.tif
2011 | stadium_402.tif
2012 | stadium_402.tif
2013 | stadium_402.tif
2014 | stadium_402.tif
2015 | stadium_402.tif
2016 | stadium_403.tif
2017 | stadium_403.tif
2018 | stadium_403.tif
2019 | stadium_403.tif
2020 | stadium_403.tif
2021 | stadium_404.tif
2022 | stadium_404.tif
2023 | stadium_404.tif
2024 | stadium_404.tif
2025 | stadium_404.tif
2026 | stadium_405.tif
2027 | stadium_405.tif
2028 | stadium_405.tif
2029 | stadium_405.tif
2030 | stadium_405.tif
2031 | storagetanks_406.tif
2032 | storagetanks_406.tif
2033 | storagetanks_406.tif
2034 | storagetanks_406.tif
2035 | storagetanks_406.tif
2036 | storagetanks_407.tif
2037 | storagetanks_407.tif
2038 | storagetanks_407.tif
2039 | storagetanks_407.tif
2040 | storagetanks_407.tif
2041 | storagetanks_408.tif
2042 | storagetanks_408.tif
2043 | storagetanks_408.tif
2044 | storagetanks_408.tif
2045 | storagetanks_408.tif
2046 | storagetanks_409.tif
2047 | storagetanks_409.tif
2048 | storagetanks_409.tif
2049 | storagetanks_409.tif
2050 | storagetanks_409.tif
2051 | storagetanks_410.tif
2052 | storagetanks_410.tif
2053 | storagetanks_410.tif
2054 | storagetanks_410.tif
2055 | storagetanks_410.tif
2056 | storagetanks_411.tif
2057 | storagetanks_411.tif
2058 | storagetanks_411.tif
2059 | storagetanks_411.tif
2060 | storagetanks_411.tif
2061 | storagetanks_412.tif
2062 | storagetanks_412.tif
2063 | storagetanks_412.tif
2064 | storagetanks_412.tif
2065 | storagetanks_412.tif
2066 | storagetanks_413.tif
2067 | storagetanks_413.tif
2068 | storagetanks_413.tif
2069 | storagetanks_413.tif
2070 | storagetanks_413.tif
2071 | storagetanks_414.tif
2072 | storagetanks_414.tif
2073 | storagetanks_414.tif
2074 | storagetanks_414.tif
2075 | storagetanks_414.tif
2076 | storagetanks_415.tif
2077 | storagetanks_415.tif
2078 | storagetanks_415.tif
2079 | storagetanks_415.tif
2080 | storagetanks_415.tif
2081 | storagetanks_416.tif
2082 | storagetanks_416.tif
2083 | storagetanks_416.tif
2084 | storagetanks_416.tif
2085 | storagetanks_416.tif
2086 | storagetanks_417.tif
2087 | storagetanks_417.tif
2088 | storagetanks_417.tif
2089 | storagetanks_417.tif
2090 | storagetanks_417.tif
2091 | storagetanks_418.tif
2092 | storagetanks_418.tif
2093 | storagetanks_418.tif
2094 | storagetanks_418.tif
2095 | storagetanks_418.tif
2096 | storagetanks_419.tif
2097 | storagetanks_419.tif
2098 | storagetanks_419.tif
2099 | storagetanks_419.tif
2100 | storagetanks_419.tif
2101 | storagetanks_420.tif
2102 | storagetanks_420.tif
2103 | storagetanks_420.tif
2104 | storagetanks_420.tif
2105 | storagetanks_420.tif
2106 | storagetanks_421.tif
2107 | storagetanks_421.tif
2108 | storagetanks_421.tif
2109 | storagetanks_421.tif
2110 | storagetanks_421.tif
2111 | storagetanks_422.tif
2112 | storagetanks_422.tif
2113 | storagetanks_422.tif
2114 | storagetanks_422.tif
2115 | storagetanks_422.tif
2116 | storagetanks_423.tif
2117 | storagetanks_423.tif
2118 | storagetanks_423.tif
2119 | storagetanks_423.tif
2120 | storagetanks_423.tif
2121 | storagetanks_424.tif
2122 | storagetanks_424.tif
2123 | storagetanks_424.tif
2124 | storagetanks_424.tif
2125 | storagetanks_424.tif
2126 | storagetanks_425.tif
2127 | storagetanks_425.tif
2128 | storagetanks_425.tif
2129 | storagetanks_425.tif
2130 | storagetanks_425.tif
2131 | storagetanks_426.tif
2132 | storagetanks_426.tif
2133 | storagetanks_426.tif
2134 | storagetanks_426.tif
2135 | storagetanks_426.tif
2136 | storagetanks_427.tif
2137 | storagetanks_427.tif
2138 | storagetanks_427.tif
2139 | storagetanks_427.tif
2140 | storagetanks_427.tif
2141 | storagetanks_428.tif
2142 | storagetanks_428.tif
2143 | storagetanks_428.tif
2144 | storagetanks_428.tif
2145 | storagetanks_428.tif
2146 | viaduct_429.tif
2147 | viaduct_429.tif
2148 | viaduct_429.tif
2149 | viaduct_429.tif
2150 | viaduct_429.tif
2151 | viaduct_430.tif
2152 | viaduct_430.tif
2153 | viaduct_430.tif
2154 | viaduct_430.tif
2155 | viaduct_430.tif
2156 | viaduct_431.tif
2157 | viaduct_431.tif
2158 | viaduct_431.tif
2159 | viaduct_431.tif
2160 | viaduct_431.tif
2161 | viaduct_432.tif
2162 | viaduct_432.tif
2163 | viaduct_432.tif
2164 | viaduct_432.tif
2165 | viaduct_432.tif
2166 | viaduct_433.tif
2167 | viaduct_433.tif
2168 | viaduct_433.tif
2169 | viaduct_433.tif
2170 | viaduct_433.tif
2171 | viaduct_434.tif
2172 | viaduct_434.tif
2173 | viaduct_434.tif
2174 | viaduct_434.tif
2175 | viaduct_434.tif
2176 | viaduct_435.tif
2177 | viaduct_435.tif
2178 | viaduct_435.tif
2179 | viaduct_435.tif
2180 | viaduct_435.tif
2181 | viaduct_436.tif
2182 | viaduct_436.tif
2183 | viaduct_436.tif
2184 | viaduct_436.tif
2185 | viaduct_436.tif
2186 | viaduct_437.tif
2187 | viaduct_437.tif
2188 | viaduct_437.tif
2189 | viaduct_437.tif
2190 | viaduct_437.tif
2191 | viaduct_438.tif
2192 | viaduct_438.tif
2193 | viaduct_438.tif
2194 | viaduct_438.tif
2195 | viaduct_438.tif
2196 | viaduct_439.tif
2197 | viaduct_439.tif
2198 | viaduct_439.tif
2199 | viaduct_439.tif
2200 | viaduct_439.tif
2201 | viaduct_440.tif
2202 | viaduct_440.tif
2203 | viaduct_440.tif
2204 | viaduct_440.tif
2205 | viaduct_440.tif
2206 | viaduct_441.tif
2207 | viaduct_441.tif
2208 | viaduct_441.tif
2209 | viaduct_441.tif
2210 | viaduct_441.tif
2211 | viaduct_442.tif
2212 | viaduct_442.tif
2213 | viaduct_442.tif
2214 | viaduct_442.tif
2215 | viaduct_442.tif
2216 | viaduct_443.tif
2217 | viaduct_443.tif
2218 | viaduct_443.tif
2219 | viaduct_443.tif
2220 | viaduct_443.tif
2221 | viaduct_444.tif
2222 | viaduct_444.tif
2223 | viaduct_444.tif
2224 | viaduct_444.tif
2225 | viaduct_444.tif
2226 | viaduct_445.tif
2227 | viaduct_445.tif
2228 | viaduct_445.tif
2229 | viaduct_445.tif
2230 | viaduct_445.tif
2231 | viaduct_446.tif
2232 | viaduct_446.tif
2233 | viaduct_446.tif
2234 | viaduct_446.tif
2235 | viaduct_446.tif
2236 | denseresidential_447.tif
2237 | denseresidential_447.tif
2238 | denseresidential_447.tif
2239 | denseresidential_447.tif
2240 | denseresidential_447.tif
2241 | airport_448.tif
2242 | airport_448.tif
2243 | airport_448.tif
2244 | airport_448.tif
2245 | airport_448.tif
2246 | intersection_449.tif
2247 | intersection_449.tif
2248 | intersection_449.tif
2249 | intersection_449.tif
2250 | intersection_449.tif
2251 | playground_450.tif
2252 | playground_450.tif
2253 | playground_450.tif
2254 | playground_450.tif
2255 | playground_450.tif
2256 | storagetanks_451.tif
2257 | storagetanks_451.tif
2258 | storagetanks_451.tif
2259 | storagetanks_451.tif
2260 | storagetanks_451.tif
2261 |
--------------------------------------------------------------------------------
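Note on the `train_filename.txt` listing above: every image name appears five times in a row. This matches the usual layout of `*_precomp` retrieval splits, where `train_caps.txt` holds one caption per line and the filename file repeats each image once per caption; treat this reading as an inference from the file layout rather than a documented guarantee.
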
/predict/evaluate_selo.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/05
6 |
7 | import json
8 | import logging
9 | import os
10 | import sys
11 |
12 | sys.path.append("..")
13 | from evaluations.SLM import SLM
14 |
15 | if __name__ == "__main__":
16 | import argparse
17 |
18 | # settings
19 | parser = argparse.ArgumentParser(description="SLM")
20 | parser.add_argument("--yaml_path", type=str, default="option/RSITMD/RSITMD_AMFMN.yaml", help="config yaml path")
21 | parser.add_argument("--cache_path", type=str, default="cache/RSITMD_AMFMN", help="cache path")
22 | parser.add_argument("--src_data_path", type=str, default="../test_data/imgs", help="testset images path")
23 | parser.add_argument("--src_anno_path", type=str, default="../test_data/annotations/anno.json", help="testset annotations path")
24 | opt = parser.parse_args()
25 |
26 | # logging
27 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
28 | logger = logging.getLogger(__name__)
29 |
30 | # start eval
31 | slm_metric = SLM()
32 |
33 | # load from annotations
34 |     with open(opt.src_anno_path, 'r', encoding='utf8') as fp:
35 | json_data = json.load(fp)
36 |
37 | for idx, item in enumerate(json_data):
38 | # load sample
39 | img = item['jpg_name']
40 | text = item['caption']
41 | probmap_path = os.path.join(opt.cache_path, "probmap_{}.jpg".format(idx))
42 | points = item['points']
43 |
44 | # logging
45 | logger.info("Processing {}/{}: {}".format(idx, len(json_data), img))
46 | logger.info("Corresponding text: {}".format(text))
47 |
48 | # evaluate #
49 | metrics = slm_metric.evaluate(probmap_path, region_list=points)
50 | slm_metric.append_metric(metrics)
51 |
52 | slm_metric.get_the_mean_metric()
--------------------------------------------------------------------------------
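
evaluate_selo.py above only re-scores cached probability maps, so its inputs are the cached probmap_{idx}.jpg files plus the annotation JSON. Below is a minimal sketch of one anno.json entry, inferred from the keys the loop reads (jpg_name, caption, points); the filename, caption, and coordinates are hypothetical placeholders, and the exact layout of "points" is defined by evaluations/SLM.py and the shipped anno.json, not by this sketch.

    # hypothetical example of one annotation entry (structure inferred from the loop above)
    example_item = {
        "jpg_name": "sparseresidential_3814.tif",  # image file under --src_data_path (hypothetical entry)
        "caption": "several sparse houses surrounded by trees",  # hypothetical query text
        "points": [[10, 20], [200, 20], [200, 180], [10, 180]],  # hypothetical ground-truth region
    }
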
/predict/generate_selo.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/03
6 |
7 | import json
8 | import os
9 | import sys
10 | import time
11 |
12 | import cv2
13 | import numpy as np
14 | import utils
15 | from model_encoder import Encoder
16 | from model_init import model_init
17 |
18 | sys.path.append("..")
19 | from evaluations.SLM import SLM
20 |
21 | def split_image(img_path, steps, cache_path):
22 | t1 = time.time()
23 |
24 | subimage_files_dir = os.path.join(cache_path, os.path.basename(img_path).split(".")[0])
25 |
26 |     # directory for the cropped sub-images
27 |     subimages_dir = subimage_files_dir + '_subimages'
28 | if os.path.exists(subimages_dir):
29 | utils.delete_dire(subimages_dir)
30 | else:
31 | os.makedirs(subimages_dir)
32 |
33 | # Read Image
34 | source_img = cv2.imread(img_path)
35 |     img_height = np.shape(source_img)[0]
36 |     img_width = np.shape(source_img)[1]
37 |     logger.info("img size:{}x{}".format(img_height, img_width))
38 |
39 |     for step in steps:
40 |         logger.info("Start splitting image with step {}".format(step))
41 |         for gap in [step, 0.5 * step]:
42 |             gap = int(gap)
43 |
44 |             # Cut img: slide a step x step window; the half-step pass (gap = step / 2) offsets the grid by half a window
45 |             for h in range(0 + (step - gap), img_height, step):
46 |                 h_start, h_end = h, h + step
47 |                 # clamp the last window to the image border
48 |                 if h_end >= img_height:
49 |                     h_start, h_end = img_height - step, img_height
50 |
51 |                 for w in range(0 + (step - gap), img_width, step):
52 |                     w_start, w_end = w, w + step
53 |                     # clamp the last window to the image border
54 |                     if w_end >= img_width:
55 |                         w_start, w_end = img_width - step, img_width
56 |
57 |                     cut_img_name = str(h_start) + "_" + str(h_end) + "_" + str(w_start) + "_" + str(w_end) + ".jpg"
58 |                     cut_img = source_img[h_start:h_end, w_start:w_end]
59 | cut_img = cv2.resize(cut_img, (256, 256), interpolation=cv2.INTER_CUBIC)
60 |
61 | cv2.imwrite(os.path.join(subimages_dir, cut_img_name), cut_img)
62 |
63 |
64 | logger.info("Image {} has been split successfully.".format(img_path))
65 |
66 | return time.time() - t1
67 |
68 | def generate_heatmap(img_path, text, output_file_h, output_file_a, output_file_p, cache_path):
69 |
70 | subimages_dir = os.path.join(cache_path, os.path.basename(img_path).split(".")[0]) +'_subimages'
71 |
72 |     logger.info("Start calculating similarities ...")
73 | cal_start = time.time()
74 |
75 | # init encoder
76 | encoder = Encoder(model)
77 |
78 | # text vector
79 | text_vector = encoder.text_encoder(model, vocab, text)
80 |
81 | # read subimages
82 | subimages = os.listdir(subimages_dir)
83 | sim_results = []
84 | for subimage in subimages:
85 | image_vector = encoder.image_encoder(model, os.path.join(subimages_dir, subimage))
86 | sim_results.append(encoder.cosine_sim(text_vector, image_vector))
87 | cal_end = time.time()
88 |     logger.info("Calculated similarities in {}s".format(cal_end - cal_start))
89 | t2 = cal_end-cal_start
90 |
91 |     logger.info("Start generating heatmap ...")
92 | generate_start = time.time()
93 |
94 | # read Image
95 | source_img = cv2.imread(img_path)
96 | img_row = np.shape(source_img)[0]
97 | img_col = np.shape(source_img)[1]
98 |
99 | # mkdir map
100 | heat_map = np.zeros([img_row, img_col], dtype=float)
101 | heat_num = np.zeros([img_row, img_col], dtype=float)
102 | for idx,file in enumerate(subimages):
103 | r_start, r_end, c_start, c_end = file.replace(".jpg","").split("_")
104 |
105 | heat_map[int(r_start):int(r_end), int(c_start):int(c_end)] += sim_results[idx]
106 | heat_num[int(r_start):int(r_end), int(c_start):int(c_end)] += 1
107 |
108 |     # average the overlapping window scores at every pixel
109 |     for i in range(np.shape(heat_map)[0]):
110 |         for j in range(np.shape(heat_map)[1]):
111 |             heat_map[i, j] = heat_map[i, j] / heat_num[i, j]
112 | t3 = time.time() - generate_start
113 |
114 | logger.info("Generate finished, start optim ...")
115 | optim_start = time.time()
116 |     # min-max normalize the averaged map to [0, 1]
117 |     adaptive = np.asarray(heat_map)
118 |     adaptive = adaptive - np.min(adaptive)
119 |     probmap = adaptive / np.max(adaptive)
120 |     # must convert to type uint8
121 |     probmap = np.uint8(255 * probmap)
122 |     probmap = cv2.medianBlur(probmap, 251)
123 | heatmap = cv2.applyColorMap(probmap, cv2.COLORMAP_JET)
124 | img_add = cv2.addWeighted(source_img, 0.7, heatmap, 0.3, 0)
125 | generate_end = time.time()
126 | logger.info("Generate heatmap in {}s".format(generate_end-generate_start))
127 |
128 |     logger.info("Saving probmap in {} ...".format(output_file_p))
129 |     logger.info("Saving heatmap in {} ...".format(output_file_h))
130 |     logger.info("Saving addmap in {} ...".format(output_file_a))
131 |     cv2.imwrite(output_file_p, probmap)
132 |     cv2.imwrite(output_file_h, heatmap)
133 |     cv2.imwrite(output_file_a, img_add)
134 | logger.info("Saved ok.")
135 |
136 | # clear temp
137 | utils.delete_dire(subimages_dir)
138 | os.rmdir(subimages_dir)
139 |
140 | t4 = generate_end - optim_start
141 | return t2, t3, t4
142 |
143 | if __name__ == "__main__":
144 |
145 | import argparse
146 |
147 | # settings
148 | parser = argparse.ArgumentParser(description="SLM")
149 | parser.add_argument("--yaml_path", type=str, default="option/RSITMD/RSITMD_AMFMN.yaml", help="config yaml path")
150 | parser.add_argument("--cache_path", type=str, default="cache/RSITMD_AMFMN", help="cache path")
151 | parser.add_argument("--src_data_path", type=str, default="../test_data/imgs", help="testset images path")
152 | parser.add_argument("--src_anno_path", type=str, default="../test_data/annotations/anno.json", help="testset annotations path")
153 | parser.add_argument("--step", type=str, default="256_512_768", help="step")
154 | opt = parser.parse_args()
155 |
156 | # mkdir
157 | if not os.path.exists(opt.cache_path):
158 | os.mkdir(opt.cache_path)
159 |
160 | # params
161 | steps = [int(step) for step in opt.step.split("_")]
162 |
163 | # logging
164 | logger = utils.get_logger(os.path.join(opt.cache_path, 'log.txt'))
165 |
166 | # init model
167 | model, vocab = model_init(
168 | prefix_path = "./",
169 | yaml_path = opt.yaml_path
170 | )
171 |
172 | # start eval
173 | slm_metric = SLM()
174 |
175 | # load from annotations
176 |     with open(opt.src_anno_path, 'r', encoding='utf8') as fp:
177 | json_data = json.load(fp)
178 |
179 | t1_all, t2_all, t3_all, t4_all = 0, 0, 0, 0
180 | total_time = time.time()
181 |
182 | for idx, item in enumerate(json_data):
183 | # load sample
184 | img = item['jpg_name']
185 | text = item['caption']
186 | points = item['points']
187 |
188 | # path
189 | img_path = os.path.join(opt.src_data_path, img)
190 | heatmap_path = os.path.join(opt.cache_path, "heatmap_{}.jpg".format(idx))
191 | probmap_path = os.path.join(opt.cache_path, "probmap_{}.jpg".format(idx))
192 | addmap_path = os.path.join(opt.cache_path, "addmap_{}.jpg".format(idx))
193 |
194 | # logging
195 | logger.info("Processing {}/{}: {}".format(idx, len(json_data), img))
196 | logger.info("Corresponding text: {}".format(text))
197 |
198 | # processing
199 | t1 = split_image(img_path, steps, opt.cache_path)
200 | t2, t3, t4 = generate_heatmap(img_path, text, heatmap_path, addmap_path, probmap_path, opt.cache_path)
201 |
202 | t1_all += t1
203 | t2_all += t2
204 | t3_all += t3
205 | t4_all += t4
206 |
207 | # evaluate #
208 | metrics = slm_metric.evaluate(probmap_path, region_list=points)
209 | slm_metric.append_metric(metrics)
210 |
211 | slm_metric.get_the_mean_metric()
212 |
213 | all_time = time.time() - total_time
214 |     logger.info("Time-Cut: {:.4f}s".format(t1_all))
215 |     logger.info("Time-Sim: {:.4f}s".format(t2_all))
216 |     logger.info("Time-Gnt: {:.4f}s".format(t3_all))
217 |     logger.info("Time-Flt: {:.4f}s".format(t4_all))
218 |     logger.info("Time-Total: {:.4f}s".format(all_time))
219 |
220 |
--------------------------------------------------------------------------------
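
A side note on the heatmap step in generate_selo.py above: the nested per-pixel loop that divides heat_map by heat_num can be expressed as a single NumPy division. The helper below is a hypothetical, behavior-equivalent sketch (not part of the repository) of the averaging and the min-max rescaling that precede the median blur:

    import numpy as np

    def average_and_rescale(heat_map, heat_num):
        # element-wise mean of the overlapping window similarities
        heat_map = heat_map / heat_num
        # min-max normalize to [0, 1], then convert to uint8 for OpenCV filtering
        heat_map = heat_map - heat_map.min()
        probmap = heat_map / heat_map.max()
        return np.uint8(255 * probmap)
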
/predict/generate_selo_v2.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/03
6 |
7 | import json
8 | import os
9 | import sys
10 | import time
11 |
12 | import cv2
13 | import numpy as np
14 |
15 | import utils
16 | from model_encoder import Encoder
17 | from model_init import model_init
18 |
19 | sys.path.append("..")
20 | from evaluations.SLM import SLM
21 |
22 | # Crop the image into 512*512 tiles and return the time spent cropping
23 | # img_path: path of the source image, e.g. 0.jpg
24 | # subimages_dir: directory that receives the cropped tiles
25 | # returns: split_time, the number of seconds spent cropping
26 | def split_image(img_path, subimages_dir):
27 | t1 = time.time()
28 | # Read Image
29 | source_img = cv2.imread(img_path)
30 | img_height = np.shape(source_img)[0]
31 |     img_width = np.shape(source_img)[1]
32 |     step = 512
33 |     # Cut img: tile the image with non-overlapping 512x512 windows,
34 |     # clamping the last row/column of tiles to the image border
35 |     for h in range(0, img_height, step):
36 |         h_start, h_end = h, h + step
37 |         if h_end >= img_height:
38 |             h_start, h_end = img_height - step, img_height
39 |         for w in range(0, img_width, step):
40 |             w_start, w_end = w, w + step
41 |             # clamp the last tile to the image border
42 |             if w_end >= img_width:
43 |                 w_start, w_end = img_width - step, img_width
44 | cut_img_name = str(h_start) + "_" + str(h_end) + "_" + str(w_start) + "_" + str(w_end) + ".jpg"
45 | cut_img = source_img[h_start:h_end, w_start:w_end]
46 | cut_img = cv2.resize(cut_img, (256, 256), interpolation=cv2.INTER_CUBIC)
47 | cv2.imwrite(os.path.join(subimages_dir, cut_img_name), cut_img)
48 | split_time = time.time() - t1
49 |
50 | return split_time
51 |
52 | # Encode the cropped tiles, keep the most text-relevant ones, and crop anchor boxes around them
53 | # subimages_dir: directory holding the stage-1 tiles, e.g. 0_subimages
54 | # boximages_dir: directory that receives the stage-2 anchor-box crops
55 | # sim_results1: list filled in place with the tile-to-text similarities
56 | # text: the query text
57 | def get_img_vectors(subimages_dir, text, boximages_dir, encoder, img_path, sim_results1):
58 |
59 | t2 = time.time()
60 |
61 | # read subimages
62 | subimages = os.listdir(subimages_dir)
63 |
64 |     # get the feature vector of the text
65 | text_vector = encoder.text_encoder(model, vocab, text)
66 |
67 | random_times = 4
68 |     # average the query vector over several randomly shuffled copies of the caption
69 |     for _ in range(random_times):
70 |         tmp = text.split()
71 |         np.random.shuffle(tmp)
72 |         text_bak = " ".join(tmp)
73 | text_vector += encoder.text_encoder(model, vocab, text_bak)
74 | text_vector /= (random_times + 1)
75 |
76 |     # encode every tile and compute its similarity to the text
77 |     for subimage in subimages:
78 |         # 1-D feature vector of the tile
79 |         image_vector = encoder.image_encoder(model, os.path.join(subimages_dir, subimage))
80 |         sim = encoder.cosine_sim(image_vector, text_vector)
81 | sim_results1.append(sim)
82 |
83 |
84 |     # sort the similarities (ascending) and keep the index order
85 |     sim_indices = np.argsort(sim_results1)
86 | source_img = cv2.imread(img_path)
87 | img_height = np.shape(source_img)[0]
88 |     img_width = np.shape(source_img)[1]
89 |
90 |     # keep the top 15% most text-similar tiles
91 |     select1 = int(len(sim_indices) * 0.15) + 1
92 |     select_index1 = sim_indices[-select1:]
93 |
94 | for i in select_index1:
95 | img_name = subimages[i]
96 | h_start, h_end, w_start, w_end = img_name.replace(".jpg", "").split("_")
97 | cut_img_size = 512
98 | p1 = [int(h_start), int(w_start)]
99 | p2 = [int(h_start), int(w_end)]
100 | p3 = [int(h_end), int(w_start)]
101 | p4 = [int(h_end), int(w_end)]
102 | p5 = [int(h_start) + cut_img_size * 1 / 4, int(w_start) + cut_img_size * 1 / 4]
103 | p6 = [int(h_start) + cut_img_size * 1 / 4, int(w_start) + cut_img_size * 3 / 4]
104 | p7 = [int(h_start) + cut_img_size * 3 / 4, int(w_start) + cut_img_size * 1 / 4]
105 | p8 = [int(h_start) + cut_img_size * 3 / 4, int(w_start) + cut_img_size * 3 / 4]
106 | p9 = [int(h_start) + cut_img_size * 1 / 2, int(w_start) + cut_img_size * 1 / 2]
107 | points = [p1, p2, p3, p4, p5, p6, p7, p8, p9]
108 | for point in points:
109 | h = point[0]
110 | w = point[1]
111 | sizes = [64,128]
112 | for size in sizes:
113 | h_start1 = int(h - size if h - size > 0 else 0)
114 | h_end1 = int(h_start1 + size * 2 if h_start1 + size * 2 < img_height else img_height)
115 | w_start1 = int(w - size if w - size > 0 else 0)
116 |                 w_end1 = int(w_start1 + size * 2 if w_start1 + size * 2 < img_width else img_width)
117 | box_img_name = str(h_start1) + "_" + str(h_end1) + "_" + str(w_start1) + "_" + str(w_end1) + ".jpg"
118 | box_img = source_img[h_start1:h_end1, w_start1:w_end1]
119 | box_img = cv2.resize(box_img, (256, 256), interpolation=cv2.INTER_CUBIC)
120 | cv2.imwrite(os.path.join(boximages_dir, box_img_name), box_img)
121 | stage2_time = time.time()-t2
122 |
123 |
124 |     return text_vector, select_index1, stage2_time
125 |
126 | def generate_heatmap(img_path, output_file_h, output_file_a, output_file_p, text_vector, boximages_dir, subimages_dir,sim_results1,select_index1,
127 | encoder):
128 |
129 | t3 = time.time()
130 |
131 | # read subimages
132 | boximages = os.listdir(boximages_dir)
133 |     subimages = os.listdir(subimages_dir)  # NOTE: assumes the same listing order as in get_img_vectors
134 |
135 | sim_results = []
136 | for boximage in boximages:
137 | image_vector = encoder.image_encoder(model, os.path.join(boximages_dir, boximage))
138 | sim_results.append(encoder.cosine_sim(text_vector, image_vector))
139 |
140 |
141 | # read Image
142 | source_img = cv2.imread(img_path)
143 | img_row = np.shape(source_img)[0]
144 | img_col = np.shape(source_img)[1]
145 |
146 | # mkdir map
147 | heat_map = np.zeros([img_row, img_col], dtype=float)
148 | heat_num = np.zeros([img_row, img_col], dtype=float)
149 |
150 | for idx in select_index1:
151 | h_start, h_end, w_start, w_end = subimages[idx].replace(".jpg", "").split("_")
152 | heat_map[int(h_start):int(h_end), int(w_start):int(w_end)] += sim_results1[idx]
153 | heat_num[int(h_start):int(h_end), int(w_start):int(w_end)] += 1
154 |
155 |
156 | for idx,file in enumerate(boximages):
157 | h_start, h_end, w_start, w_end = file.replace(".jpg","").split("_")
158 | heat_map[int(h_start):int(h_end), int(w_start):int(w_end)] += sim_results[idx]
159 | heat_num[int(h_start):int(h_end), int(w_start):int(w_end)] += 1
160 |     # average the overlapping scores at every covered pixel
161 |     for i in range(np.shape(heat_map)[0]):
162 |         for j in range(np.shape(heat_map)[1]):
163 |             if heat_num[i, j] != 0.0:
164 |                 heat_map[i, j] = heat_map[i, j] / heat_num[i, j]
165 |
166 |     # min-max normalize the averaged map to [0, 1]
167 |     adaptive = np.asarray(heat_map)
168 |     adaptive = adaptive - np.min(adaptive)
169 |     probmap = adaptive / np.max(adaptive)
170 |     # must convert to type uint8
171 |     probmap = np.uint8(255 * probmap)
172 |     # median filter (disabled in v2 in favor of the Gaussian filter)
173 |     # probmap = cv2.medianBlur(probmap,251)
174 |     # Gaussian filter
175 |     probmap = cv2.GaussianBlur(probmap, (255, 255), 0, 0)
176 | heatmap = cv2.applyColorMap(probmap, cv2.COLORMAP_JET)
177 | img_add = cv2.addWeighted(source_img, 0.7, heatmap, 0.3, 0)
178 | t3 = time.time()-t3
179 |
180 |     cv2.imwrite(output_file_p, probmap)
181 |     cv2.imwrite(output_file_h, heatmap)
182 |     cv2.imwrite(output_file_a, img_add)
183 |
184 | # clear temp
185 | utils.delete_dire(boximages_dir)
186 | os.rmdir(boximages_dir)
187 |
188 | utils.delete_dire(subimages_dir)
189 | os.rmdir(subimages_dir)
190 |
191 | return t3
192 |
193 |
194 | if __name__ == "__main__":
195 |
196 | import argparse
197 |
198 | # settings
199 | parser = argparse.ArgumentParser(description="SLM")
200 | parser.add_argument("--yaml_path", type=str, default="option/RSITMD/RSITMD_AMFMN.yaml", help="config yaml path")
201 | parser.add_argument("--cache_path", type=str, default="cache/RSITMD_AMFMN", help="cache path")
202 | parser.add_argument("--src_data_path", type=str, default="../test_data/imgs", help="testset images path")
203 | parser.add_argument("--src_anno_path", type=str, default="../test_data/annotations/anno.json",
204 | help="testset annotations path")
205 | opt = parser.parse_args()
206 |
207 | # mkdir
208 | if not os.path.exists(opt.cache_path):
209 | os.mkdir(opt.cache_path)
210 |
211 | # logging
212 | logger = utils.get_logger(os.path.join(opt.cache_path, 'log.txt'))
213 |
214 | # init model
215 | model, vocab = model_init(
216 | prefix_path="./",
217 | yaml_path=opt.yaml_path
218 | )
219 |
220 | # init encoder
221 | encoder = Encoder(model)
222 |
223 | # start eval
224 | slm_metric = SLM()
225 |
226 | # load from annotations
227 |     with open(opt.src_anno_path, 'r', encoding='utf8') as fp:
228 | json_data = json.load(fp)
229 |
230 | total_time = time.time()
231 | second_time_list = []
232 |
233 | for idx, item in enumerate(json_data):
234 | # load sample
235 | img = item['jpg_name']
236 | text = item['caption']
237 | points = item['points']
238 | count = 0
239 |
240 | # path
241 | img_path = os.path.join(opt.src_data_path, img)
242 | subimage_files_dir = os.path.join(opt.cache_path, os.path.basename(img_path).split(".")[0])
243 | heatmap_path = os.path.join(opt.cache_path, "heatmap_{}.jpg".format(idx))
244 | probmap_path = os.path.join(opt.cache_path, "probmap_{}.jpg".format(idx))
245 | addmap_path = os.path.join(opt.cache_path, "addmap_{}.jpg".format(idx))
246 |
247 |         # directory for the cropped tiles
248 |         subimages_dir = subimage_files_dir + '_subimages'
249 |         # directory for the anchor-box crops
250 |         boximages_dir = subimage_files_dir + '_boximages'
251 | if os.path.exists(subimages_dir):
252 | utils.delete_dire(subimages_dir)
253 | else:
254 | os.makedirs(subimages_dir)
255 |
256 | if os.path.exists(boximages_dir):
257 | utils.delete_dire(boximages_dir)
258 | else:
259 | os.makedirs(boximages_dir)
260 |
261 | times = time.time()
262 |
263 | # processing
264 | split_image(img_path, subimages_dir)
265 |
266 | sim_results1 = []
267 | select_index1 = []
268 | stage2_time = 0
269 | text_vector, select_index1, stage2_time = get_img_vectors(subimages_dir, text, boximages_dir, encoder, img_path,
270 | sim_results1)
271 |
272 | generate_heatmap(img_path, heatmap_path, addmap_path, probmap_path, text_vector,
273 | boximages_dir, subimages_dir, sim_results1, select_index1, encoder)
274 |
275 | times = time.time() - times
276 | second_time_list.append(times)
277 | logger.info("Processing {}/{}: {},的总时间为:{}".format(idx, len(json_data), img, times))
278 | logger.info("Corresponding text: {}".format(text))
279 |
280 | # evaluate #
281 | metrics = slm_metric.evaluate(probmap_path, region_list=points)
282 | slm_metric.append_metric(metrics)
283 |
284 | slm_metric.get_the_mean_metric()
285 |
286 | all_time = time.time() - total_time
287 | logger.info("Time-Total: {:.4f}".format(all_time))
288 | logger.info("second_time_list={}".format(second_time_list))
289 |
--------------------------------------------------------------------------------
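A note on generate_heatmap above: the nested (i, j) loop that averages heat_map by heat_num can be vectorized with a boolean mask. A minimal NumPy sketch, assuming heat_map and heat_num are float arrays of the source-image size as built in the function (the shapes here are illustrative):

    import numpy as np

    heat_map = np.zeros((512, 512))   # hypothetical image size
    heat_num = np.zeros((512, 512))
    # ... accumulate patch similarities into heat_map and hit counts into heat_num ...
    mask = heat_num != 0.0            # average only where at least one patch contributed
    heat_map[mask] /= heat_num[mask]

This is behavior-equivalent to the per-pixel loop and markedly faster on large scenes.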
/predict/layers/AMFMN.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information"
3 | # Yuan, Zhiqiang and Zhang, Wenkai and Changyuan Tian and Xuee, Rong and Zhengyuan Zhang and Wang, Hongqi and Fu, Kun and Sun, Xian
4 | # Written by YuanZhiqiang, 2021. Our code is based on AMFMN
5 | # ------------------------------------------------------------
6 | import copy
7 |
8 | import torch.nn.init
9 |
10 | from .AMFMN_utils import *
11 |
12 |
13 | def l2norm(X, dim, eps=1e-8):
14 | """L2-normalize columns of X
15 | """
16 | norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
17 | X = torch.div(X, norm)
18 | return X
19 |
20 | def cosine_sim(im, s):
21 | """Cosine similarity between all the image and sentence pairs
22 | """
23 | im = l2norm(im, dim=-1)
24 | s = l2norm(s, dim=-1)
25 | w12 = im.mm(s.t())
26 | return w12
27 |
28 | class BaseModel(nn.Module):
29 | def __init__(self, opt={}, vocab_words=[]):
30 | super(BaseModel, self).__init__()
31 |
32 | # img feature
33 | self.extract_feature = ExtractFeature(opt = opt)
34 | self.drop_g_v = nn.Dropout(0.3)
35 |
36 | # vsa feature
37 | self.mvsa = VSA_Module(opt=opt)
38 |
39 | # text feature
40 | self.text_feature = Skipthoughts_Embedding_Module(
41 | vocab= vocab_words,
42 | opt = opt
43 | )
44 |
45 | # weight
46 | self.gw = opt['global_local_weight']['global']
47 | self.lw = opt['global_local_weight']['local']
48 |
49 | self.Eiters = 0
50 |
51 | self.model_name = 'AMFMN'
52 |
53 | def forward(self, img, input_local_rep, input_local_adj, text, text_lens=None):
54 |
55 | # extract features
56 | lower_feature, higher_feature, solo_feature = self.extract_feature(img)
57 |
58 | # mvsa features
59 | global_feature = self.mvsa(lower_feature, higher_feature, solo_feature)
60 |
61 | # text features
62 | text_feature = self.text_feature(text)
63 |
64 | sims = cosine_sim(global_feature, text_feature)
65 | return sims
66 |
67 | def factory(opt, vocab_words, cuda=True, data_parallel=True):
68 | opt = copy.copy(opt)
69 |
70 | model = BaseModel(opt, vocab_words)
71 |
72 | if data_parallel:
73 | model = nn.DataParallel(model).cuda()
74 | if not cuda:
75 | raise ValueError
76 |
77 | if cuda:
78 | model.cuda()
79 |
80 | return model
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
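A usage sketch for cosine_sim defined in AMFMN.py above (assuming it is importable from that module): it L2-normalizes both batches and returns the full pairwise similarity matrix.

    import torch

    im = torch.randn(4, 512)     # 4 image embeddings (dims illustrative)
    s = torch.randn(6, 512)      # 6 sentence embeddings
    sims = cosine_sim(im, s)     # shape (4, 6); sims[i, j] = cos(im[i], s[j])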
/predict/layers/AMFMN_utils.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information"
3 | # Yuan, Zhiqiang and Zhang, Wenkai and Changyuan Tian and Xuee, Rong and Zhengyuan Zhang and Wang, Hongqi and Fu, Kun and Sun, Xian
4 | # Written by YuanZhiqiang, 2021. Our code is based on AMFMN
5 | # ------------------------------------------------------------
6 | import math
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 | import torch.nn.init
12 | from layers import seq2vec
13 | from torchvision.models.resnet import resnet18
14 |
15 |
16 | class FC(nn.Module):
17 | def __init__(self, in_size, out_size, dropout_r=0., use_relu=True):
18 | super(FC, self).__init__()
19 | self.dropout_r = dropout_r
20 | self.use_relu = use_relu
21 |
22 | self.linear = nn.Linear(in_size, out_size)
23 |
24 | if use_relu:
25 | self.relu = nn.ReLU(inplace=True)
26 |
27 | if dropout_r > 0:
28 | self.dropout = nn.Dropout(dropout_r)
29 |
30 | def forward(self, x):
31 | x = self.linear(x)
32 |
33 | if self.use_relu:
34 | x = self.relu(x)
35 |
36 | if self.dropout_r > 0:
37 | x = self.dropout(x)
38 |
39 | return x
40 |
41 |
42 | class MLP(nn.Module):
43 | def __init__(self, in_size, mid_size, out_size, dropout_r=0., use_relu=True):
44 | super(MLP, self).__init__()
45 |
46 | self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
47 | self.linear = nn.Linear(mid_size, out_size)
48 |
49 | def forward(self, x):
50 | out = self.fc(x)
51 | return self.linear(out)
52 |
53 |
54 | class LayerNorm(nn.Module):
55 | def __init__(self, size, eps=1e-6):
56 | super(LayerNorm, self).__init__()
57 | self.eps = eps
58 |
59 | self.a_2 = nn.Parameter(torch.ones(size))
60 | self.b_2 = nn.Parameter(torch.zeros(size))
61 |
62 | def forward(self, x):
63 | mean = x.mean(-1, keepdim=True)
64 | std = x.std(-1, keepdim=True)
65 |
66 | return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
67 |
68 |
69 |
70 | # ------------------------------
71 | # ---- Multi-Head Attention ----
72 | # ------------------------------
73 |
74 | class MHAtt(nn.Module):
75 | def __init__(self, __C):
76 | super(MHAtt, self).__init__()
77 | self.__C = __C
78 |
79 | self.linear_v = nn.Linear(__C['fusion']['mca_HIDDEN_SIZE'], __C['fusion']['mca_HIDDEN_SIZE'])
80 | self.linear_k = nn.Linear(__C['fusion']['mca_HIDDEN_SIZE'], __C['fusion']['mca_HIDDEN_SIZE'])
81 | self.linear_q = nn.Linear(__C['fusion']['mca_HIDDEN_SIZE'], __C['fusion']['mca_HIDDEN_SIZE'])
82 | self.linear_merge = nn.Linear(__C['fusion']['mca_HIDDEN_SIZE'], __C['fusion']['mca_HIDDEN_SIZE'])
83 |
84 | self.dropout = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
85 |
86 | def forward(self, v, k, q, mask=None):
87 | n_batches = q.size(0)
88 |
89 | v = self.linear_v(v).view(
90 | n_batches,
91 | -1,
92 | self.__C['fusion']['mca_MULTI_HEAD'],
93 | self.__C['fusion']['mca_HIDDEN_SIZE_HEAD']
94 | ).transpose(1, 2)
95 |
96 | k = self.linear_k(k).view(
97 | n_batches,
98 | -1,
99 | self.__C['fusion']['mca_MULTI_HEAD'],
100 | self.__C['fusion']['mca_HIDDEN_SIZE_HEAD']
101 | ).transpose(1, 2)
102 |
103 | q = self.linear_q(q).view(
104 | n_batches,
105 | -1,
106 | self.__C['fusion']['mca_MULTI_HEAD'],
107 | self.__C['fusion']['mca_HIDDEN_SIZE_HEAD']
108 | ).transpose(1, 2)
109 |
110 | atted = self.att(v, k, q, mask)
111 | atted = atted.transpose(1, 2).contiguous().view(
112 | n_batches,
113 | -1,
114 | self.__C['fusion']['mca_HIDDEN_SIZE']
115 | )
116 |
117 | atted = self.linear_merge(atted)
118 |
119 | return atted
120 |
121 | def att(self, value, key, query, mask=None):
122 | d_k = query.size(-1)
123 |
124 | scores = torch.matmul(
125 | query, key.transpose(-2, -1)
126 | ) / math.sqrt(d_k)
127 |
128 | if mask is not None:
129 | scores = scores.masked_fill(mask, -1e9)
130 |
131 | att_map = F.softmax(scores, dim=-1)
132 | att_map = self.dropout(att_map)
133 |
134 | return torch.matmul(att_map, value)
135 |
136 |
137 | # ---------------------------
138 | # ---- Feed Forward Nets ----
139 | # ---------------------------
140 |
141 | class FFN(nn.Module):
142 | def __init__(self, __C):
143 | super(FFN, self).__init__()
144 |
145 | self.mlp = MLP(
146 | in_size=__C['fusion']['mca_HIDDEN_SIZE'],
147 | mid_size=__C['fusion']['mca_FF_SIZE'],
148 | out_size=__C['fusion']['mca_HIDDEN_SIZE'],
149 | dropout_r=__C['fusion']['mca_DROPOUT_R'],
150 | use_relu=True
151 | )
152 |
153 | def forward(self, x):
154 | return self.mlp(x)
155 |
156 |
157 | # ------------------------
158 | # ---- Self Attention ----
159 | # ------------------------
160 |
161 | class SA(nn.Module):
162 | def __init__(self, __C):
163 | super(SA, self).__init__()
164 |
165 | self.mhatt = MHAtt(__C)
166 | self.ffn = FFN(__C)
167 |
168 | self.dropout1 = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
169 | self.norm1 = LayerNorm(__C['fusion']['mca_HIDDEN_SIZE'])
170 |
171 | self.dropout2 = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
172 | self.norm2 = LayerNorm(__C['fusion']['mca_HIDDEN_SIZE'])
173 |
174 | def forward(self, x, x_mask=None):
175 | x = self.norm1(x + self.dropout1(
176 | self.mhatt(x, x, x, x_mask)
177 | ))
178 |
179 | x = self.norm2(x + self.dropout2(
180 | self.ffn(x)
181 | ))
182 |
183 | return x
184 |
185 |
186 | # -------------------------------
187 | # ---- Self Guided Attention ----
188 | # -------------------------------
189 |
190 | class SGA(nn.Module):
191 | def __init__(self, __C):
192 | super(SGA, self).__init__()
193 |
194 | self.mhatt1 = MHAtt(__C)
195 | self.mhatt2 = MHAtt(__C)
196 | self.ffn = FFN(__C)
197 |
198 | self.dropout1 = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
199 | self.norm1 = LayerNorm(__C['fusion']['mca_HIDDEN_SIZE'])
200 |
201 | self.dropout2 = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
202 | self.norm2 = LayerNorm(__C['fusion']['mca_HIDDEN_SIZE'])
203 |
204 | self.dropout3 = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])
205 | self.norm3 = LayerNorm(__C['fusion']['mca_HIDDEN_SIZE'])
206 |
207 | def forward(self, x, y, x_mask=None, y_mask=None):
208 | x = self.norm1(x + self.dropout1(
209 | self.mhatt1(x, x, x, x_mask)
210 | ))
211 |
212 | x = self.norm2(x + self.dropout2(
213 | self.mhatt2(y, y, x, y_mask)
214 | ))
215 |
216 | x = self.norm3(x + self.dropout3(
217 | self.ffn(x)
218 | ))
219 |
220 | return x
221 |
222 |
223 | def l2norm(X, dim, eps=1e-8):
224 | """L2-normalize columns of X
225 | """
226 | norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
227 | X = torch.div(X, norm)
228 | return X
229 |
230 | class ExtractFeature(nn.Module):
231 | def __init__(self, opt = {}, finetune=True):
232 | super(ExtractFeature, self).__init__()
233 |
234 | self.embed_dim = opt['embed']['embed_dim']
235 |
236 | self.resnet = resnet18(pretrained=True)
237 | for param in self.resnet.parameters():
238 | param.requires_grad = finetune
239 |
240 | self.pool_2x2 = nn.MaxPool2d(4)
241 |
242 | self.up_sample_2 = nn.Upsample(scale_factor=2, mode='nearest')
243 | self.up_sample_4 = nn.Upsample(scale_factor=4, mode='nearest')
244 |
245 | self.linear = nn.Linear(in_features=512, out_features=self.embed_dim)
246 |
247 | def forward(self, img):
248 | x = self.resnet.conv1(img)
249 | x = self.resnet.bn1(x)
250 | x = self.resnet.relu(x)
251 | x = self.resnet.maxpool(x)
252 |
253 | f1 = self.resnet.layer1(x)
254 | f2 = self.resnet.layer2(f1)
255 | f3 = self.resnet.layer3(f2)
256 | f4 = self.resnet.layer4(f3)
257 |
258 | # Lower Feature
259 | f2_up = self.up_sample_2(f2)
260 | lower_feature = torch.cat([f1, f2_up], dim=1)
261 |
262 | # Higher Feature
263 | f4_up = self.up_sample_2(f4)
264 | higher_feature = torch.cat([f3, f4_up], dim=1)
265 | # higher_feature = self.up_sample_4(higher_feature)
266 |
267 | # batch * 512
268 | feature = f4.view(f4.shape[0], 512, -1)
269 | solo_feature = self.linear(torch.mean(feature,dim=-1))
270 |
271 | # torch.Size([10, 192, 64, 64])
272 | # torch.Size([10, 768, 64, 64])
273 | # torch.Size([10, 512])
274 | return lower_feature, higher_feature, solo_feature
275 |
276 | class VSA_Module(nn.Module):
277 | def __init__(self, opt = {}):
278 | super(VSA_Module, self).__init__()
279 |
280 | # extract value
281 | channel_size = opt['multiscale']['multiscale_input_channel']
282 | out_channels = opt['multiscale']['multiscale_output_channel']
283 | embed_dim = opt['embed']['embed_dim']
284 |
285 | # sub sample
286 | self.LF_conv = nn.Conv2d(in_channels=192, out_channels=channel_size, kernel_size=3, stride=4)
287 | self.HF_conv = nn.Conv2d(in_channels=768, out_channels=channel_size, kernel_size=1, stride=1)
288 |
289 | # visual attention
290 | self.conv1x1_1 = nn.Conv2d(in_channels=channel_size*2, out_channels=out_channels, kernel_size=1)
291 | self.conv1x1_2 = nn.Conv2d(in_channels=channel_size*2, out_channels=out_channels, kernel_size=1)
292 |
293 | # solo attention
294 | self.solo_attention = nn.Linear(in_features=256, out_features=embed_dim)
295 |
296 | def forward(self, lower_feature, higher_feature, solo_feature):
297 |
298 | # b x channel_size x 16 x 16
299 | lower_feature = self.LF_conv(lower_feature)
300 | higher_feature = self.HF_conv(higher_feature)
301 |
302 | # concat
303 | concat_feature = torch.cat([lower_feature, higher_feature], dim=1)
304 |
305 | # residual
306 | concat_feature = higher_feature.mean(dim=1,keepdim=True).expand_as(concat_feature) + concat_feature
307 |
308 | # attention
309 | main_feature = self.conv1x1_1(concat_feature)
310 | attn_feature = torch.sigmoid(self.conv1x1_2(concat_feature).view(concat_feature.shape[0],1,-1)).view(concat_feature.shape[0], 1, main_feature.shape[2], main_feature.shape[3])
311 | atted_feature = (main_feature*attn_feature).squeeze(dim=1).view(attn_feature.shape[0], -1)
312 |
313 | # solo attention
314 | solo_att = torch.sigmoid(self.solo_attention(atted_feature))
315 | solo_feature = solo_feature*solo_att
316 |
317 | return l2norm(solo_feature, -1)
318 |
319 | class Skipthoughts_Embedding_Module(nn.Module):
320 | def __init__(self, vocab, opt, out_dropout=-1):
321 | super(Skipthoughts_Embedding_Module, self).__init__()
322 | self.opt = opt
323 | self.vocab_words = vocab
324 |
325 | self.seq2vec = seq2vec.factory(self.vocab_words, self.opt['seq2vec'], self.opt['seq2vec']['dropout'])
326 |
327 | self.to_out = nn.Linear(in_features=2400, out_features=self.opt['embed']['embed_dim'])
328 | self.dropout = out_dropout
329 |
330 | def forward(self, input_text ):
331 | x_t_vec = self.seq2vec(input_text)
332 | out = F.relu(self.to_out(x_t_vec))
333 | if self.dropout >= 0:
334 | out = F.dropout(out, self.dropout)
335 |
336 | return out
337 |
338 | def params_count(model):
339 | count = 0
340 | for p in model.parameters():
341 | c = 1
342 | for i in range(p.dim()):
343 | c *= p.size(i)
344 | count += c
345 | return count
346 |
347 | def bcosine_sim(im, s):
348 | """Cosine similarity between all the image and sentence pairs
349 | """
350 | im = l2norm(im, dim=-1)
351 | s = l2norm(s, dim=-1)
352 | w12 = im.mm(s.t())
353 | return w12
354 | # ====================================================================
355 | # About GCN
356 | class GCN(nn.Module):
357 | def __init__(self , dim_in=20 , dim_out=20, dim_embed = 512):
358 | super(GCN,self).__init__()
359 |
360 | self.fc1 = nn.Linear(dim_in ,dim_in,bias=False)
361 | self.fc2 = nn.Linear(dim_in,dim_in//2,bias=False)
362 | self.fc3 = nn.Linear(dim_in//2,dim_out,bias=False)
363 |
364 | self.out = nn.Linear(dim_out * dim_in, dim_embed)
365 |
366 | def forward(self, A, X):
367 | batch, objects, rep = X.shape[0], X.shape[1], X.shape[2]
368 |
369 | # first layer
370 | tmp = (A.bmm(X)).view(-1, rep)
371 | X = F.relu(self.fc1(tmp))
372 | X = X.view(batch, -1, X.shape[-1])
373 |
374 | # second layer
375 | tmp = (A.bmm(X)).view(-1, X.shape[-1])
376 | X = F.relu(self.fc2(tmp))
377 | X = X.view(batch, -1, X.shape[-1])
378 |
379 | # third layer
380 | tmp = (A.bmm(X)).view(-1, X.shape[-1])
381 | X = F.relu(self.fc3(tmp))
382 | X = X.view(batch, -1)
383 |
384 | return l2norm(self.out(X), -1)
385 |
386 |
387 |
388 |
389 |
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 |
399 |
400 |
401 |
402 |
--------------------------------------------------------------------------------
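MHAtt above is standard multi-head scaled dot-product attention, softmax(Q K^T / sqrt(d_k)) V, split over mca_MULTI_HEAD heads. A minimal self-attention sketch, assuming MHAtt from AMFMN_utils.py and a config dict shaped like the 'fusion' section of the RSITMD yaml files:

    import torch

    cfg = {'fusion': {'mca_HIDDEN_SIZE': 512, 'mca_MULTI_HEAD': 8,
                      'mca_HIDDEN_SIZE_HEAD': 64, 'mca_DROPOUT_R': 0.1,
                      'mca_FF_SIZE': 1024}}
    att = MHAtt(cfg)
    x = torch.randn(2, 10, 512)   # (batch, tokens, hidden)
    out = att(x, x, x)            # self-attention; output shape (2, 10, 512)

Note that mca_HIDDEN_SIZE must equal mca_MULTI_HEAD * mca_HIDDEN_SIZE_HEAD (here 512 = 8 * 64) for the head split/merge views to be valid.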
/predict/layers/LW_MCR.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/05
6 |
7 | import copy
8 |
9 | from .MCR_utils import *
10 |
11 |
12 | class unsupervised_Visual_Model(nn.Module):
13 | def __init__(self, feature_dim=128):
14 | super(unsupervised_Visual_Model, self).__init__()
15 |
16 | self.extract_feature = ExtractFeature()
17 |
18 | # projection head
19 | self.g = nn.Sequential(nn.Linear(512, 512, bias=False), nn.BatchNorm1d(512),
20 | nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
21 |
22 | def forward(self, x):
23 | feature, _, _ = self.extract_feature(x)
24 | out = self.g(feature)
25 | return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
26 |
27 | class unsupervised_Text_Model(nn.Module):
28 | def __init__(self, feature_dim=128, vocab_words=[]):
29 | super(unsupervised_Text_Model, self).__init__()
30 |
31 |
32 | self.text_feature = textCNN(
33 | vocab= vocab_words,
34 | opt = None
35 | )
36 | # projection head
37 | self.g = nn.Sequential(nn.Linear(512, 512, bias=False), nn.BatchNorm1d(512),
38 | nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
39 |
40 | def forward(self, x):
41 | feature = self.text_feature(x)
42 | out = self.g(feature)
43 | return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
44 |
45 | class BaseModel(nn.Module):
46 | def __init__(self, opt={}, vocab_words=[]):
47 | super(BaseModel, self).__init__()
48 |
49 | # img feature
50 | self.extract_feature = ExtractFeature()
51 |
52 | # self.pre_train_extroctor = unsupervised_Visual_Model()
53 | # state = torch.load('./model/unsupervised_pretrain_model/visual_pre.pth')
54 | # self.pre_train_extroctor.load_state_dict(state)
55 | # del state
56 | # self.extract_feature = self.pre_train_extroctor.extract_feature
57 | # del self.pre_train_extroctor
58 |
59 |
60 |
61 | self.text_feature = textCNN(
62 | vocab= vocab_words,
63 | opt = opt
64 | )
65 |
66 | # self.pre_train_extroctor = unsupervised_Text_Model(vocab_words= vocab_words,)
67 | # state = torch.load('./model/unsupervised_pretrain_model/text_pre.pth')
68 | # self.pre_train_extroctor.load_state_dict(state)
69 | # del state
70 | # self.text_feature = self.pre_train_extroctor.text_feature
71 | # del self.pre_train_extroctor
72 |
73 | self.Eiters = 0
74 |
75 | self.model_name = 'LW_MCR'
76 |
77 |
78 | def forward(self, img, text, text_lens=None):
79 |
80 | # extract features
81 | visual_feature, lower_feature, higher_feature = self.extract_feature(img)
82 |
83 | # text features
84 | text_feature = self.text_feature(text)
85 |
86 | # print("visual_feature.shape:{}".format(visual_feature.shape))
87 | # print("text_feature.shape:{}".format(text_feature.shape))
88 | # visual_feature.shape: torch.Size([8, 512])
89 | # text_feature.shape: torch.Size([8, 512])
90 | # print("=========================")
91 | # exit(0)
92 |
93 | sims = cosine_sim(visual_feature, text_feature)
94 |
95 | return sims, [visual_feature, text_feature, lower_feature, higher_feature]
96 |
97 | def factory(opt, vocab_words, cuda=True, data_parallel=True,device_ids=[0]):
98 | opt = copy.copy(opt)
99 |
100 | model = BaseModel(opt, vocab_words)
101 |
102 | if data_parallel:
103 | model = nn.DataParallel(model, device_ids=device_ids).cuda()
104 | if not cuda:
105 | raise ValueError
106 |
107 | if cuda:
108 | model.cuda()
109 |
110 | return model
111 |
112 |
113 |
--------------------------------------------------------------------------------
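A shape sketch for the unsupervised visual branch above (assuming the class is importable and a 256x256 RGB input, for which the squeezenet backbone yields 32x32 transformed maps): the backbone feature is 512-d and the projection head g maps it to feature_dim, both returned L2-normalized.

    import torch

    model = unsupervised_Visual_Model(feature_dim=128)
    imgs = torch.randn(8, 3, 256, 256)
    feature, out = model(imgs)    # feature: (8, 512), out: (8, 128)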
/predict/layers/MCR_utils.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/05
6 |
7 | import torch
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 | import torch.nn.init
11 |
12 |
13 | def l2norm(X, dim, eps=1e-8):
14 | """L2-normalize columns of X
15 | """
16 | norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
17 | X = torch.div(X, norm)
18 | return X
19 |
20 | # ----------------------------------------------------------------------------
21 | # ------------------------- Visual Feature Encoding ------------------
22 |
23 | class ChannelAttention(nn.Module):
24 | def __init__(self, in_planes, ratio=16):
25 | super(ChannelAttention, self).__init__()
26 | self.avg_pool = nn.AdaptiveAvgPool2d(1)
27 | self.max_pool = nn.AdaptiveMaxPool2d(1)
28 |
29 | self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 4, 1, bias=False),
30 | nn.ReLU(),
31 | nn.Conv2d(in_planes // 4, in_planes, 1, bias=False))
32 | self.sigmoid = nn.Sigmoid()
33 |
34 | def forward(self, x):
35 | avg_out = self.fc(self.avg_pool(x))
36 | max_out = self.fc(self.max_pool(x))
37 | out = avg_out + max_out
38 | return self.sigmoid(out)
39 |
40 |
41 | class SpatialAttention(nn.Module):
42 | def __init__(self, kernel_size=7):
43 | super(SpatialAttention, self).__init__()
44 |
45 | self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
46 | self.sigmoid = nn.Sigmoid()
47 |
48 | def forward(self, x):
49 | avg_out = torch.mean(x, dim=1, keepdim=True)
50 | max_out, _ = torch.max(x, dim=1, keepdim=True)
51 | x = torch.cat([avg_out, max_out], dim=1)
52 | x = self.conv1(x)
53 | return self.sigmoid(x)
54 |
55 | class Fire(nn.Module):
56 |
57 | def __init__(self, inplanes, squeeze_planes,
58 | expand1x1_planes, expand3x3_planes):
59 | super(Fire, self).__init__()
60 | self.inplanes = inplanes
61 | self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
62 | self.squeeze_activation = nn.ReLU(inplace=True)
63 | self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
64 | kernel_size=1)
65 | self.expand1x1_activation = nn.ReLU(inplace=True)
66 | self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
67 | kernel_size=3, padding=1)
68 | self.expand3x3_activation = nn.ReLU(inplace=True)
69 |
70 | def forward(self, x):
71 | x = self.squeeze_activation(self.squeeze(x))
72 | return torch.cat([
73 | self.expand1x1_activation(self.expand1x1(x)),
74 | self.expand3x3_activation(self.expand3x3(x))
75 | ], 1)
76 |
77 | class redefine_squeezenet(nn.Module):
78 | def __init__(self, ):
79 | super(redefine_squeezenet, self).__init__()
80 |
81 | self.conv_1 = nn.Sequential(
82 | nn.Conv2d(3, 64, kernel_size=3, stride=2),
83 | nn.ReLU(inplace=True),
84 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
85 | Fire(64, 16, 64, 64),
86 | Fire(128, 16, 64, 64),
87 | )
88 | self.conv_2 = nn.Sequential(
89 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
90 | Fire(128, 32, 128, 128),
91 | Fire(256, 32, 128, 128),
92 | nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
93 | Fire(256, 48, 192, 192),
94 | Fire(384, 48, 192, 192),
95 | Fire(384, 64, 256, 256),
96 | Fire(512, 64, 256, 256),
97 | )
98 | # Final convolution is initialized differently from the rest
99 | final_conv = nn.Conv2d(512, 1000, kernel_size=1)
100 | self.classifier = nn.Sequential(
101 | nn.Dropout(p=0.5),
102 | final_conv,
103 | nn.ReLU(inplace=True),
104 | nn.AdaptiveAvgPool2d((1, 1))
105 | )
106 |
107 | def forward(self, img):
108 | low_feature = self.conv_1(img)
109 | high_feature = self.conv_2(low_feature)
110 | results = self.classifier(high_feature)
111 | return results
112 |
113 | def convert_squeezenet(squeeze, net):  # vgg16 ships with pytorch
114 | vgg_items = net.state_dict().items()
115 | vgg16_items = squeeze.items()
116 |
117 | pretrain_model = {}
118 | j = 0
119 | for k, v in net.state_dict().items():  # fill in sequentially, in order
120 | v = list(vgg16_items)[j][1]
121 | k = list(vgg_items)[j][0]
122 | pretrain_model[k] = v
123 | j += 1
124 | return pretrain_model
125 |
126 | class extract_by_squeezenet(nn.Module):
127 | def __init__(self, ):
128 | super(extract_by_squeezenet, self).__init__()
129 |
130 | net = redefine_squeezenet()
131 |
132 | self.conv_1 = net.conv_1
133 | self.conv_2 = net.conv_2
134 |
135 | def forward(self, img):
136 | low_feature = self.conv_1(img)
137 | high_feature = self.conv_2(low_feature)
138 | return low_feature, high_feature
139 |
140 | class Pretrain_visual_extractor(nn.Module):
141 | def __init__(self, parms=None):
142 | super(Pretrain_visual_extractor, self).__init__()
143 | # backbone = resnet18(pretrained=True)
144 | # self.backbone = myResnet(backbone)
145 | self.backbone = extract_by_squeezenet()
146 |
147 | # lower and higher transformation
148 | self.lower_trans = nn.Sequential(
149 | nn.Conv2d(in_channels=128, out_channels=64, stride=2, kernel_size=3, padding=1),
150 | nn.BatchNorm2d(num_features=64),
151 | nn.ReLU(inplace=True)
152 | )
153 | self.higher_trans = nn.Sequential(
154 | nn.Upsample(scale_factor=2, mode='nearest'),
155 | nn.Conv2d(in_channels=512, out_channels=64, stride=1, kernel_size=1, padding=1),
156 | nn.BatchNorm2d(num_features=64),
157 | nn.ReLU(inplace=True)
158 | )
159 |
160 | # BCNN
161 |
162 | # image self attention
163 | self.ISA_1_c = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3, stride=2, padding=1)
164 | self.ISA_1_b = nn.BatchNorm2d(4)
165 | self.ISA_1_p = nn.PReLU()
166 | self.ISA_1_ca = ChannelAttention(4)
167 | self.ISA_1_sa = SpatialAttention()
168 | self.ISA_1_pool = nn.MaxPool2d(kernel_size=2)
169 |
170 | self.ISA_2_c = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3, stride=2, padding=1)
171 | self.ISA_2_b = nn.BatchNorm2d(8)
172 | self.ISA_2_p = nn.PReLU()
173 | self.ISA_2_ca = ChannelAttention(8)
174 | self.ISA_2_sa = SpatialAttention()
175 |
176 | self.fc = torch.nn.Linear(64*8, 10)
177 |
178 | def forward(self, img):
179 | N = img.size()[0]
180 |
181 | # backbone
182 | lower_feature, higher_feature = self.backbone(img)
183 |
184 | # lower and higher transformation
185 | lower_feature = self.lower_trans(lower_feature)
186 | higher_feature = self.higher_trans(higher_feature)
187 |
188 | # BCNN
189 | lower_feature = lower_feature.view(N, 64, 32*32)
190 | higher_feature = higher_feature.view(N, 64, 32*32)
191 | X = torch.bmm(lower_feature, torch.transpose(higher_feature, 1, 2)) / (32 ** 2)
192 |
193 | X = torch.sqrt(X + 1e-5)
194 | X = torch.nn.functional.normalize(X)
195 | X = torch.unsqueeze(X, dim=1)
196 |
197 | # image self attention
198 | X = self.ISA_1_c(X)
199 | X = self.ISA_1_b(X)
200 | X = self.ISA_1_p(X)
201 | X = self.ISA_1_ca(X) * X
202 | X = self.ISA_1_sa(X) * X
203 | X = self.ISA_1_pool(X)
204 |
205 | X = self.ISA_2_c(X)
206 | X = self.ISA_2_b(X)
207 | X = self.ISA_2_p(X)
208 | X = self.ISA_2_ca(X) * X
209 | X = self.ISA_2_sa(X) * X
210 |
211 | X = X.view(N, -1)
212 | X = self.fc(X)
213 |
214 | return X
215 |
216 | class ExtractFeature(nn.Module):
217 | def __init__(self):
218 | super(ExtractFeature, self).__init__()
219 | self.pre_train_extroctor = Pretrain_visual_extractor()
220 |
221 | self.backbone = self.pre_train_extroctor.backbone
222 |
223 | # lower and higher transformation
224 | self.lower_trans = self.pre_train_extroctor.lower_trans
225 | self.higher_trans = self.pre_train_extroctor.higher_trans
226 |
227 | # BCNN
228 |
229 | # image self attention
230 | self.ISA_1_c = self.pre_train_extroctor.ISA_1_c
231 | self.ISA_1_b = self.pre_train_extroctor.ISA_1_b
232 | self.ISA_1_p = self.pre_train_extroctor.ISA_1_p
233 | self.ISA_1_ca = self.pre_train_extroctor.ISA_1_ca
234 | self.ISA_1_sa = self.pre_train_extroctor.ISA_1_sa
235 | self.ISA_1_pool = self.pre_train_extroctor.ISA_1_pool
236 | self.dropout_1 = nn.Dropout(0.2)
237 |
238 | self.ISA_2_c = self.pre_train_extroctor.ISA_2_c
239 | self.ISA_2_b = self.pre_train_extroctor.ISA_2_b
240 | self.ISA_2_p = self.pre_train_extroctor.ISA_2_p
241 | self.ISA_2_ca = self.pre_train_extroctor.ISA_2_ca
242 | self.ISA_2_sa = self.pre_train_extroctor.ISA_2_sa
243 | self.dropout_2 = nn.Dropout(0.2)
244 |
245 |
246 | del self.pre_train_extroctor
247 |
248 | self.fc = nn.Linear(512, 512)
249 |
250 | def forward(self, img):
251 |
252 | N = img.size()[0]
253 |
254 | # backbone
255 | lower_feature, higher_feature = self.backbone(img)
256 |
257 | # lower and higher transformation
258 | lf = self.lower_trans(lower_feature)
259 | hf = self.higher_trans(higher_feature)
260 | # print("lower_feature.shape:{}".format(lower_feature.shape))
261 | # print("higher_feature.shape:{}".format(higher_feature.shape))
262 | # lower_feature.shape: torch.Size([8, 64, 32, 32])
263 | # higher_feature.shape: torch.Size([8, 64, 32, 32])
264 | # print("=========================")
265 |
266 | # BCNN
267 | lower_feature = lf.view(N, 64, 32*32)
268 | higher_feature = hf.view(N, 64, 32*32)
269 | X = torch.bmm(lower_feature, torch.transpose(higher_feature, 1, 2)) / (32 ** 2)
270 |
271 | X = torch.sqrt(X + 1e-5)
272 | X = torch.nn.functional.normalize(X)
273 | X = torch.unsqueeze(X, dim=1)
274 |
275 | # print("fusion.shape:{}".format(X.shape))
276 | # fusion.shape: torch.Size([8, 1, 64, 64])
277 | # print("=========================")
278 |
279 | # image self attention
280 | X = self.ISA_1_c(X)
281 | X = self.ISA_1_b(X)
282 | X = self.ISA_1_p(X)
283 | X = self.ISA_1_ca(X) * X
284 | X = self.ISA_1_sa(X) * X
285 | X = self.ISA_1_pool(X)
286 | X = self.dropout_1(X)
287 |
288 | X = self.ISA_2_c(X)
289 | X = self.ISA_2_b(X)
290 | X = self.ISA_2_p(X)
291 | X = self.ISA_2_ca(X) * X
292 | X = self.ISA_2_sa(X) * X
293 | X = self.dropout_2(X)
294 |
295 | X = X.view(N, -1)
296 |
297 | X = self.fc(X)
298 |
299 | return l2norm(X, -1), lf, hf
300 |
301 | def params_count(model):
302 | count = 0
303 | for p in model.parameters():
304 | c = 1
305 | for i in range(p.dim()):
306 | c *= p.size(i)
307 | count += c
308 | return count
309 | #
310 | # model = ExtractFeature()
311 | # from torch.autograd import Variable
312 | # input = Variable(torch.zeros(10, 3, 256, 256))
313 | # feature = model(input)
314 | # print(feature.shape)
315 | # print(params_count(model))
316 | #
317 | # exit()
318 |
319 | # ----------------------------------------------------------------------------
320 | # ------------------------- Text Feature Encoding ------------------
321 |
322 | class textCNN(nn.Module):
323 | def __init__(self, vocab, opt, lstm_dropout=0.25, out_dropout=-1):
324 | super(textCNN, self).__init__()
325 | Vocab = len(vocab)+1 ## number of known words
326 | Dim = 300 ## dimension of each word vector
327 | Ci = 1 ## number of input channels
328 | Knum = 100 ## number of kernels per size
329 | Ks = [3,4,5] ## list of kernel sizes, e.g. [2,3,4]
330 | Cla = 512
331 |
332 | self.embed = nn.Embedding(Vocab, Dim) ## word embeddings, randomly initialized here
333 | self.convs = nn.ModuleList([nn.Conv2d(Ci, Knum, (K, Dim)) for K in Ks]) ## convolution layers
334 |
335 | self.dropout = nn.Dropout(lstm_dropout)
336 | self.fc = nn.Linear(len(Ks)*Knum, Cla) ## fully-connected layer
337 |
338 | def forward(self, x):
339 | x = self.embed(x) # (8, 18, 300)
340 |
341 | x = x.unsqueeze(1) # (8, 1, 18, 300)
342 |
343 | x = [F.relu(conv(x)).squeeze(3) for conv in self.convs] # len(Ks)*(N,Knum,W)
344 |
345 | x = [F.max_pool1d(line, line.size(2)).squeeze(2) for line in x] # len(Ks)*(N,Knum)
346 |
347 | x = torch.cat(x, 1) # (N,Knum*len(Ks))
348 |
349 | x = self.dropout(x) # (N, len(Ks)*Co)
350 | logit = self.fc(x) # (N, C)
351 |
352 | return l2norm(logit, -1)
353 |
354 | def cosine_sim(im, s):
355 | """Cosine similarity between all the image and sentence pairs
356 | """
357 | # w1 = l2norm(im, dim=-1)
358 | # w2 = l2norm(s, dim=-1)
359 | w12 = im.mm(s.t())
360 | return w12
361 |
--------------------------------------------------------------------------------
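The BCNN step inside ExtractFeature above is bilinear (outer-product) pooling of the two transformed 64-channel 32x32 maps, followed by a stabilized square root and L2 normalization. A standalone sketch with non-negative dummy activations (as the real post-ReLU inputs are):

    import torch

    N = 2
    lf = torch.rand(N, 64, 32 * 32)                    # lower-level map, flattened
    hf = torch.rand(N, 64, 32 * 32)                    # higher-level map, flattened
    X = torch.bmm(lf, hf.transpose(1, 2)) / (32 ** 2)  # (N, 64, 64) bilinear matrix
    X = torch.sqrt(X + 1e-5)                           # stabilized square root
    X = torch.nn.functional.normalize(X)               # L2 normalization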
/predict/layers/__pycache__/AMFMN.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/layers/__pycache__/AMFMN.cpython-37.pyc
--------------------------------------------------------------------------------
/predict/layers/__pycache__/AMFMN_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/layers/__pycache__/AMFMN_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/predict/layers/__pycache__/seq2vec.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/layers/__pycache__/seq2vec.cpython-37.pyc
--------------------------------------------------------------------------------
/predict/layers/seq2vec.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information"
3 | # Yuan, Zhiqiang and Zhang, Wenkai and Changyuan Tian and Xuee, Rong and Zhengyuan Zhang and Wang, Hongqi and Fu, Kun and Sun, Xian
4 | # Written by YuanZhiqiang, 2021. Our code is based on AMFMN
5 | # ------------------------------------------------------------
6 |
7 | # A revision version from Skip-thoughs
8 | import skipthoughts
9 |
10 |
11 | def factory(vocab_words, opt , dropout=0.25):
12 | if opt['arch'] == 'skipthoughts':
13 | st_class = getattr(skipthoughts, opt['type'])
14 | seq2vec = st_class(opt['dir_st'],
15 | vocab_words,
16 | dropout=dropout,
17 | fixed_emb=opt['fixed_emb'])
18 |
19 | else:
20 | raise NotImplementedError
21 | return seq2vec
22 |
--------------------------------------------------------------------------------
/predict/model_encoder.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/05
6 |
7 | import nltk
8 | import numpy as np
9 | import torch
10 | import torchvision.transforms as transforms
11 | from PIL import Image
12 |
13 |
14 | def l2norm(X, dim, eps=1e-8):
15 | """L2-normalize columns of X
16 | """
17 | norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
18 | X = torch.div(X, norm)
19 | return X
20 |
21 | # Encoder for LW_MCR
22 | class EncoderLWMCR:
23 | def image_encoder(self, model, image_path):
24 | transform = transforms.Compose([
25 | transforms.Resize((256, 256)),
26 | transforms.ToTensor(),
27 | transforms.Normalize((0.485, 0.456, 0.406),
28 | (0.229, 0.224, 0.225))])
29 |
30 | # data preprocessing
31 | image = Image.open(image_path).convert('RGB')
32 | image = transform(image) # torch.Size([3, 256, 256])
33 | image = torch.unsqueeze(image, dim=0).cuda()
34 |
35 | visual_feature, lower_feature, higher_feature = model.extract_feature(image)
36 | global_feature = l2norm(visual_feature, dim=-1)
37 |
38 | # to cpu vector
39 | vector = global_feature.cpu().detach().numpy()[0]
40 |
41 | return vector
42 |
43 | def text_encoder(self, model, vocab, text):
44 |
45 | # Convert caption (string) to word ids.
46 | tokens = nltk.tokenize.word_tokenize(text.lower())
47 | punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
48 | tokens = [k for k in tokens if k not in punctuations]
49 | tokens_UNK = [k if k in vocab.word2idx.keys() else '' for k in tokens]
50 |
51 | caption = []
52 | caption.extend([vocab(token) for token in tokens_UNK])
53 | caption = torch.LongTensor(caption)
54 | caption = torch.unsqueeze(caption, dim=0).cuda()
55 |
56 | # model processing
57 | text_feature = model.text_feature(caption)
58 | text_feature = l2norm(text_feature, dim=-1)
59 |
60 | # to cpu vector
61 | vector = text_feature.cpu().detach().numpy()[0]
62 |
63 | return vector
64 |
65 | # Encoder for AMFMN
66 | class EncoderAMFMN:
67 | def image_encoder(self, model, image_path):
68 | transform = transforms.Compose([
69 | transforms.Resize((256, 256)),
70 | transforms.ToTensor(),
71 | transforms.Normalize((0.485, 0.456, 0.406),
72 | (0.229, 0.224, 0.225))])
73 | # check image size
74 | # image_path = trans_bigimage_to_small(image_path)
75 |
76 | # data preprocessing
77 | image = Image.open(image_path).convert('RGB')
78 | image = transform(image) # torch.Size([3, 256, 256])
79 | image = torch.unsqueeze(image, dim=0).cuda()
80 |
81 | # model processing
82 | lower_feature, higher_feature, solo_feature = model.extract_feature(image)
83 | global_feature = model.mvsa(lower_feature, higher_feature, solo_feature)
84 | global_feature = l2norm(global_feature, dim=-1)
85 |
86 | # to cpu vector
87 | vector = global_feature.cpu().detach().numpy()[0]
88 |
89 | return vector
90 |
91 | def text_encoder(self, model, vocab, text):
92 |
93 | # Convert caption (string) to word ids.
94 | tokens = nltk.tokenize.word_tokenize(text.lower())
95 | punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
96 | tokens = [k for k in tokens if k not in punctuations]
97 | tokens_UNK = [k if k in vocab.word2idx.keys() else '' for k in tokens]
98 |
99 | caption = []
100 | caption.extend([vocab(token) for token in tokens_UNK])
101 | caption = torch.LongTensor(caption)
102 | caption = torch.unsqueeze(caption, dim=0).cuda()
103 | caption = caption.expand((2, caption.shape[-1]))  # duplicated along the batch dim, presumably so downstream layers see batch > 1
104 |
105 | # model processing
106 | text_feature = model.text_feature(caption)
107 | text_feature = l2norm(text_feature, dim=-1)
108 |
109 | # to cpu vector
110 | vector = text_feature.cpu().detach().numpy()[0]
111 |
112 | return vector
113 |
114 | class Encoder:
115 | def __init__(self, model):
116 | if model.model_name == 'AMFMN':
117 | self.encoder = EncoderAMFMN()
118 | elif model.model_name == 'LW_MCR':
119 | self.encoder = EncoderLWMCR()
120 | else:
121 | raise NotImplementedError
122 |
123 | def cosine_sim(self, image_vector, text_vector):
124 | """
125 | Compute the cosine similarity between two vectors
126 | :param image_vector: image embedding vector
127 | :param text_vector: text embedding vector
128 | :return: similarity
129 | """
130 | if hasattr(self.encoder, 'calc_similarity'):
131 | return self.encoder.calc_similarity(image_vector, text_vector)
132 | else:
133 | image_vector = image_vector / np.linalg.norm(image_vector)
134 | text_vector = text_vector / np.linalg.norm(text_vector)
135 |
136 | similarity = np.mean(np.multiply(image_vector, text_vector))  # mean rather than sum: cosine similarity scaled by 1/dim, ranking-equivalent
137 | return similarity
138 |
139 | def image_encoder(self, model, image_path):
140 | """
141 | Provided image encoding function
142 | :param model: the model object
143 | :param image_path: path to the image
144 | :return: embedding vector
145 | """
146 |
147 | return self.encoder.image_encoder(model, image_path)
148 |
149 | def text_encoder(self, model, vocab, text):
150 | """
151 | Provided text encoding function
152 | :param model: the model object
153 | :param vocab: text vocabulary
154 | :param text: text to encode
155 | :return: embedding vector
156 | """
157 |
158 | return self.encoder.text_encoder(model, vocab, text)
159 |
160 |
161 | if __name__ == "__main__":
162 | from model_init import model_init
163 |
164 | prefix = "./"
165 | yaml_path = "option/RSITMD/RSITMD_AMFMN.yaml"
166 | test_jpg = "./test_data/sparseresidential_3814.tif"
167 | test_caption = "many airplane parked in the airport"
168 |
169 | # init model
170 | model, vocab = model_init(
171 | prefix_path = "./",
172 | yaml_path = yaml_path
173 | )
174 |
175 | # encoder
176 | encoder = Encoder(model)
177 | visual_vector = encoder.image_encoder(model, test_jpg)
178 | text_vector = encoder.text_encoder(model, vocab, test_caption)
179 |
180 | print("visual_vector:", np.shape(visual_vector))
181 | print("text_vector:", np.shape(text_vector))
182 |
183 | if len(visual_vector) == len(text_vector):
184 | print("Encoder test successful!")
185 |
186 | sims = encoder.cosine_sim(visual_vector, text_vector)
187 | print("Calc sim successful!")
188 |
189 |
190 |
191 |
192 |
193 |
194 |
--------------------------------------------------------------------------------
/predict/model_init.py:
--------------------------------------------------------------------------------
1 | #encoding:utf-8
2 | # -----------------------------------------------------------
3 | # "Exploring a Fine-Grained Multiscale Method for Cross-Modal Remote Sensing Image Retrieval"
4 | # Yuan, Zhiqiang and Zhang, Wenkai and Fu, Kun and Li, Xuan and Deng, Chubo and Wang, Hongqi and Sun, Xian
5 | # IEEE Transactions on Geoscience and Remote Sensing 2021
6 | # Written by YuanZhiqiang, 2021. Our code is based on MTFN
7 | # ------------------------------------------------------------
8 |
9 | import os
10 |
11 | import torch
12 | import yaml
13 | from vocabs import deserialize_vocab
14 |
15 |
16 | def parser_options(prefix_path, yaml_path):
17 | # load model options
18 | with open(os.path.join(prefix_path,yaml_path), 'r') as handle:
19 | options = yaml.safe_load(handle)
20 |
21 | return options
22 |
23 | def model_init(prefix_path, yaml_path):
24 | options = parser_options(prefix_path, yaml_path)
25 |
26 | # choose model
27 | if options['model']['name'] == "AMFMN":
28 | from layers import AMFMN as models
29 | elif options['model']['name'] == "LW_MCR":
30 | from layers import LW_MCR as models
31 | else:
32 | raise NotImplementedError
33 |
34 | # make vocab
35 | vocab = deserialize_vocab(os.path.join(prefix_path,options['dataset']['vocab_path']))
36 | vocab_word = sorted(vocab.word2idx.items(), key=lambda x: x[1], reverse=False)
37 | vocab_word = [tup[0] for tup in vocab_word]
38 |
39 | model = models.factory(options['model'],
40 | vocab_word,
41 | cuda=True,
42 | data_parallel=False)
43 |
44 | checkpoint = torch.load(options['optim']['resume'])
45 | model.load_state_dict(checkpoint['model'], strict=True)
46 | model.eval()
47 |
48 | return model, vocab
49 |
50 | if __name__ == "__main__":
51 | prefix = "./"
52 | yaml_path = "option/RSITMD/RSITMD_LW_MCR.yaml"
53 |
54 | model, vocab = model_init(
55 | prefix_path = "./",
56 | yaml_path = yaml_path
57 | )
58 |
59 | try:
60 | model.eval()
61 | print(vocab)
62 | print("Successfully load the model.")
63 | except Exception as e:
64 | print("Failed to load the model.")
--------------------------------------------------------------------------------
/predict/option/RSITMD/RSITMD_AMFMN.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | multiscale:
3 | multiscale_input_channel: 3
4 | multiscale_output_channel: 1
5 | cross_attention:
6 | att_type: "soft_att"
7 | seq2vec:
8 | arch: skipthoughts
9 | dir_st: /data
10 | type: BayesianUniSkip
11 | dropout: 0.25
12 | fixed_emb: False
13 | fusion:
14 | correct_local_hidden_dim: 512
15 | correct_local_hidden_drop: 0.2
16 | supplement_global_hidden_dim: 512
17 | supplement_global_hidden_drop: 0.2
18 | dynamic_fusion_dim: 512
19 | dynamic_fusion_drop: 0.2
20 | mca_DROPOUT_R: 0.1
21 | mca_HIDDEN_SIZE: 512
22 | mca_FF_SIZE: 1024
23 | mca_MULTI_HEAD: 8
24 | mca_HIDDEN_SIZE_HEAD: 64
25 | embed:
26 | embed_dim: 512
27 | global_local_weight:
28 | global: None
29 | local: None
30 | name: AMFMN
31 | dataset:
32 | datatype: rsitmd
33 | data_split:
34 | data_path: 'data/rsitmd_precomp/'
35 | image_path: '/data/rsitmd_images/'
36 | vocab_path: 'vocab/rsitmd_splits_vocab.json'
37 | local_path: './detection/representation/RSITMD/rsitmd_local.npy'
38 | batch_size: 100
39 | batch_size_val: 70
40 | workers: 3
41 | optim:
42 | epochs: 50
43 | lr: 0.0002
44 | lr_decay_param: 0.7
45 | lr_update_epoch: 20
46 | grad_clip: 0
47 | max_violation: 0
48 | margin: 0.2
49 | resume: "./checkpoints/AMFMN.tar"
50 | logs:
51 | eval_step: 1
52 | print_freq: 10
53 | ckpt_save_path: "checkpoints/RSITMD_AMFMN.tar"
54 | logger_name: 'logs/'
55 | k_fold:
56 | experiment_name: 'rsitmd_AMFMN'
57 | nums: 1
58 | current_num: 0
59 |
--------------------------------------------------------------------------------
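One YAML caveat for this config: the bare token None under global_local_weight parses as the string 'None', not a Python None (YAML's null tokens are null, ~, or an empty value), so BaseModel's self.gw and self.lw receive the string 'None' here. A quick check:

    import yaml

    print(yaml.safe_load("global: None"))   # {'global': 'None'}
    print(yaml.safe_load("global: null"))   # {'global': None}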
/predict/option/RSITMD/RSITMD_LW_MCR.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | multiscale:
3 | multiscale_input_channel: 3
4 | multiscale_output_channel: 1
5 | cross_attention:
6 | att_type: "soft_att"
7 | seq2vec:
8 | arch: skipthoughts
9 | dir_st: /data
10 | type: BayesianUniSkip
11 | dropout: 0.25
12 | fixed_emb: False
13 | fusion:
14 | correct_local_hidden_dim: 512
15 | correct_local_hidden_drop: 0.2
16 | supplement_global_hidden_dim: 512
17 | supplement_global_hidden_drop: 0.2
18 | dynamic_fusion_dim: 512
19 | dynamic_fusion_drop: 0.2
20 | mca_DROPOUT_R: 0.1
21 | mca_HIDDEN_SIZE: 512
22 | mca_FF_SIZE: 1024
23 | mca_MULTI_HEAD: 8
24 | mca_HIDDEN_SIZE_HEAD: 64
25 | embed:
26 | embed_dim: 512
27 | global_local_weight:
28 | global: None
29 | local: None
30 | name: LW_MCR
31 | dataset:
32 | datatype: rsitmd
33 | data_split:
34 | data_path: 'data/rsitmd_precomp/'
35 | image_path: '/data/rsitmd_images/'
36 | vocab_path: 'vocab/rsitmd_splits_vocab.json'
37 | local_path: './detection/representation/RSITMD/rsitmd_local.npy'
38 | batch_size: 100
39 | batch_size_val: 70
40 | workers: 3
41 | optim:
42 | epochs: 50
43 | lr: 0.0002
44 | lr_decay_param: 0.7
45 | lr_update_epoch: 20
46 | grad_clip: 0
47 | max_violation: 0
48 | margin: 0.2
49 | resume: "/workspace/slm/predict/checkpoints/RSITMD_LW_MCR.tar"
50 | logs:
51 | eval_step: 1
52 | print_freq: 10
53 | ckpt_save_path: "checkpoints/LW_MCR.tar"
54 | logger_name: 'logs/'
55 | k_fold:
56 | experiment_name: 'rsitmd_LW_MCR'
57 | nums: 1
58 | current_num: 0
59 |
--------------------------------------------------------------------------------
/predict/test_data/sparseresidential_3814.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyuan1996/SemanticLocalizationMetrics/1e9596ded8dbdb83ea534987f8c2e2c6ee3f1d20/predict/test_data/sparseresidential_3814.tif
--------------------------------------------------------------------------------
/predict/util/convert_data.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # Stacked Cross Attention Network implementation based on
3 | # https://arxiv.org/abs/1803.08024.
4 | # "Stacked Cross Attention for Image-Text Matching"
5 | # Kuang-Huei Lee, Xi Chen, Gang Hua, Houdong Hu, Xiaodong He
6 | #
7 | # Writen by Kuang-Huei Lee, 2018
8 | # ---------------------------------------------------------------
9 | """Convert image features from bottom up attention to numpy array"""
10 | import argparse
11 | import base64
12 | import csv
13 | import os
14 | import sys
15 |
16 | import numpy as np
17 |
18 | parser = argparse.ArgumentParser()
19 | parser.add_argument('--imgid_list', default='../data/coco_precomp/train_ids.txt',
20 | help='Path to list of image id')
21 | parser.add_argument('--input_file', default=['../data/bu_data/trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0'],
22 | # ,'../data/bu_data/trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1'],
23 | help='tsv of all image data (output of bottom-up-attention/tools/generate_tsv.py), \
24 | where each columns are: [image_id, image_w, image_h, num_boxes, boxes, features].')
25 | parser.add_argument('--output_dir', default='../data/coco_precomp/',
26 | help='Output directory.')
27 | parser.add_argument('--split', default='train',
28 | help='train|dev|test')
29 | opt = parser.parse_args()
30 | print(opt)
31 |
32 |
33 | meta = []
34 | feature = {}
35 | for line in open(opt.imgid_list):
36 | sid = int(line.strip())
37 | meta.append(sid)
38 | feature[sid] = None
39 |
40 | maxInt = sys.maxsize
41 |
42 | while True:
43 | # decrease the maxInt value by factor 10
44 | # as long as the OverflowError occurs.
45 |
46 | try:
47 | csv.field_size_limit(maxInt)
48 | break
49 | except OverflowError:
50 | maxInt = int(maxInt/10)
51 |
52 | FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
53 |
54 | if __name__ == '__main__':
55 | for input_file in opt.input_file:
56 | with open(input_file, "r+t") as tsv_in_file:
57 | reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
58 | for item in reader:
59 | item['image_id'] = int(item['image_id'])
60 | item['image_h'] = int(item['image_h'])
61 | item['image_w'] = int(item['image_w'])
62 | item['num_boxes'] = int(item['num_boxes'])
63 | for field in ['boxes', 'features']:
64 | item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
65 | dtype=np.float32).reshape((item['num_boxes'],-1))
66 | if item['image_id'] in feature:
67 | feature[item['image_id']] = item['features']
68 |
69 | # Padding
70 | data_out = []
71 | for sid in meta:
72 | padding_data = np.zeros((36,2048))
73 | region_num = np.shape(feature[sid])[0]
74 | if region_num <= 36:
75 | padding_data[:region_num, :] = feature[sid]
76 | else:
77 | padding_data = feature[sid][:36, :]
78 | data_out.append(padding_data)
79 |
80 | data_out = np.stack(data_out, axis=0)
81 | print("Final numpy array shape:", data_out.shape)
82 | np.save(os.path.join(opt.output_dir, '{}_ims.npy'.format(opt.split)), data_out)
--------------------------------------------------------------------------------
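A compatibility note on the decode call in convert_data.py: base64.decodestring was deprecated since Python 3.1 and removed in 3.9; base64.decodebytes is the drop-in replacement used in the listing above. A quick round-trip check:

    import base64

    import numpy as np

    raw = base64.encodebytes(np.arange(4, dtype=np.float32).tobytes())
    arr = np.frombuffer(base64.decodebytes(raw), dtype=np.float32)
    print(arr)   # [0. 1. 2. 3.]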
/predict/utils.py:
--------------------------------------------------------------------------------
1 | # **
2 | # * Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | #
4 | # @author yuanzhiqiang
5 | # 2022/05/05
6 |
7 | import logging
8 | import os
9 | import pickle
10 | import random
11 |
12 | import numpy as np
13 | import yaml
14 |
15 |
16 | # load configuration
17 | def get_config(path_opt="common/config.yaml"):
18 | with open(path_opt, 'r') as handle:
19 | options = yaml.safe_load(handle)  # plain yaml.load requires a Loader on modern PyYAML
20 | return options
21 |
22 | # save to an npy file
23 | def save_to_npy(info, filename):
24 | np.save(filename, info, allow_pickle=True)
25 |
26 | # load from an npy file
27 | def load_from_npy(filename):
28 | info = np.load(filename, allow_pickle=True)
29 | return info
30 |
31 | # build a standard response
32 | def get_stand_return(flag, message):
33 | code = 200 if flag else 400
34 | return_json = {
35 | 'code':code,
36 | "message": message
37 | }
38 | return return_json
39 |
40 | # create the directory if it does not exist
41 | def create_dirs(path):
42 | if not os.path.exists(path):
43 | os.makedirs(path)
44 |
45 | # ========================== dict operations =============================
46 | # save a dict
47 | def dict_save(obj, name="rsd.pkl"):
48 | with open(name, 'wb') as f:
49 | pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
50 |
51 | # load a dict
52 | def dict_load(name="rsd.pkl"):
53 | with open(name, 'rb') as f:
54 | return pickle.load(f)
55 |
56 | # delete a key
57 | def dict_delete(k, rsd):
58 | rsd.pop(k)
59 | return rsd
60 |
61 | # insert a key-value pair
62 | def dict_insert(k, v, rsd):
63 | rsd[k] = v
64 | return rsd
65 |
66 | # sort by value and return the sorted keys
67 | def sort_based_values(sims_dict):
68 | """
69 | :param sims_dict: dict
70 | {
71 | '1': 0.2,
72 | "2": 0.4,
73 | "3": 0.3,
74 | "4": 0.23
75 | }
76 | :return: keys in descending order of value, e.g. ['2', '3', '4', '1']
77 | """
78 | sims_dict = sorted(sims_dict.items(), key=lambda item: item[1])[::-1]
79 | return [i[0] for i in sims_dict]
80 |
81 |
82 | # ============================ load or create retrieval system data =========================
83 | def init_rsd(file_path):
84 | if os.path.exists(file_path):
85 | rsd = dict_load(file_path)
86 | else:
87 | rsd = {}
88 | return rsd
89 |
90 | # delete a directory's files and subdirectories
91 | def delete_dire(dire):
92 | dir_list = []
93 | for root, dirs, files in os.walk(dire):
94 | for afile in files:
95 | os.remove(os.path.join(root, afile))
96 | for adir in dirs:
97 | dir_list.append(os.path.join(root, adir))
98 | for bdir in dir_list:
99 | os.rmdir(bdir)
100 |
101 | # save results to a txt file
102 | def log_to_txt(contexts=None, filename="save.txt", mark=False, encoding='UTF-8', add_n=False, mode='a'):
103 | f = open(filename, mode, encoding=encoding)
104 | if mark:
105 | sig = "------------------------------------------------\n"
106 | f.write(sig)
107 | elif isinstance(contexts, dict):
108 | tmp = ""
109 | for c in contexts.keys():
110 | tmp += str(c)+" | "+ str(contexts[c]) +"\n"
111 | contexts = tmp
112 | f.write(contexts)
113 | else:
114 | if isinstance(contexts,list):
115 | tmp = ""
116 | for c in contexts:
117 | if add_n:
118 | tmp += str(c) + " " + "\n"
119 | else:
120 | tmp += str(c) + " "
121 | contexts = tmp
122 | else:
123 | contexts = contexts + "\n"
124 | f.write(contexts)
125 |
126 |
127 | f.close()
128 |
129 | # read lines from a txt file
130 | def load_from_txt(filename, encoding="utf-8"):
131 | f = open(filename,'r' ,encoding=encoding)
132 | contexts = f.readlines()
133 | return contexts
134 |
135 | # create a random directory name
136 | def create_random_dirs_name(dir_path):
137 | dirs = os.listdir(dir_path)
138 | new_dir = ""
139 | while (new_dir == "") or (new_dir in dirs):
140 | new_dir = "".join(random.sample('1234567890qwertyuiopasdfghjklzxcvbnm', 8))
141 | return new_dir
142 |
143 | # logger
144 | def get_logger(save_path=None):
145 | logger = logging.getLogger()
146 | logger.setLevel(logging.INFO)  # set the logging level
147 | formatter = logging.Formatter('%(asctime)s %(message)s')
148 |
149 | # configure console output format
150 | sh = logging.StreamHandler()
151 | sh.setFormatter(formatter)
152 | logger.addHandler(sh)
153 |
154 | # configure saving logs to file
155 | if save_path is not None:
156 | fh = logging.FileHandler(save_path, encoding='utf8')
157 | fh.setFormatter(formatter)
158 | logger.addHandler(fh)
159 |
160 | return logger
--------------------------------------------------------------------------------
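One caveat on get_logger above: it attaches fresh handlers to the root logger on every call, so calling it more than once duplicates every log line. A minimal guarded variant (a sketch, not part of the original API):

    import logging

    def get_logger_once(save_path=None):
        logger = logging.getLogger()
        if logger.handlers:              # already configured: reuse as-is
            return logger
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(message)s')
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)
        if save_path is not None:
            fh = logging.FileHandler(save_path, encoding='utf8')
            fh.setFormatter(formatter)
            logger.addHandler(fh)
        return logger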
/predict/vocab/rsitmd_splits_vocab.json:
--------------------------------------------------------------------------------
1 | {"word2idx": {"baseball": 1, "field": 2, "beside": 3, "green": 4, "amusement": 5, "park": 6, "around": 7, "red": 8, "track": 9, "adjacent": 10, "playground": 11, "square": 12, "long": 13, "path": 14, "next": 15, "runway": 16, "two": 17, "white": 18, "houses": 19, "located": 20, "middle": 21, "lawn": 22, "fields": 23, "different": 24, "sizes": 25, "stadium": 26, "stand": 27, "big": 28, "side-by-side": 29, "road": 30, "side": 31, "football": 32, "passes": 33, "gray": 34, "bare": 35, "soil": 36, "close": 37, "alongside": 38, "piece": 39, "land": 40, "separates": 41, "house": 42, "separating": 43, "octagonal": 44, "buildings": 45, "colors": 46, "building": 47, "parking": 48, "lot": 49, "full": 50, "cars": 51, "tennis": 52, "courts": 53, "grey": 54, "roadside": 55, "one": 56, "intersection": 57, "roads": 58, "crossed": 59, "highway": 60, "parallel": 61, "pass": 62, "streets": 63, "lined": 64, "trees": 65, "empty": 66, "sides": 67, "lots": 68, "deep": 69, "black": 70, "river": 71, "blue": 72, "factory": 73, "boats": 74, "moored": 75, "many": 76, "parked": 77, "bank": 78, "three": 79, "anchored": 80, "respectively": 81, "turquoise": 82, "ships": 83, "area": 84, "goods": 85, "boat": 86, "quietly": 87, "thin": 88, "bridge": 89, "sail": 90, "dark": 91, "sea": 92, "harbor": 93, "small": 94, "floating": 95, "port": 96, "floated": 97, "ship": 98, "near": 99, "dock": 100, "yellow": 101, "lay": 102, "across": 103, "crossroads": 104, "sailed": 105, "fast": 106, "calm": 107, "sailing": 108, "deck": 109, "purple": 110, "dry": 111, "cargo": 112, "striped": 113, "coast": 114, "stripes": 115, "shore": 116, "water": 117, "either": 118, "car": 119, "left": 120, "wave": 121, "heihe": 122, "pools": 123, "swimming": 124, "opposite": 125, "facing": 126, "moving": 127, "colorful": 128, "board": 129, "belt": 130, "curved": 131, "crosses": 132, "directly": 133, "make": 134, "way": 135, "residential": 136, "crossing": 137, "intersects": 138, "areas": 139, "plant": 140, "belts": 141, "rivers": 142, "surrounded": 143, "densely": 144, "populated": 145, "decorated": 146, "hand": 147, "open": 148, "grassland": 149, "high-rise": 150, "grass": 151, "skyscrapers": 152, "sits": 153, "tall": 154, "ellipse": 155, "shapes": 156, "oval": 157, "built": 158, "kind": 159, "light": 160, "bifurcation": 161, "street": 162, "train": 163, "railway": 164, "directions": 165, "tracks": 166, "planes": 167, "plane": 168, "large": 169, "crowded": 170, "separated": 171, "jagged": 172, "zigzag": 173, "lying": 174, "vehicles": 175, "airport": 176, "ground": 177, "stopped": 178, "dust": 179, "five": 180, "docked": 181, "sparkling": 182, "face": 183, "rectangular": 184, "together": 185, "line": 186, "several": 187, "edge": 188, "playgrounds": 189, "six": 190, "table": 191, "balls": 192, "semicircular": 193, "orange": 194, "dense": 195, "forest": 196, "thick": 197, "connected": 198, "lake": 199, "pond": 200, "lush": 201, "tree": 202, "connects": 203, "court": 204, "vacant": 205, "paths": 206, "expanse": 207, "run": 208, "runways": 209, "door": 210, "winding": 211, "covered": 212, "shaded": 213, "ponds": 214, "brown": 215, "spectators": 216, "spectator": 217, "audience": 218, "quite": 219, "lawns": 220, "divide": 221, "stood": 222, "four": 223, "bareland": 224, "leans": 225, "surround": 226, "front": 227, "terminal": 228, "others": 229, "besides": 230, "sparse": 231, "meadow": 232, "lines": 233, "inside": 234, "plants": 235, "neat": 236, "arranged": 237, "round": 238, "church": 239, "sit": 240, "aeroplanes": 241, "apron": 242, "flight": 243, 
"ploygon": 244, "main": 245, "netlike": 246, "cross": 247, "polygonal": 248, "passenger": 249, "termial": 250, "strip": 251, "shaped": 252, "viaducts": 253, "narrow": 254, "star": 255, "like": 256, "boomerang": 257, "aircraft": 258, "corner": 259, "take": 260, "running": 261, "seven": 262, "tarmac": 263, "boarding": 264, "gate": 265, "landside": 266, "airside": 267, "aircrafts": 268, "tarmacs": 269, "grassy": 270, "waiting": 271, "straight": 272, "standing": 273, "bar": 274, "airstrip": 275, "terminals": 276, "airplanes": 277, "circular": 278, "stretch": 279, "sparsely": 280, "rounded": 281, "rectangle": 282, "surrounds": 283, "radial": 284, "symmetrical": 285, "triangle": 286, "curve": 287, "seated": 288, "wasteland": 289, "polygon": 290, "irregular": 291, "paralleled": 292, "sandwiched": 293, "orderly": 294, "majestic": 295, "surrounding": 296, "arc": 297, "ways": 298, "pieces": 299, "meadows": 300, "strips": 301, "neatly": 302, "trapezoid": 303, "divided": 304, "outside": 305, "complicated": 306, "towers": 307, "cut": 308, "scattered": 309, "airplane": 310, "lamp": 311, "eight": 312, "sitting": 313, "pale": 314, "pure": 315, "y-shaped": 316, "berthing": 317, "conspicuous": 318, "beautiful": 319, "stands": 320, "especially": 321, "packed": 322, "silver": 323, "passed": 324, "wide": 325, "dozens": 326, "e-shaped": 327, "number": 328, "plain": 329, "seen": 330, "station": 331, "t-shaped": 332, "room": 333, "appeared": 334, "fragmented": 335, "terrain": 336, "nothing": 337, "stop": 338, "tower": 339, "blocks": 340, "composed": 341, "ball": 342, "u": 343, "smaller": 344, "single": 345, "farmland": 346, "simple": 347, "along": 348, "order": 349, "taxiing": 350, "rows": 351, "parts": 352, "corners": 353, "squared": 354, "stayed": 355, "nearby": 356, "traverse": 357, "contains": 358, "including": 359, "circle": 360, "ring": 361, "consists": 362, "arc-shaped": 363, "rectangles": 364, "separate": 365, "see": 366, "squares": 367, "wild": 368, "aprons": 369, "bright": 370, "block": 371, "facilities": 372, "complete": 373, "industrial": 374, "people": 375, "flat": 376, "spaces": 377, "huge": 378, "vast": 379, "without": 380, "reddish": 381, "meet": 382, "reddish-brown": 383, "obvious": 384, "marks": 385, "floor": 386, "tract": 387, "traces": 388, "clay": 389, "truck": 390, "driving": 391, "hill": 392, "mark": 393, "vegetation": 394, "khaki": 395, "'s": 396, "work": 397, "space": 398, "sand": 399, "leaking": 400, "looks": 401, "uneven": 402, "planted": 403, "surface": 404, "rugged": 405, "intersect": 406, "vertically": 407, "courtyard": 408, "patch": 409, "shape": 410, "interspersed": 411, "messy": 412, "undulating": 413, "wavy": 414, "sandy": 415, "dotted": 416, "bleak": 417, "dirt": 418, "desert": 419, "stretches": 420, "greenland": 421, "coner": 422, "form": 423, "rough": 424, "formed": 425, "fork": 426, "lie": 427, "traverses": 428, "shallow": 429, "clear": 430, "stripe": 431, "go": 432, "alleys": 433, "runs": 434, "earth": 435, "weeds": 436, "growing": 437, "waste": 438, "broken": 439, "stone": 440, "wheel": 441, "naked": 442, "country": 443, "place": 444, "grows": 445, "amount": 446, "roofs": 447, "kinds": 448, "thing": 449, "things": 450, "part": 451, "irregularly": 452, "blended": 453, "littered": 454, "dots": 455, "half": 456, "rock": 457, "hills": 458, "patterns": 459, "pattern": 460, "lands": 461, "use": 462, "wheels": 463, "holes": 464, "also": 465, "tree-lined": 466, "family": 467, "trucks": 468, "behind": 469, "barren": 470, "flowing": 471, "hillside": 472, "slope": 473, "center": 474, 
"forming": 475, "flower": 476, "luxuriant": 477, "flowers": 478, "row": 479, "top": 480, "view": 481, "gymnasium": 482, "circles": 483, "quarter": 484, "stadiums": 485, "fan-shaped": 486, "lakes": 487, "silvery": 488, "golf": 489, "course": 490, "filled": 491, "clean": 492, "places": 493, "enough": 494, "smooth": 495, "met": 496, "triangular": 497, "faces": 498, "perfect": 499, "rooms": 500, "players": 501, "lane": 502, "freeway": 503, "baseballfield": 504, "red-roofed": 505, "semi-surrounded": 506, "fan": 507, "fanshaped": 508, "reflections": 509, "distributed": 510, "occupies": 511, "pentagonal": 512, "basketball": 513, "forms": 514, "semicircle": 515, "baseballfields": 516, "another": 517, "sports": 518, "size": 519, "back": 520, "soccer": 521, "pitch": 522, "pitches": 523, "neighborhood": 524, "sized": 525, "highways": 526, "woods": 527, "vertical": 528, "routes": 529, "flanked": 530, "among": 531, "iron": 532, "solar": 533, "pool": 534, "grounds": 535, "bleachers": 536, "enclosed": 537, "coconut": 538, "edges": 539, "used": 540, "reflection": 541, "pentagon": 542, "devided": 543, "residence": 544, "lies": 545, "texture": 546, "items": 547, "addition": 548, "waves": 549, "ocean": 550, "beach": 551, "rocks": 552, "beat": 553, "saw": 554, "spectacular": 555, "magnificent": 556, "surging": 557, "spray": 558, "emerald": 559, "curves": 560, "oceans": 561, "float": 562, "patches": 563, "gradient": 564, "seems": 565, "color": 566, "prosperous": 567, "resort": 568, "thriving": 569, "fishing": 570, "ripples": 571, "look": 572, "forests": 573, "beaches": 574, "hit": 575, "picture": 576, "island": 577, "islands": 578, "quiet": 579, "high": 580, "blue-green": 581, "tourists": 582, "scenery": 583, "rocky": 584, "sands": 585, "right": 586, "dunes": 587, "soft": 588, "grasslands": 589, "grow": 590, "stones": 591, "series": 592, "coastline": 593, "dirty": 594, "waters": 595, "wet": 596, "rushed": 597, "city": 598, "layers": 599, "patting": 600, "wharf": 601, "living": 602, "mountain": 603, "short": 604, "boundary": 605, "thr": 606, "jungle": 607, "stained": 608, "dam": 609, "gently": 610, "ribbon": 611, "jade": 612, "ribbons": 613, "objects": 614, "meets": 615, "integrated": 616, "playing": 617, "play": 618, "sunshine": 619, "seaside": 620, "sun": 621, "connect": 622, "reaches": 623, "towards": 624, "golden": 625, "little": 626, "fish": 627, "found": 628, "yellowish": 629, "cyan": 630, "seas": 631, "umbrellas": 632, "yachts": 633, "better": 634, "stretching": 635, "extends": 636, "roofed": 637, "roof": 638, "shadows": 639, "shade": 640, "structure": 641, "scales": 642, "mountains": 643, "plots": 644, "groves": 645, "bottom": 646, "bridges": 647, "banks": 648, "flows": 649, "two-way": 650, "steel": 651, "made": 652, "spans": 653, "six-lane": 654, "frame": 655, "lanes": 656, "span": 657, "partially": 658, "types": 659, "broad": 660, "cast": 661, "shadow": 662, "muddy": 663, "similar": 664, "two-lane": 665, "turns": 666, "joins": 667, "crisscrossed": 668, "end": 669, "factories": 670, "bus": 671, "branches": 672, "traffic": 673, "yacht": 674, "decorations": 675, "pedestrians": 676, "various": 677, "spanning": 678, "connecting": 679, "valley": 680, "ones": 681, "plenty": 682, "bridege": 683, "suspension": 684, "tributary": 685, "cable": 686, "arch": 687, "divides": 688, "double": 689, "cities": 690, "farmlands": 691, "tiny": 692, "stopping": 693, "walking": 694, "bustling": 695, "low": 696, "saddle": 697, "confluence": 698, "turbid": 699, "goes": 700, "passing": 701, "traveling": 702, "cement": 703, 
"partly": 704, "riverbank": 705, "docks": 706, "non-parallel": 707, "slender": 708, "come": 709, "slowed": 710, "tanks": 711, "oil": 712, "intersections": 713, "villas": 714, "twists": 715, "reservoirs": 716, "storage": 717, "canal": 718, "wilderness": 719, "farm": 720, "arches": 721, "turning": 722, "villages": 723, "seperated": 724, "automobiles": 725, "tributaries": 726, "towns": 727, "diagonal": 728, "barelands": 729, "set": 730, "eyot": 731, "urban": 732, "ran": 733, "expressway": 734, "cruising": 735, "designed": 736, "design": 737, "conference": 738, "arcs": 739, "floors": 740, "convention": 741, "bend": 742, "tightly": 743, "centers": 744, "shell": 745, "intersecting": 746, "parks": 747, "wonderful": 748, "semi-circular": 749, "intensive": 750, "almost": 751, "crops": 752, "entrance": 753, "unique": 754, "central": 755, "away": 756, "memory": 757, "shows": 758, "concentric": 759, "public": 760, "plaza": 761, "trapezoidal": 762, "quadrilateral": 763, "leaf": 764, "eye": 765, "regular": 766, "hexagonal": 767, "rings": 768, "loop": 769, "hexagon": 770, "grid": 771, "lace": 772, "spherical": 773, "bowl": 774, "flyover": 775, "metal": 776, "esthetic": 777, "aesthetic": 778, "chess": 779, "silver-gray": 780, "nearly": 781, "incomplete": 782, "strange": 783, "grand": 784, "whole": 785, "vivid": 786, "gym": 787, "honeycomb": 788, "site": 789, "camp": 790, "pyramid": 791, "elliptical": 792, "construction": 793, "conical": 794, "glass": 795, "bushes": 796, "railways": 797, "placed": 798, "well": 799, "include": 800, "leaves": 801, "busy": 802, "layout": 803, "distribution": 804, "architectural": 805, "owns": 806, "centre": 807, "variety": 808, "region": 809, "styles": 810, "pink": 811, "moss": 812, "cathedral": 813, "great": 814, "dome": 815, "churches": 816, "spread": 817, "yard": 818, "properly": 819, "ordered": 820, "closed": 821, "semi": 822, "heart": 823, "mint": 824, "gree": 825, "pasture": 826, "every": 827, "includes": 828, "bricks": 829, "wooden": 830, "brick": 831, "garden": 832, "neighborhoods": 833, "junction": 834, "t-junction": 835, "old": 836, "flats": 837, "columns": 838, "cone": 839, "domes": 840, "sloping": 841, "cylindrical": 842, "structures": 843, "column": 844, "apartments": 845, "cruciform": 846, "redor": 847, "closely": 848, "linked": 849, "good": 850, "density": 851, "architecture": 852, "colored": 853, "bit": 854, "fine": 855, "umbrella": 856, "consisting": 857, "quadrangle": 858, "gathered": 859, "alley": 860, "wall": 861, "diamond": 862, "covers": 863, "blu": 864, "e": 865, "ceiling": 866, "flow": 867, "stream": 868, "mirror": 869, "reflected": 870, "bunch": 871, "average": 872, "rather": 873, "shrubs": 874, "nice": 875, "bent": 876, "commercial": 877, "business": 878, "district": 879, "concentrated": 880, "office": 881, "districts": 882, "downtown": 883, "well-organized": 884, "shopping": 885, "mixed": 886, "evergreen": 887, "numerous": 888, "home": 889, "section": 890, "viaduct": 891, "rare": 892, "travel": 893, "drive": 894, "skyscraper": 895, "ordinary": 896, "advertisements": 897, "compact": 898, "advertisement": 899, "rise": 900, "school": 901, "medium": 902, "apartment": 903, "edifices": 904, "height": 905, "overpass": 906, "relatively": 907, "standard": 908, "vary": 909, "distance": 910, "economic": 911, "developed": 912, "schools": 913, "mall": 914, "mainly": 915, "zone": 916, "split": 917, "major": 918, "commerce": 919, "beige": 920, "casting": 921, "billboard": 922, "community": 923, "sections": 924, "gardens": 925, "consist": 926, "sector": 927, 
"spot": 928, "convenient": 929, "territory": 930, "much": 931, "everywhere": 932, "bed": 933, "sharp": 934, "towering": 935, "villa": 936, "4th": 937, "encircle": 938, "serious": 939, "jams": 940, "nas": 941, "files": 942, "constitute": 943, "marked": 944, "range": 945, "organized": 946, "apart": 947, "compactly": 948, "wood": 949, "distribute": 950, "colours": 951, "pastures": 952, "aligned": 953, "cover": 954, "housing": 955, "shades": 956, "divids": 957, "crooked": 958, "oblique": 959, "came": 960, "rate": 961, "greening": 962, "growth": 963, "mostly": 964, "regularly": 965, "luxury": 966, "extending": 967, "never": 968, "stops": 969, "leisure": 970, "file": 971, "rooftop": 972, "village": 973, "trains": 974, "winds": 975, "homes": 976, "bem": 977, "tight": 978, "extend": 979, "arched": 980, "tem": 981, "agricultural": 982, "concrete": 983, "settled": 984, "tidy": 985, "angle": 986, "arrangement": 987, "past": 988, "regions": 989, "desolate": 990, "wrinkles": 991, "wrinkled": 992, "deserted": 993, "ca": 994, "n't": 995, "anything": 996, "oasis": 997, "deserts": 998, "cracks": 999, "scale": 1000, "dried": 1001, "figure": 1002, "stains": 1003, "layered": 1004, "pellets": 1005, "consistency": 1006, "visible": 1007, "grove": 1008, "curly": 1009, "crack": 1010, "spots": 1011, "roots": 1012, "tortuous": 1013, "thousands": 1014, "pile": 1015, "dusty": 1016, "arid": 1017, "boundless": 1018, "prairie": 1019, "endless": 1020, "resources": 1021, "nua": 1022, "scene": 1023, "dividing": 1024, "layer": 1025, "peaks": 1026, "band": 1027, "brook": 1028, "outline": 1029, "creamy": 1030, "cream": 1031, "pits": 1032, "snow": 1033, "complex": 1034, "textures": 1035, "petal": 1036, "colour": 1037, "rain": 1038, "really": 1039, "air": 1040, "fold": 1041, "wind": 1042, "gap": 1043, "looking": 1044, "steep": 1045, "turn": 1046, "dead": 1047, "trail": 1048, "alternating": 1049, "sidewalks": 1050, "farms": 1051, "coloured": 1052, "grown": 1053, "cultivated": 1054, "paddy": 1055, "rest": 1056, "ridge": 1057, "ridges": 1058, "landes": 1059, "mud": 1060, "depths": 1061, "crop": 1062, "loose": 1063, "checkered": 1064, "data": 1065, "points": 1066, "wheat": 1067, "leading": 1068, "luxurious": 1069, "uncultivated": 1070, "b": 1071, "forked": 1072, "bald": 1073, "polygons": 1074, "wooded": 1075, "town": 1076, "ramp": 1077, "nest": 1078, "clouds": 1079, "bush": 1080, "unusual": 1081, "families": 1082, "ranch": 1083, "landscape": 1084, "rich": 1085, "hut": 1086, "spring": 1087, "bottle": 1088, "footpath": 1089, "birds": 1090, "grandma": 1091, "flourishing": 1092, "ranges": 1093, "coverage": 1094, "clearly": 1095, "time": 1096, "twisted": 1097, "unpaved": 1098, "altitude": 1099, "tent": 1100, "zones": 1101, "tents": 1102, "v-shaped": 1103, "direction": 1104, "painted": 1105, "withered": 1106, "peak": 1107, "bud": 1108, "cloud": 1109, "shed": 1110, "laid": 1111, "industry": 1112, "workshops": 1113, "group": 1114, "workshop": 1115, "warehouse": 1116, "warehouses": 1117, "container": 1118, "containers": 1119, "paved": 1120, "betweeen": 1121, "barley": 1122, "cylinders": 1123, "basin": 1124, "modern": 1125, "nude": 1126, "belong": 1127, "tank": 1128, "rio": 1129, "whose": 1130, "overpasses": 1131, "valleys": 1132, "lively": 1133, "system": 1134, "pots": 1135, "cans": 1136, "gradually": 1137, "due": 1138, "problems": 1139, "turf": 1140, "prado": 1141, "perpendicular": 1142, "slightly": 1143, "worn": 1144, "boxes": 1145, "decorating": 1146, "bird": 1147, "folds": 1148, "sprouted": 1149, "yet": 1150, "loess": 1151, "fertile": 
1152, "medium-sized": 1153, "hidden": 1154, "situated": 1155, "peaceful": 1156, "ten": 1157, "cottage": 1158, "cabins": 1159, "cottages": 1160, "smart": 1161, "residents": 1162, "walls": 1163, "u-shaped": 1164, "entertainment": 1165, "recreation": 1166, "independent": 1167, "still": 1168, "environment": 1169, "sidewalk": 1170, "venues": 1171, "forks": 1172, "leads": 1173, "rolling": 1174, "corrugated": 1175, "terraces": 1176, "eyes": 1177, "snows": 1178, "covering": 1179, "mountainous": 1180, "foot": 1181, "greek": 1182, "heart-shaped": 1183, "artificial": 1184, "lagoon": 1185, "ferris": 1186, "theme": 1187, "interesting": 1188, "outdoor": 1189, "interlaced": 1190, "equipped": 1191, "equipment": 1192, "children": 1193, "one-way": 1194, "seats": 1195, "position": 1196, "crammed": 1197, "hundreds": 1198, "asphalt": 1199, "spacious": 1200, "median": 1201, "ceilings": 1202, "pulled": 1203, "enter": 1204, "manner": 1205, "lost": 1206, "4": 1207, "vehicle": 1208, "platform": 1209, "goal": 1210, "crimson": 1211, "footballground": 1212, "game": 1213, "teaching": 1214, "educational": 1215, "jim": 1216, "twelve": 1217, "lead": 1218, "guys": 1219, "cemnet": 1220, "plastic": 1221, "intelligent": 1222, "hockey": 1223, "base": 1224, "south": 1225, "fica": 1226, "steps": 1227, "barefoot": 1228, "sky": 1229, "image": 1230, "fog": 1231, "mist": 1232, "diamonds": 1233, "combined": 1234, "spindle": 1235, "fountain": 1236, "stars": 1237, "takes": 1238, "l-shaped": 1239, "2": 1240, "ellipses": 1241, "ports": 1242, "harbour": 1243, "pier": 1244, "quality": 1245, "reach": 1246, "docking": 1247, "seawall": 1248, "basins": 1249, "sailboats": 1250, "regulated": 1251, "leave": 1252, "stations": 1253, "rail": 1254, "railroad": 1255, "facility": 1256, "spindle-shaped": 1257, "satellite": 1258, "awnings": 1259, "awning": 1260, "high-speed": 1261, "belongs": 1262, "subway": 1263, "gray-roofed": 1264, "garage": 1265, "guide": 1266, "transportation": 1267, "resorts": 1268, "tropical": 1269, "tourist": 1270, "ahead": 1271, "holiday": 1272, "chairs": 1273, "s-shaped": 1274, "palm": 1275, "roundabout": 1276, "herringbone": 1277, "rainforest": 1278, "wetlands": 1279, "flowed": 1280, "join": 1281, "saddle-shaped": 1282, "greenbelts": 1283, "cake": 1284, "campus": 1285, "northeast": 1286, "countless": 1287, "painting": 1288, "university": 1289, "northwest": 1290, "well-equipped": 1291, "students": 1292, "carefully": 1293, "c-shaped": 1294, "auditorium": 1295, "hot": 1296, "private": 1297, "photo": 1298, "beds": 1299, "hall": 1300, "monument": 1301, "lantern": 1302, "bell": 1303, "x-shaped": 1304, "hold": 1305, "tens": 1306, "new": 1307, "tripods": 1308, "stalls": 1309, "blearchers": 1310, "pipe": 1311, "storagetanks": 1312, "pipelines": 1313, "drums": 1314, "pipes": 1315, "reservoir": 1316, "identical": 1317, "pallets": 1318, "nine": 1319, "trays": 1320, "storerooms": 1321, "barrelan": 1322, "june": 1323, "columnar": 1324, "tanker": 1325, "loaded": 1326, "treatment": 1327, "jars": 1328, "refinery": 1329, "tankers": 1330, "storing": 1331, "jar": 1332, "bicycles": 1333, "barrels": 1334, "shrinking": 1335, "loops": 1336, "auxiliary": 1337, "eight-shaped": 1338, "flyovers": 1339, "ramps": 1340, "three-dimensional": 1341, "brush": 1342, "nested": 1343, "luggage": 1344, "turfs": 1345, "bunkers": 1346, "extremely": 1347, "20": 1348, "": 1349}, "idx2word": {"1": "baseball", "2": "field", "3": "beside", "4": "green", "5": "amusement", "6": "park", "7": "around", "8": "red", "9": "track", "10": "adjacent", "11": "playground", "12": 
"square", "13": "long", "14": "path", "15": "next", "16": "runway", "17": "two", "18": "white", "19": "houses", "20": "located", "21": "middle", "22": "lawn", "23": "fields", "24": "different", "25": "sizes", "26": "stadium", "27": "stand", "28": "big", "29": "side-by-side", "30": "road", "31": "side", "32": "football", "33": "passes", "34": "gray", "35": "bare", "36": "soil", "37": "close", "38": "alongside", "39": "piece", "40": "land", "41": "separates", "42": "house", "43": "separating", "44": "octagonal", "45": "buildings", "46": "colors", "47": "building", "48": "parking", "49": "lot", "50": "full", "51": "cars", "52": "tennis", "53": "courts", "54": "grey", "55": "roadside", "56": "one", "57": "intersection", "58": "roads", "59": "crossed", "60": "highway", "61": "parallel", "62": "pass", "63": "streets", "64": "lined", "65": "trees", "66": "empty", "67": "sides", "68": "lots", "69": "deep", "70": "black", "71": "river", "72": "blue", "73": "factory", "74": "boats", "75": "moored", "76": "many", "77": "parked", "78": "bank", "79": "three", "80": "anchored", "81": "respectively", "82": "turquoise", "83": "ships", "84": "area", "85": "goods", "86": "boat", "87": "quietly", "88": "thin", "89": "bridge", "90": "sail", "91": "dark", "92": "sea", "93": "harbor", "94": "small", "95": "floating", "96": "port", "97": "floated", "98": "ship", "99": "near", "100": "dock", "101": "yellow", "102": "lay", "103": "across", "104": "crossroads", "105": "sailed", "106": "fast", "107": "calm", "108": "sailing", "109": "deck", "110": "purple", "111": "dry", "112": "cargo", "113": "striped", "114": "coast", "115": "stripes", "116": "shore", "117": "water", "118": "either", "119": "car", "120": "left", "121": "wave", "122": "heihe", "123": "pools", "124": "swimming", "125": "opposite", "126": "facing", "127": "moving", "128": "colorful", "129": "board", "130": "belt", "131": "curved", "132": "crosses", "133": "directly", "134": "make", "135": "way", "136": "residential", "137": "crossing", "138": "intersects", "139": "areas", "140": "plant", "141": "belts", "142": "rivers", "143": "surrounded", "144": "densely", "145": "populated", "146": "decorated", "147": "hand", "148": "open", "149": "grassland", "150": "high-rise", "151": "grass", "152": "skyscrapers", "153": "sits", "154": "tall", "155": "ellipse", "156": "shapes", "157": "oval", "158": "built", "159": "kind", "160": "light", "161": "bifurcation", "162": "street", "163": "train", "164": "railway", "165": "directions", "166": "tracks", "167": "planes", "168": "plane", "169": "large", "170": "crowded", "171": "separated", "172": "jagged", "173": "zigzag", "174": "lying", "175": "vehicles", "176": "airport", "177": "ground", "178": "stopped", "179": "dust", "180": "five", "181": "docked", "182": "sparkling", "183": "face", "184": "rectangular", "185": "together", "186": "line", "187": "several", "188": "edge", "189": "playgrounds", "190": "six", "191": "table", "192": "balls", "193": "semicircular", "194": "orange", "195": "dense", "196": "forest", "197": "thick", "198": "connected", "199": "lake", "200": "pond", "201": "lush", "202": "tree", "203": "connects", "204": "court", "205": "vacant", "206": "paths", "207": "expanse", "208": "run", "209": "runways", "210": "door", "211": "winding", "212": "covered", "213": "shaded", "214": "ponds", "215": "brown", "216": "spectators", "217": "spectator", "218": "audience", "219": "quite", "220": "lawns", "221": "divide", "222": "stood", "223": "four", "224": "bareland", "225": "leans", "226": "surround", 
"227": "front", "228": "terminal", "229": "others", "230": "besides", "231": "sparse", "232": "meadow", "233": "lines", "234": "inside", "235": "plants", "236": "neat", "237": "arranged", "238": "round", "239": "church", "240": "sit", "241": "aeroplanes", "242": "apron", "243": "flight", "244": "ploygon", "245": "main", "246": "netlike", "247": "cross", "248": "polygonal", "249": "passenger", "250": "termial", "251": "strip", "252": "shaped", "253": "viaducts", "254": "narrow", "255": "star", "256": "like", "257": "boomerang", "258": "aircraft", "259": "corner", "260": "take", "261": "running", "262": "seven", "263": "tarmac", "264": "boarding", "265": "gate", "266": "landside", "267": "airside", "268": "aircrafts", "269": "tarmacs", "270": "grassy", "271": "waiting", "272": "straight", "273": "standing", "274": "bar", "275": "airstrip", "276": "terminals", "277": "airplanes", "278": "circular", "279": "stretch", "280": "sparsely", "281": "rounded", "282": "rectangle", "283": "surrounds", "284": "radial", "285": "symmetrical", "286": "triangle", "287": "curve", "288": "seated", "289": "wasteland", "290": "polygon", "291": "irregular", "292": "paralleled", "293": "sandwiched", "294": "orderly", "295": "majestic", "296": "surrounding", "297": "arc", "298": "ways", "299": "pieces", "300": "meadows", "301": "strips", "302": "neatly", "303": "trapezoid", "304": "divided", "305": "outside", "306": "complicated", "307": "towers", "308": "cut", "309": "scattered", "310": "airplane", "311": "lamp", "312": "eight", "313": "sitting", "314": "pale", "315": "pure", "316": "y-shaped", "317": "berthing", "318": "conspicuous", "319": "beautiful", "320": "stands", "321": "especially", "322": "packed", "323": "silver", "324": "passed", "325": "wide", "326": "dozens", "327": "e-shaped", "328": "number", "329": "plain", "330": "seen", "331": "station", "332": "t-shaped", "333": "room", "334": "appeared", "335": "fragmented", "336": "terrain", "337": "nothing", "338": "stop", "339": "tower", "340": "blocks", "341": "composed", "342": "ball", "343": "u", "344": "smaller", "345": "single", "346": "farmland", "347": "simple", "348": "along", "349": "order", "350": "taxiing", "351": "rows", "352": "parts", "353": "corners", "354": "squared", "355": "stayed", "356": "nearby", "357": "traverse", "358": "contains", "359": "including", "360": "circle", "361": "ring", "362": "consists", "363": "arc-shaped", "364": "rectangles", "365": "separate", "366": "see", "367": "squares", "368": "wild", "369": "aprons", "370": "bright", "371": "block", "372": "facilities", "373": "complete", "374": "industrial", "375": "people", "376": "flat", "377": "spaces", "378": "huge", "379": "vast", "380": "without", "381": "reddish", "382": "meet", "383": "reddish-brown", "384": "obvious", "385": "marks", "386": "floor", "387": "tract", "388": "traces", "389": "clay", "390": "truck", "391": "driving", "392": "hill", "393": "mark", "394": "vegetation", "395": "khaki", "396": "'s", "397": "work", "398": "space", "399": "sand", "400": "leaking", "401": "looks", "402": "uneven", "403": "planted", "404": "surface", "405": "rugged", "406": "intersect", "407": "vertically", "408": "courtyard", "409": "patch", "410": "shape", "411": "interspersed", "412": "messy", "413": "undulating", "414": "wavy", "415": "sandy", "416": "dotted", "417": "bleak", "418": "dirt", "419": "desert", "420": "stretches", "421": "greenland", "422": "coner", "423": "form", "424": "rough", "425": "formed", "426": "fork", "427": "lie", "428": "traverses", "429": "shallow", 
"430": "clear", "431": "stripe", "432": "go", "433": "alleys", "434": "runs", "435": "earth", "436": "weeds", "437": "growing", "438": "waste", "439": "broken", "440": "stone", "441": "wheel", "442": "naked", "443": "country", "444": "place", "445": "grows", "446": "amount", "447": "roofs", "448": "kinds", "449": "thing", "450": "things", "451": "part", "452": "irregularly", "453": "blended", "454": "littered", "455": "dots", "456": "half", "457": "rock", "458": "hills", "459": "patterns", "460": "pattern", "461": "lands", "462": "use", "463": "wheels", "464": "holes", "465": "also", "466": "tree-lined", "467": "family", "468": "trucks", "469": "behind", "470": "barren", "471": "flowing", "472": "hillside", "473": "slope", "474": "center", "475": "forming", "476": "flower", "477": "luxuriant", "478": "flowers", "479": "row", "480": "top", "481": "view", "482": "gymnasium", "483": "circles", "484": "quarter", "485": "stadiums", "486": "fan-shaped", "487": "lakes", "488": "silvery", "489": "golf", "490": "course", "491": "filled", "492": "clean", "493": "places", "494": "enough", "495": "smooth", "496": "met", "497": "triangular", "498": "faces", "499": "perfect", "500": "rooms", "501": "players", "502": "lane", "503": "freeway", "504": "baseballfield", "505": "red-roofed", "506": "semi-surrounded", "507": "fan", "508": "fanshaped", "509": "reflections", "510": "distributed", "511": "occupies", "512": "pentagonal", "513": "basketball", "514": "forms", "515": "semicircle", "516": "baseballfields", "517": "another", "518": "sports", "519": "size", "520": "back", "521": "soccer", "522": "pitch", "523": "pitches", "524": "neighborhood", "525": "sized", "526": "highways", "527": "woods", "528": "vertical", "529": "routes", "530": "flanked", "531": "among", "532": "iron", "533": "solar", "534": "pool", "535": "grounds", "536": "bleachers", "537": "enclosed", "538": "coconut", "539": "edges", "540": "used", "541": "reflection", "542": "pentagon", "543": "devided", "544": "residence", "545": "lies", "546": "texture", "547": "items", "548": "addition", "549": "waves", "550": "ocean", "551": "beach", "552": "rocks", "553": "beat", "554": "saw", "555": "spectacular", "556": "magnificent", "557": "surging", "558": "spray", "559": "emerald", "560": "curves", "561": "oceans", "562": "float", "563": "patches", "564": "gradient", "565": "seems", "566": "color", "567": "prosperous", "568": "resort", "569": "thriving", "570": "fishing", "571": "ripples", "572": "look", "573": "forests", "574": "beaches", "575": "hit", "576": "picture", "577": "island", "578": "islands", "579": "quiet", "580": "high", "581": "blue-green", "582": "tourists", "583": "scenery", "584": "rocky", "585": "sands", "586": "right", "587": "dunes", "588": "soft", "589": "grasslands", "590": "grow", "591": "stones", "592": "series", "593": "coastline", "594": "dirty", "595": "waters", "596": "wet", "597": "rushed", "598": "city", "599": "layers", "600": "patting", "601": "wharf", "602": "living", "603": "mountain", "604": "short", "605": "boundary", "606": "thr", "607": "jungle", "608": "stained", "609": "dam", "610": "gently", "611": "ribbon", "612": "jade", "613": "ribbons", "614": "objects", "615": "meets", "616": "integrated", "617": "playing", "618": "play", "619": "sunshine", "620": "seaside", "621": "sun", "622": "connect", "623": "reaches", "624": "towards", "625": "golden", "626": "little", "627": "fish", "628": "found", "629": "yellowish", "630": "cyan", "631": "seas", "632": "umbrellas", "633": "yachts", "634": "better", "635": 
"stretching", "636": "extends", "637": "roofed", "638": "roof", "639": "shadows", "640": "shade", "641": "structure", "642": "scales", "643": "mountains", "644": "plots", "645": "groves", "646": "bottom", "647": "bridges", "648": "banks", "649": "flows", "650": "two-way", "651": "steel", "652": "made", "653": "spans", "654": "six-lane", "655": "frame", "656": "lanes", "657": "span", "658": "partially", "659": "types", "660": "broad", "661": "cast", "662": "shadow", "663": "muddy", "664": "similar", "665": "two-lane", "666": "turns", "667": "joins", "668": "crisscrossed", "669": "end", "670": "factories", "671": "bus", "672": "branches", "673": "traffic", "674": "yacht", "675": "decorations", "676": "pedestrians", "677": "various", "678": "spanning", "679": "connecting", "680": "valley", "681": "ones", "682": "plenty", "683": "bridege", "684": "suspension", "685": "tributary", "686": "cable", "687": "arch", "688": "divides", "689": "double", "690": "cities", "691": "farmlands", "692": "tiny", "693": "stopping", "694": "walking", "695": "bustling", "696": "low", "697": "saddle", "698": "confluence", "699": "turbid", "700": "goes", "701": "passing", "702": "traveling", "703": "cement", "704": "partly", "705": "riverbank", "706": "docks", "707": "non-parallel", "708": "slender", "709": "come", "710": "slowed", "711": "tanks", "712": "oil", "713": "intersections", "714": "villas", "715": "twists", "716": "reservoirs", "717": "storage", "718": "canal", "719": "wilderness", "720": "farm", "721": "arches", "722": "turning", "723": "villages", "724": "seperated", "725": "automobiles", "726": "tributaries", "727": "towns", "728": "diagonal", "729": "barelands", "730": "set", "731": "eyot", "732": "urban", "733": "ran", "734": "expressway", "735": "cruising", "736": "designed", "737": "design", "738": "conference", "739": "arcs", "740": "floors", "741": "convention", "742": "bend", "743": "tightly", "744": "centers", "745": "shell", "746": "intersecting", "747": "parks", "748": "wonderful", "749": "semi-circular", "750": "intensive", "751": "almost", "752": "crops", "753": "entrance", "754": "unique", "755": "central", "756": "away", "757": "memory", "758": "shows", "759": "concentric", "760": "public", "761": "plaza", "762": "trapezoidal", "763": "quadrilateral", "764": "leaf", "765": "eye", "766": "regular", "767": "hexagonal", "768": "rings", "769": "loop", "770": "hexagon", "771": "grid", "772": "lace", "773": "spherical", "774": "bowl", "775": "flyover", "776": "metal", "777": "esthetic", "778": "aesthetic", "779": "chess", "780": "silver-gray", "781": "nearly", "782": "incomplete", "783": "strange", "784": "grand", "785": "whole", "786": "vivid", "787": "gym", "788": "honeycomb", "789": "site", "790": "camp", "791": "pyramid", "792": "elliptical", "793": "construction", "794": "conical", "795": "glass", "796": "bushes", "797": "railways", "798": "placed", "799": "well", "800": "include", "801": "leaves", "802": "busy", "803": "layout", "804": "distribution", "805": "architectural", "806": "owns", "807": "centre", "808": "variety", "809": "region", "810": "styles", "811": "pink", "812": "moss", "813": "cathedral", "814": "great", "815": "dome", "816": "churches", "817": "spread", "818": "yard", "819": "properly", "820": "ordered", "821": "closed", "822": "semi", "823": "heart", "824": "mint", "825": "gree", "826": "pasture", "827": "every", "828": "includes", "829": "bricks", "830": "wooden", "831": "brick", "832": "garden", "833": "neighborhoods", "834": "junction", "835": "t-junction", "836": 
"old", "837": "flats", "838": "columns", "839": "cone", "840": "domes", "841": "sloping", "842": "cylindrical", "843": "structures", "844": "column", "845": "apartments", "846": "cruciform", "847": "redor", "848": "closely", "849": "linked", "850": "good", "851": "density", "852": "architecture", "853": "colored", "854": "bit", "855": "fine", "856": "umbrella", "857": "consisting", "858": "quadrangle", "859": "gathered", "860": "alley", "861": "wall", "862": "diamond", "863": "covers", "864": "blu", "865": "e", "866": "ceiling", "867": "flow", "868": "stream", "869": "mirror", "870": "reflected", "871": "bunch", "872": "average", "873": "rather", "874": "shrubs", "875": "nice", "876": "bent", "877": "commercial", "878": "business", "879": "district", "880": "concentrated", "881": "office", "882": "districts", "883": "downtown", "884": "well-organized", "885": "shopping", "886": "mixed", "887": "evergreen", "888": "numerous", "889": "home", "890": "section", "891": "viaduct", "892": "rare", "893": "travel", "894": "drive", "895": "skyscraper", "896": "ordinary", "897": "advertisements", "898": "compact", "899": "advertisement", "900": "rise", "901": "school", "902": "medium", "903": "apartment", "904": "edifices", "905": "height", "906": "overpass", "907": "relatively", "908": "standard", "909": "vary", "910": "distance", "911": "economic", "912": "developed", "913": "schools", "914": "mall", "915": "mainly", "916": "zone", "917": "split", "918": "major", "919": "commerce", "920": "beige", "921": "casting", "922": "billboard", "923": "community", "924": "sections", "925": "gardens", "926": "consist", "927": "sector", "928": "spot", "929": "convenient", "930": "territory", "931": "much", "932": "everywhere", "933": "bed", "934": "sharp", "935": "towering", "936": "villa", "937": "4th", "938": "encircle", "939": "serious", "940": "jams", "941": "nas", "942": "files", "943": "constitute", "944": "marked", "945": "range", "946": "organized", "947": "apart", "948": "compactly", "949": "wood", "950": "distribute", "951": "colours", "952": "pastures", "953": "aligned", "954": "cover", "955": "housing", "956": "shades", "957": "divids", "958": "crooked", "959": "oblique", "960": "came", "961": "rate", "962": "greening", "963": "growth", "964": "mostly", "965": "regularly", "966": "luxury", "967": "extending", "968": "never", "969": "stops", "970": "leisure", "971": "file", "972": "rooftop", "973": "village", "974": "trains", "975": "winds", "976": "homes", "977": "bem", "978": "tight", "979": "extend", "980": "arched", "981": "tem", "982": "agricultural", "983": "concrete", "984": "settled", "985": "tidy", "986": "angle", "987": "arrangement", "988": "past", "989": "regions", "990": "desolate", "991": "wrinkles", "992": "wrinkled", "993": "deserted", "994": "ca", "995": "n't", "996": "anything", "997": "oasis", "998": "deserts", "999": "cracks", "1000": "scale", "1001": "dried", "1002": "figure", "1003": "stains", "1004": "layered", "1005": "pellets", "1006": "consistency", "1007": "visible", "1008": "grove", "1009": "curly", "1010": "crack", "1011": "spots", "1012": "roots", "1013": "tortuous", "1014": "thousands", "1015": "pile", "1016": "dusty", "1017": "arid", "1018": "boundless", "1019": "prairie", "1020": "endless", "1021": "resources", "1022": "nua", "1023": "scene", "1024": "dividing", "1025": "layer", "1026": "peaks", "1027": "band", "1028": "brook", "1029": "outline", "1030": "creamy", "1031": "cream", "1032": "pits", "1033": "snow", "1034": "complex", "1035": "textures", "1036": "petal", 
"1037": "colour", "1038": "rain", "1039": "really", "1040": "air", "1041": "fold", "1042": "wind", "1043": "gap", "1044": "looking", "1045": "steep", "1046": "turn", "1047": "dead", "1048": "trail", "1049": "alternating", "1050": "sidewalks", "1051": "farms", "1052": "coloured", "1053": "grown", "1054": "cultivated", "1055": "paddy", "1056": "rest", "1057": "ridge", "1058": "ridges", "1059": "landes", "1060": "mud", "1061": "depths", "1062": "crop", "1063": "loose", "1064": "checkered", "1065": "data", "1066": "points", "1067": "wheat", "1068": "leading", "1069": "luxurious", "1070": "uncultivated", "1071": "b", "1072": "forked", "1073": "bald", "1074": "polygons", "1075": "wooded", "1076": "town", "1077": "ramp", "1078": "nest", "1079": "clouds", "1080": "bush", "1081": "unusual", "1082": "families", "1083": "ranch", "1084": "landscape", "1085": "rich", "1086": "hut", "1087": "spring", "1088": "bottle", "1089": "footpath", "1090": "birds", "1091": "grandma", "1092": "flourishing", "1093": "ranges", "1094": "coverage", "1095": "clearly", "1096": "time", "1097": "twisted", "1098": "unpaved", "1099": "altitude", "1100": "tent", "1101": "zones", "1102": "tents", "1103": "v-shaped", "1104": "direction", "1105": "painted", "1106": "withered", "1107": "peak", "1108": "bud", "1109": "cloud", "1110": "shed", "1111": "laid", "1112": "industry", "1113": "workshops", "1114": "group", "1115": "workshop", "1116": "warehouse", "1117": "warehouses", "1118": "container", "1119": "containers", "1120": "paved", "1121": "betweeen", "1122": "barley", "1123": "cylinders", "1124": "basin", "1125": "modern", "1126": "nude", "1127": "belong", "1128": "tank", "1129": "rio", "1130": "whose", "1131": "overpasses", "1132": "valleys", "1133": "lively", "1134": "system", "1135": "pots", "1136": "cans", "1137": "gradually", "1138": "due", "1139": "problems", "1140": "turf", "1141": "prado", "1142": "perpendicular", "1143": "slightly", "1144": "worn", "1145": "boxes", "1146": "decorating", "1147": "bird", "1148": "folds", "1149": "sprouted", "1150": "yet", "1151": "loess", "1152": "fertile", "1153": "medium-sized", "1154": "hidden", "1155": "situated", "1156": "peaceful", "1157": "ten", "1158": "cottage", "1159": "cabins", "1160": "cottages", "1161": "smart", "1162": "residents", "1163": "walls", "1164": "u-shaped", "1165": "entertainment", "1166": "recreation", "1167": "independent", "1168": "still", "1169": "environment", "1170": "sidewalk", "1171": "venues", "1172": "forks", "1173": "leads", "1174": "rolling", "1175": "corrugated", "1176": "terraces", "1177": "eyes", "1178": "snows", "1179": "covering", "1180": "mountainous", "1181": "foot", "1182": "greek", "1183": "heart-shaped", "1184": "artificial", "1185": "lagoon", "1186": "ferris", "1187": "theme", "1188": "interesting", "1189": "outdoor", "1190": "interlaced", "1191": "equipped", "1192": "equipment", "1193": "children", "1194": "one-way", "1195": "seats", "1196": "position", "1197": "crammed", "1198": "hundreds", "1199": "asphalt", "1200": "spacious", "1201": "median", "1202": "ceilings", "1203": "pulled", "1204": "enter", "1205": "manner", "1206": "lost", "1207": "4", "1208": "vehicle", "1209": "platform", "1210": "goal", "1211": "crimson", "1212": "footballground", "1213": "game", "1214": "teaching", "1215": "educational", "1216": "jim", "1217": "twelve", "1218": "lead", "1219": "guys", "1220": "cemnet", "1221": "plastic", "1222": "intelligent", "1223": "hockey", "1224": "base", "1225": "south", "1226": "fica", "1227": "steps", "1228": "barefoot", "1229": 
"sky", "1230": "image", "1231": "fog", "1232": "mist", "1233": "diamonds", "1234": "combined", "1235": "spindle", "1236": "fountain", "1237": "stars", "1238": "takes", "1239": "l-shaped", "1240": "2", "1241": "ellipses", "1242": "ports", "1243": "harbour", "1244": "pier", "1245": "quality", "1246": "reach", "1247": "docking", "1248": "seawall", "1249": "basins", "1250": "sailboats", "1251": "regulated", "1252": "leave", "1253": "stations", "1254": "rail", "1255": "railroad", "1256": "facility", "1257": "spindle-shaped", "1258": "satellite", "1259": "awnings", "1260": "awning", "1261": "high-speed", "1262": "belongs", "1263": "subway", "1264": "gray-roofed", "1265": "garage", "1266": "guide", "1267": "transportation", "1268": "resorts", "1269": "tropical", "1270": "tourist", "1271": "ahead", "1272": "holiday", "1273": "chairs", "1274": "s-shaped", "1275": "palm", "1276": "roundabout", "1277": "herringbone", "1278": "rainforest", "1279": "wetlands", "1280": "flowed", "1281": "join", "1282": "saddle-shaped", "1283": "greenbelts", "1284": "cake", "1285": "campus", "1286": "northeast", "1287": "countless", "1288": "painting", "1289": "university", "1290": "northwest", "1291": "well-equipped", "1292": "students", "1293": "carefully", "1294": "c-shaped", "1295": "auditorium", "1296": "hot", "1297": "private", "1298": "photo", "1299": "beds", "1300": "hall", "1301": "monument", "1302": "lantern", "1303": "bell", "1304": "x-shaped", "1305": "hold", "1306": "tens", "1307": "new", "1308": "tripods", "1309": "stalls", "1310": "blearchers", "1311": "pipe", "1312": "storagetanks", "1313": "pipelines", "1314": "drums", "1315": "pipes", "1316": "reservoir", "1317": "identical", "1318": "pallets", "1319": "nine", "1320": "trays", "1321": "storerooms", "1322": "barrelan", "1323": "june", "1324": "columnar", "1325": "tanker", "1326": "loaded", "1327": "treatment", "1328": "jars", "1329": "refinery", "1330": "tankers", "1331": "storing", "1332": "jar", "1333": "bicycles", "1334": "barrels", "1335": "shrinking", "1336": "loops", "1337": "auxiliary", "1338": "eight-shaped", "1339": "flyovers", "1340": "ramps", "1341": "three-dimensional", "1342": "brush", "1343": "nested", "1344": "luggage", "1345": "turfs", "1346": "bunkers", "1347": "extremely", "1348": "20", "1349": ""}, "idx": 1350}
--------------------------------------------------------------------------------
/predict/vocabs.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information"
3 | # Yuan, Zhiqiang and Zhang, Wenkai and Tian, Changyuan and Rong, Xuee and Zhang, Zhengyuan and Wang, Hongqi and Fu, Kun and Sun, Xian
4 | # Written by YuanZhiqiang, 2021. Our code is based on AMFMN
5 | # ------------------------------------------------------------
6 |
7 | import argparse
8 | import json
9 | import os
10 | from collections import Counter
11 |
12 | import nltk  # requires the NLTK 'punkt' and 'stopwords' data packages (nltk.download)
13 |
14 | annotations = {
15 | 'coco_splits': ['train_caps.txt', 'val_caps.txt', 'test_caps.txt'],
16 | 'flickr30k_splits': ['train_caps.txt', 'val_caps.txt', 'test_caps.txt'],
17 | 'rsicd_precomp': ['train_caps.txt', 'val_caps.txt', 'test_caps.txt'],
18 | 'rsitmd_precomp': ['train_caps.txt', 'val_caps.txt'],
19 | 'ucm_precomp': ['train_caps.txt', 'val_caps.txt'],
20 | 'sydney_precomp': ['train_caps.txt', 'val_caps.txt'],
21 |
22 | }
23 |
24 |
25 | class Vocabulary(object):
26 | """Simple vocabulary wrapper."""
27 |
28 | def __init__(self):
29 | self.word2idx = {}
30 | self.idx2word = {}
31 | self.idx = 1
32 |
33 | def add_word(self, word):
34 | if word not in self.word2idx:
35 | self.word2idx[word] = self.idx
36 | self.idx2word[self.idx] = word
37 | self.idx += 1
38 |
39 | def __call__(self, word):
40 | if word not in self.word2idx:
41 |             return self.word2idx['<unk>']  # fall back to the <unk> token for out-of-vocabulary words
42 | return self.word2idx[word]
43 |
44 | def __len__(self):
45 | return len(self.word2idx)
46 |
47 |
48 | def serialize_vocab(vocab, dest):
49 | d = {}
50 | d['word2idx'] = vocab.word2idx
51 | d['idx2word'] = vocab.idx2word
52 | d['idx'] = vocab.idx
53 | with open(dest, "w") as f:
54 | json.dump(d, f)
55 |
56 |
57 | def deserialize_vocab(src):
58 | with open(src) as f:
59 | d = json.load(f)
60 | vocab = Vocabulary()
61 | vocab.word2idx = d['word2idx']
62 |     vocab.idx2word = d['idx2word']  # note: the JSON round-trip leaves these integer keys as strings
63 | vocab.idx = d['idx']
64 | return vocab
65 |
66 |
67 | def from_txt(txt):
68 | captions = []
69 |     with open(txt, 'rb') as f:  # read as bytes; decoded in build_vocab before tokenizing
70 | for line in f:
71 | captions.append(line.strip())
72 | return captions
73 |
74 |
75 | def build_vocab(data_path, data_name, caption_file, threshold):
76 | """Build a simple vocabulary wrapper."""
77 |
78 | stopword_list = list(set(nltk.corpus.stopwords.words('english')))
79 | counter = Counter()
80 | for path in caption_file[data_name]:
81 | full_path = os.path.join(os.path.join(data_path, data_name), path)
82 | captions = from_txt(full_path)
83 |
84 | for i, caption in enumerate(captions):
85 | tokens = nltk.tokenize.word_tokenize(
86 | caption.lower().decode('utf-8'))
87 | punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
88 | tokens = [k for k in tokens if k not in punctuations]
89 | tokens = [k for k in tokens if k not in stopword_list]
90 | counter.update(tokens)
91 |
92 | if i % 1000 == 0:
93 | print("[%d/%d] tokenized the captions." % (i, len(captions)))
94 |
95 |     # Discard words that occur fewer than `threshold` times.
96 | words = [word for word, cnt in counter.items() if cnt >= threshold]
97 |
98 |     # Create a vocab wrapper and add the special <unk> token.
99 |     vocab = Vocabulary()
100 |
101 |     # Add words to the vocabulary.
102 |     for word in words:
103 |         vocab.add_word(word)
104 |     vocab.add_word('<unk>')  # out-of-vocabulary fallback; added last
105 |
106 | return vocab
107 |
108 |
109 | def main(data_path, data_name):
110 |     vocab = build_vocab(data_path, data_name, caption_file=annotations, threshold=5)  # keep words seen at least 5 times
111 |     serialize_vocab(vocab, 'vocab/%s_vocab.json' % data_name)
112 |     print("Saved vocabulary file to ", 'vocab/%s_vocab.json' % data_name)
113 |
114 |
115 |
116 | if __name__ == '__main__':
117 | parser = argparse.ArgumentParser()
118 | parser.add_argument('--data_path', default='data')
119 |     parser.add_argument('--data_name', default='sydney_precomp',
120 |                         help='one of the keys in `annotations`, e.g. {rsitmd,rsicd,ucm,sydney}_precomp')
121 | opt = parser.parse_args()
122 | main(opt.data_path, opt.data_name)
123 |
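124 | # Example invocation (a usage sketch; assumes data/rsitmd_precomp/ contains the
125 | # caption split files listed in `annotations` above):
126 | #   python vocabs.py --data_path data --data_name rsitmd_precomp
127 | #
128 | # The saved file can then be reloaded for lookups, e.g.:
129 | #   vocab = deserialize_vocab('vocab/rsitmd_precomp_vocab.json')
130 | #   vocab('river')  # -> integer id; unknown words fall back to the <unk> id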
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.11.0
2 | argon2-cffi==20.1.0
3 | asn1crypto==0.24.0
4 | astunparse==1.6.3
5 | async-generator==1.10
6 | attrs==20.3.0
7 | backcall==0.2.0
8 | bleach==3.3.0
9 | cachetools==4.2.0
10 | certifi==2020.12.5
11 | cfbclient==1.1.0
12 | cffi==1.14.4
13 | chardet==4.0.0
14 | click==8.0.4
15 | cryptography==3.3.2
16 | cycler==0.10.0
17 | dataclasses==0.8
18 | decorator==4.4.2
19 | defusedxml==0.6.0
20 | diplomat==1.4.0
21 | entrypoints==0.3
22 | faiss==1.5.3
23 | filelock==3.4.1
24 | flatbuffers==1.12
25 | future==0.18.2
26 | gast==0.3.3
27 | google-auth==1.24.0
28 | google-auth-oauthlib==0.4.2
29 | google-pasta==0.2.0
30 | gr-record==1.12
31 | grpcio==1.32.0
32 | gviz-api==1.9.0
33 | h5py==2.10.0
34 | huggingface-hub==0.0.12
35 | idna==2.6
36 | importlib-metadata==3.4.0
37 | importlib-resources==5.4.0
38 | ipykernel==5.1.1
39 | ipython==7.16.3
40 | ipython-genutils==0.2.0
41 | ipywidgets==7.6.3
42 | jedi==0.17.2
43 | Jinja2==2.11.3
44 | joblib==1.0.1
45 | jsonschema==3.2.0
46 | jupyter==1.0.0
47 | jupyter-client==6.1.11
48 | jupyter-console==6.2.0
49 | jupyter-core==4.7.0
50 | jupyter-http-over-ws==0.0.8
51 | jupyterlab-pygments==0.1.2
52 | jupyterlab-widgets==1.0.0
53 | Keras-Preprocessing==1.1.2
54 | keyring==10.6.0
55 | keyrings.alt==3.0
56 | kiwisolver==1.3.1
57 | lightgbm==3.2.1
58 | llvmlite==0.36.0
59 | Markdown==3.3.3
60 | MarkupSafe==1.1.1
61 | matplotlib==3.3.3
62 | missingno==0.5.0
63 | mistune==0.8.4
64 | nbclient==0.5.1
65 | nbconvert==6.0.7
66 | nbformat==4.4.0
67 | nest-asyncio==1.4.3
68 | nltk==3.6.7
69 | notebook==6.4.10
70 | numba==0.53.1
71 | numpy==1.21
72 | oauthlib==3.1.0
73 | opencv-contrib-python-headless==4.5.5.64
74 | opt-einsum==3.3.0
75 | packaging==20.8
76 | pandas==1.1.5
77 | pandocfilters==1.4.3
78 | parso==0.7.1
79 | pexpect==4.8.0
80 | pickleshare==0.7.5
81 | Pillow==9.0.1
82 | plotly==5.1.0
83 | prometheus-client==0.9.0
84 | prompt-toolkit==3.0.13
85 | protobuf==3.15.0
86 | pstatsd==1.2.3
87 | ptyprocess==0.7.0
88 | pyasn1==0.4.8
89 | pyasn1-modules==0.2.8
90 | pycparser==2.20
91 | pycrypto==2.6.1
92 | Pygments==2.7.4
93 | pygobject==3.26.1
94 | pyparsing==2.4.7
95 | pyrsistent==0.17.3
96 | python-apt==1.6.5+ubuntu0.5
97 | python-consul==1.1.0
98 | python-dateutil==2.8.1
99 | pytz==2021.1
100 | pyxdg==0.26
101 | PyYAML==6.0
102 | pyzmq==21.0.1
103 | qtconsole==5.0.2
104 | QtPy==1.9.0
105 | redis==3.5.3
106 | regex==2022.3.15
107 | requests==2.25.1
108 | requests-oauthlib==1.3.0
109 | rsa==4.7
110 | ruamel.yaml==0.17.21
111 | ruamel.yaml.clib==0.2.6
112 | sacremoses==0.0.49
113 | scikit-learn==0.24.2
114 | scikit-image
115 | scipy==1.5.4
116 | seaborn==0.11.1
117 | SecretStorage==2.3.1
118 | Send2Trash==1.5.0
119 | six==1.15.0
120 | skipthoughts==0.0.1
121 | ssh-import-id==5.7
122 | tenacity==8.0.1
123 | tensorboard==2.4.1
124 | tensorboard-logger==0.1.0
125 | tensorboard-plugin-profile==2.5.0
126 | tensorboard-plugin-wit==1.8.0
127 | termcolor==1.1.0
128 | terminado==0.9.2
129 | testpath==0.4.4
130 | threadpoolctl==2.2.0
131 | timm==0.5.4
132 | tlcache==0.3.1
133 | tokenizers==0.10.3
134 | torch==1.8.1
135 | torchvision==0.9.1
136 | tornado==6.1
137 | tqdm==4.63.1
138 | traitlets==4.3.3
139 | transformers==4.8.1
140 | typing-extensions==3.7.4.3
141 | urllib3==1.26.5
142 | wcwidth==0.2.5
143 | webencodings==0.5.1
144 | Werkzeug==1.0.1
145 | widgetsnbextension==3.5.1
146 | wrapt==1.12.1
147 | xgboost==1.4.2
148 | zipp==3.4.0
149 |
--------------------------------------------------------------------------------
/test_data/imgs/README.md:
--------------------------------------------------------------------------------
1 | Download SLM_testimgs.zip, unzip it, and put the images here so that the directory looks like this (a helper snippet follows the tree):
2 |
3 | ```
4 | -SLM
5 | ---...
6 | ---test_data
7 | -----...
8 | -----imgs
9 | -------0.jpg
10 | -------1.jpg
11 | -------2.jpg
12 | -------...
13 | -------21.jpg
14 | ```
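15 |
16 | A minimal Python sketch for extracting the archive (assuming SLM_testimgs.zip sits in the repository root and stores the .jpg files at its top level):
17 |
18 | ```python
19 | import zipfile
20 |
21 | # Unpack the test images into test_data/imgs/
22 | with zipfile.ZipFile("SLM_testimgs.zip") as zf:
23 |     zf.extractall("test_data/imgs")
24 | ```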
--------------------------------------------------------------------------------
/test_data/visual_testdata.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------
2 | # Copyright @2022 AI, AIRCAS. (mails.ucas.ac.cn)
3 | # @author yuanzhiqiang
4 | # 2022/04/03
5 | # -----------------------------------------------------------
6 |
7 | import json
8 | import os
9 |
10 | import matplotlib.image as imgplt
11 | import matplotlib.pyplot as plt
12 |
13 |
14 | def analyze_samples(json_path):
15 | # load json
16 |     with open(json_path, 'r', encoding='utf8') as fp:
17 | json_data = json.load(fp)
18 |
19 | # analyze
20 | print("=========================")
21 | print("Lens of items: {}\n".format(len(json_data)))
22 | print("===== Map Relation ======")
23 | for idx, item in enumerate(json_data):
24 | print("Idx:{}, Filename:{}, Caption:{}".format(idx, json_data[idx]['jpg_name'], json_data[idx]['caption'].replace("\n", "")))
25 |
26 | return json_data
27 |
28 | def visual_data(png_path, json_data, show_idx):
29 |
30 | # load show data
31 | show_json_data = json_data[show_idx]
32 | finalname = os.path.join(png_path, show_json_data['jpg_name'])
33 | plotpoints = show_json_data['points']
34 | captiondata = show_json_data['caption']
35 |
36 | print("\n=========================")
37 | print("===== Visual Data=========")
38 | print("Idx: {}".format(show_idx))
39 | print("Filename: {}".format(finalname))
40 | print("Caption: {}".format(captiondata.replace("\n", "")))
41 | print("Annotations: {}".format(plotpoints))
42 |
43 | # visual data
44 | pic = imgplt.imread(finalname)
45 | plt.imshow(pic)
46 | plt.title(show_json_data['jpg_name'])
47 |
48 |     xdata = []
49 |     ydata = []
50 |     for k in range(len(plotpoints)):  # one polygon per annotated region
51 |         xdata.clear()
52 |         ydata.clear()
53 |         for j in range(len(plotpoints[k])):
54 |             item = plotpoints[k][j]
55 |             x = item[0]
56 |             y = item[1]
57 |             plt.scatter(x, y, s=25, c='r')  # mark each vertex in red
58 |             xdata.append(x)
59 |             ydata.append(y)
60 |         plt.plot(xdata, ydata, c='b')  # draw the polygon boundary in blue
61 |         plt.plot([xdata[0], xdata[-1]], [ydata[0], ydata[-1]], c='b')  # close the polygon
62 | plt.show()
63 |
64 |
65 | if __name__ == "__main__":
66 | png_path = "./imgs/"
67 | json_path = "./annotations/anno.json"
68 | show_idx = 44
69 |
70 | # analyze samples
71 | json_data = analyze_samples(json_path)
72 |
73 | # plot one sample
74 | visual_data(png_path, json_data, show_idx)
75 |
76 |
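77 | # Expected anno.json structure, inferred from the field accesses above (treat
78 | # this as a sketch; the authoritative schema ships with the annotations):
79 | #   [{"jpg_name": "0.jpg", "caption": "...", "points": [[[x, y], ...], ...]}]
80 | # Each entry of "points" is one region polygon given as a list of [x, y] vertices.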
--------------------------------------------------------------------------------