├── figures
│   └── framework.png
├── case_study
│   └── figures
│       ├── ElkAI.png
│       ├── elk.png
│       ├── traffic.png
│       └── TrafficAI.png
├── codes
│   ├── Inference.ipynb
│   ├── utils.py
│   └── prompt_generation.ipynb
├── LICENSE
└── README.md
/figures/framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GeoDS/GeoAnalystBench/HEAD/figures/framework.png
--------------------------------------------------------------------------------
/case_study/figures/ElkAI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GeoDS/GeoAnalystBench/HEAD/case_study/figures/ElkAI.png
--------------------------------------------------------------------------------
/case_study/figures/elk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GeoDS/GeoAnalystBench/HEAD/case_study/figures/elk.png
--------------------------------------------------------------------------------
/case_study/figures/traffic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GeoDS/GeoAnalystBench/HEAD/case_study/figures/traffic.png
--------------------------------------------------------------------------------
/case_study/figures/TrafficAI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GeoDS/GeoAnalystBench/HEAD/case_study/figures/TrafficAI.png
--------------------------------------------------------------------------------
/codes/Inference.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "\n",
10 | "import os\n",
11 | "import openai\n",
12 | "import json\n",
13 | "import numpy as np\n",
14 | "import pandas as pd\n",
15 | "import time\n",
16 | "import csv\n",
17 | "import re\n",
18 | "from utils import call_api\n",
19 | "\n",
20 | "OPENAI_API_KEY = \"\"\n",
21 | "Claude_API_KEY = \"\"\n",
22 | "gemini_API_KEY = \"\"\n",
23 | "os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n",
24 | "os.environ[\"ANTHROPIC_API_KEY\"] = Claude_API_KEY\n",
25 | "os.environ[\"GOOGLE_API_KEY\"] = gemini_API_KEY\n",
26 | "client = openai.OpenAI(api_key = OPENAI_API_KEY) #Put your own open ai api\n",
27 | "\n"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "# code inference for commercial models\n",
37 | "# using the prompts generated by the previous prompt generation process\n",
38 | "call_api('code', 'code_prompts.csv', 'code_responses_gpt.csv', 'gpt4')\n"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "# workflow inference for commercial models\n",
48 | "call_api('workflow', 'workflow_prompts.csv', 'workflow_responses_gpt.csv', 'gpt4')"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "#make sure to install ollama and download the model\n",
58 | "# code inference for local models\n",
59 | "call_api('code', 'code_prompts.csv', 'code_responses_gpt.csv', 'ollama', 'deepseek-r1:latest')\n",
60 | "\n",
61 | "# workflow inference for local models\n",
62 | "call_api('workflow', 'workflow_prompts.csv', 'workflow_responses_gpt.csv', 'ollama', 'deepseek-r1:latest')"
63 | ]
64 | }
65 | ],
66 | "metadata": {
67 | "kernelspec": {
68 | "display_name": "d2l",
69 | "language": "python",
70 | "name": "python3"
71 | },
72 | "language_info": {
73 | "codemirror_mode": {
74 | "name": "ipython",
75 | "version": 3
76 | },
77 | "file_extension": ".py",
78 | "mimetype": "text/x-python",
79 | "name": "python",
80 | "nbconvert_exporter": "python",
81 | "pygments_lexer": "ipython3",
82 | "version": "3.9.18"
83 | }
84 | },
85 | "nbformat": 4,
86 | "nbformat_minor": 2
87 | }
88 |
--------------------------------------------------------------------------------
/codes/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 | from langchain_openai import ChatOpenAI
3 | from langchain_anthropic import ChatAnthropic
4 | from langchain_google_genai import GoogleGenerativeAI
5 | import csv
6 | import pandas as pd
7 | import ollama
8 | def extract_task_list(long_string):
9 | """Extract the task list from a long string."""
10 | lines = long_string.split("\n")
11 | lines = [line.strip() for line in lines if line.strip()]
12 | lines = [line for line in lines if not re.match(r'^\d+\.', line)]
13 | return lines
14 |
15 | def call_gpt(prompt, temperature=0.7, max_tokens=None, timeout=None):
16 | llm = ChatOpenAI(
17 | model="gpt-4o-mini",
18 | temperature=temperature,
19 | max_tokens=max_tokens,
20 | timeout=timeout
21 | )
22 | result = llm.invoke(prompt)
23 | return result.content
24 |
25 | def call_claude(prompt, temperature=0.7, max_tokens=None, timeout=None):
26 | llm = ChatAnthropic(model="claude-3-5-sonnet-20241022",
27 | temperature=temperature,
28 | max_tokens=max_tokens,
29 | timeout=timeout)
30 | result = llm.invoke(prompt)
31 | return result.content
32 |
33 | def call_gemini(prompt, temperature=0.7, max_tokens=None, timeout=None):
34 | llm = GoogleGenerativeAI(model="gemini-1.5-flash",
35 | temperature=temperature,
36 | max_tokens=max_tokens,
37 | timeout=timeout)
38 | result = llm.invoke(prompt)
39 | return result
40 |
41 | def calculate_workflow_length_loss(annotations, responses):
42 | loss = 0
43 | for i in range(len(annotations)):
44 | filtered_responses = responses[responses["task_id"] == annotations.iloc[i]["id"]]
45 | for j in range(len(filtered_responses)):
46 | loss += abs(filtered_responses.iloc[j]["task_length"] - annotations.iloc[i]["task_length"])
47 | loss = loss / len(annotations)
48 | return loss
49 |
50 | def find_task_length(outline):
51 | number_list = []
52 | lines = outline.split("\n")
53 | for i, line in enumerate(lines):
54 | for j, char in enumerate(line):
55 | if char.isdigit():
56 | # Check if next char forms a 2-digit number
57 | if j + 1 < len(line) and line[j + 1].isdigit():
58 | if j + 2 < len(line) and line[j + 2] == '.':
59 | num = int(line[j:j+2])
60 | number_list.append(num)
61 | else:
62 | if j + 1 < len(line) and line[j + 1] == '.':
63 | num = int(char)
64 | if num <= 10:
65 | number_list.append(num)
66 | if not number_list:
67 | return 0
68 | return max(number_list)
69 |
70 | def call_api(api_type, prompt_file, output_file, model, ollama_model='deepseek-r1:latest', temperature=0.7):
71 | '''
72 | api_type: 'workflow' or 'code'
73 | prompt_file: the path to the csv file containing the prompts
74 | output_file: the path to the csv file where responses will be written
75 | model: 'gpt4', 'claude', 'gemini', or 'ollama' (set ollama_model to choose the local model)
76 |
77 | This function queries the selected model three times for each prompt in prompt_file and appends the responses to output_file.
78 | '''
79 | prompts = pd.read_csv(prompt_file)
80 | with open(output_file, "w", newline='') as f: # initialize the output file with a header row
81 | writer = csv.writer(f)
82 | writer.writerow(['task_id', 'response_id', 'prompt_type', 'response_type', 'Arcpy', 'llm_model', 'response_content', 'task_length'])
83 |
84 | responses = []
85 | for i, content in prompts.iterrows():
86 | if i > 0: # Clear previous line if not first iteration
87 | print('\r' + ' ' * 50 + '\r', end='') # Clear the previous line
88 | print(str(i+1) + '/' + str(len(prompts)), end='', flush=True)
89 | responses = []
90 | prompt = content['prompt_content']
91 | for _ in range(3): # sample three responses for each prompt
92 | if model == 'gpt4':
93 | response = call_gpt(prompt, temperature)
94 | elif model == 'claude':
95 | response = call_claude(prompt, temperature)
96 | elif model == 'gemini':
97 | response = call_gemini(prompt, temperature)
98 | elif model == 'ollama':
99 | response = call_ollama(prompt, ollama_model, temperature)
100 | responses.append(response)
101 | with open(output_file, "a", newline='') as f:
102 | writer = csv.writer(f)
103 | if content['domain_knowledge'] == True and content['dataset'] == True:
104 | prompt_type = 'domain_and_dataset'
105 | elif content['domain_knowledge'] == True:
106 | prompt_type = 'domain'
107 | elif content['dataset'] == True:
108 | prompt_type = 'dataset'
109 | else:
110 | prompt_type = 'original'
111 | if api_type == 'workflow':
112 | for j, response in enumerate(responses):
113 | writer.writerow([content['task_id'], str(content['task_id'])+api_type+str(j), prompt_type, api_type, content['Arcpy'], model, response, len(extract_task_list(response))])
114 | elif api_type == 'code':
115 | for j, response in enumerate(responses):
116 | writer.writerow([content['task_id'], str(content['task_id'])+api_type+str(j), prompt_type, api_type, content['Arcpy'], model, response, 'none'])
117 |
118 |
119 | def call_ollama(prompt, model='deepseek-r1:latest', temperature=0.7):
120 | # call ollama with different open source models
121 | response = ollama.generate(
122 | model=model,
123 | options={"temperature": temperature},
124 | prompt=prompt,
125 | )
126 | result = response.response
127 | if '</think>' in result:
128 | return result.split("</think>")[1].strip() # drop the <think> reasoning block emitted by deepseek-r1
129 | else:
130 | return result
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/codes/prompt_generation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 6,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import pandas as pd\n",
10 | "import csv\n"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 7,
16 | "metadata": {},
17 | "outputs": [
18 | {
19 | "data": {
20 | "text/html": [
21 | "
\n",
22 | "\n",
35 | "
\n",
36 | " \n",
37 | " \n",
38 | " | \n",
39 | " id | \n",
40 | " Open Source | \n",
41 | " Task Categories1 | \n",
42 | " Task Categories2 | \n",
43 | " Task Categories3 | \n",
44 | " Task | \n",
45 | " Instruction | \n",
46 | " Domain Knowledge | \n",
47 | " Dataset Description | \n",
48 | " Human Designed Workflow | \n",
49 | " Task Length | \n",
50 | " CodeString | \n",
51 | "
\n",
52 | " \n",
53 | " \n",
54 | " \n",
55 | " | 0 | \n",
56 | " 1 | \n",
57 | " T | \n",
58 | " Making predictions | \n",
59 | " Understanding where | \n",
60 | " NaN | \n",
61 | " Find heat islands and at-risk populations in M... | \n",
62 | " Your task is analyzing urban heat using Krigin... | \n",
63 | " Kriging is a commonly used spatial interpolati... | \n",
64 | " dataset/Temperature.geojson: Geojson file that... | \n",
65 | " 1. Load dataset\\n2. Interpolate(Temperature)\\n... | \n",
66 | " 7 | \n",
67 | " import numpy as np\\nimport geopandas as gpd\\nf... | \n",
68 | "
\n",
69 | " \n",
70 | " | 1 | \n",
71 | " 2 | \n",
72 | " T | \n",
73 | " Detecting and quantifying patterns | \n",
74 | " Finding the best locations and paths | \n",
75 | " NaN | \n",
76 | " Find future bus stop locations in Hamilton, Te... | \n",
77 | " Your task is performing analysis on public tra... | \n",
78 | " The Overlay toolset contains tools to overlay ... | \n",
79 | " dataset/BusServiceArea.geojson: Geojson file s... | \n",
80 | " 1. Load dataset\\n2. Filter(Poverty)\\n3. Filter... | \n",
81 | " 6 | \n",
82 | " import geopandas as gpd\\nimport matplotlib.pyp... | \n",
83 | "
\n",
84 | " \n",
85 | " | 2 | \n",
86 | " 3 | \n",
87 | " T | \n",
88 | " Understanding where | \n",
89 | " Detecting and quantifying patterns | \n",
90 | " NaN | \n",
91 | " Assess burn scars and understanding the impact... | \n",
92 | " Your task is assessing burn scars using satell... | \n",
93 | " Normalized Burn Ratio (NBR) is used to identif... | \n",
94 | " dataset/G_2014.tif: Raster file of satellite i... | \n",
95 | " 1. Load dataset\\n2. Filter(2014 bands)\\n3. Fil... | \n",
96 | " 6 | \n",
97 | " import rasterio\\nimport numpy as np\\nimport ma... | \n",
98 | "
\n",
99 | " \n",
100 | " | 3 | \n",
101 | " 4 | \n",
102 | " F | \n",
103 | " Understanding where | \n",
104 | " Finding the best locations and paths | \n",
105 | " NaN | \n",
106 | " Identify groundwater vulnerable areas that nee... | \n",
107 | " Your task is identifying groundwater vulnerabl... | \n",
108 | " Suitability modeling is an analytical process ... | \n",
109 | " dataset/mc_soils.shp: In this shapefile, three... | \n",
110 | " 1. Load dataset\\n2. Project (shapefile)\\n3. Po... | \n",
111 | " 10 | \n",
112 | " import arcpy\\nfrom arcpy.sa import *\\nfrom arc... | \n",
113 | "
\n",
114 | " \n",
115 | " | 4 | \n",
116 | " 5 | \n",
117 | " F | \n",
118 | " Detecting and quantifying patterns | \n",
119 | " NaN | \n",
120 | " NaN | \n",
121 | " Visualize the data about children with elevate... | \n",
122 | " Your task is visualizing data about children w... | \n",
123 | " Hot spot analysis is based on mathematical cal... | \n",
124 | " High_Blood_Level_Results.shp: This shapefile c... | \n",
125 | " 1.Load dataset\\n2. Perform Optimized Hot Spot ... | \n",
126 | " 5 | \n",
127 | " import arcpy\\n\\n# Set up the input shapefiles ... | \n",
128 | "
\n",
129 | " \n",
130 | "
\n",
131 | "
"
132 | ],
133 | "text/plain": [
134 | " id Open Source Task Categories1 \\\n",
135 | "0 1 T Making predictions \n",
136 | "1 2 T Detecting and quantifying patterns \n",
137 | "2 3 T Understanding where \n",
138 | "3 4 F Understanding where \n",
139 | "4 5 F Detecting and quantifying patterns \n",
140 | "\n",
141 | " Task Categories2 Task Categories3 \\\n",
142 | "0 Understanding where NaN \n",
143 | "1 Finding the best locations and paths NaN \n",
144 | "2 Detecting and quantifying patterns NaN \n",
145 | "3 Finding the best locations and paths NaN \n",
146 | "4 NaN NaN \n",
147 | "\n",
148 | " Task \\\n",
149 | "0 Find heat islands and at-risk populations in M... \n",
150 | "1 Find future bus stop locations in Hamilton, Te... \n",
151 | "2 Assess burn scars and understanding the impact... \n",
152 | "3 Identify groundwater vulnerable areas that nee... \n",
153 | "4 Visualize the data about children with elevate... \n",
154 | "\n",
155 | " Instruction \\\n",
156 | "0 Your task is analyzing urban heat using Krigin... \n",
157 | "1 Your task is performing analysis on public tra... \n",
158 | "2 Your task is assessing burn scars using satell... \n",
159 | "3 Your task is identifying groundwater vulnerabl... \n",
160 | "4 Your task is visualizing data about children w... \n",
161 | "\n",
162 | " Domain Knowledge \\\n",
163 | "0 Kriging is a commonly used spatial interpolati... \n",
164 | "1 The Overlay toolset contains tools to overlay ... \n",
165 | "2 Normalized Burn Ratio (NBR) is used to identif... \n",
166 | "3 Suitability modeling is an analytical process ... \n",
167 | "4 Hot spot analysis is based on mathematical cal... \n",
168 | "\n",
169 | " Dataset Description \\\n",
170 | "0 dataset/Temperature.geojson: Geojson file that... \n",
171 | "1 dataset/BusServiceArea.geojson: Geojson file s... \n",
172 | "2 dataset/G_2014.tif: Raster file of satellite i... \n",
173 | "3 dataset/mc_soils.shp: In this shapefile, three... \n",
174 | "4 High_Blood_Level_Results.shp: This shapefile c... \n",
175 | "\n",
176 | " Human Designed Workflow Task Length \\\n",
177 | "0 1. Load dataset\\n2. Interpolate(Temperature)\\n... 7 \n",
178 | "1 1. Load dataset\\n2. Filter(Poverty)\\n3. Filter... 6 \n",
179 | "2 1. Load dataset\\n2. Filter(2014 bands)\\n3. Fil... 6 \n",
180 | "3 1. Load dataset\\n2. Project (shapefile)\\n3. Po... 10 \n",
181 | "4 1.Load dataset\\n2. Perform Optimized Hot Spot ... 5 \n",
182 | "\n",
183 | " CodeString \n",
184 | "0 import numpy as np\\nimport geopandas as gpd\\nf... \n",
185 | "1 import geopandas as gpd\\nimport matplotlib.pyp... \n",
186 | "2 import rasterio\\nimport numpy as np\\nimport ma... \n",
187 | "3 import arcpy\\nfrom arcpy.sa import *\\nfrom arc... \n",
188 | "4 import arcpy\\n\\n# Set up the input shapefiles ... "
189 | ]
190 | },
191 | "execution_count": 7,
192 | "metadata": {},
193 | "output_type": "execute_result"
194 | }
195 | ],
196 | "source": [
197 | "data = pd.read_csv(\"GeoAnalystBench.csv\")\n",
198 | "data.head()"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": 8,
204 | "metadata": {},
205 | "outputs": [
206 | {
207 | "data": {
208 | "text/plain": [
209 | "id 1\n",
210 | "Open Source T\n",
211 | "Task Categories1 Making predictions\n",
212 | "Task Categories2 Understanding where\n",
213 | "Task Categories3 NaN\n",
214 | "Task Find heat islands and at-risk populations in M...\n",
215 | "Instruction Your task is analyzing urban heat using Krigin...\n",
216 | "Domain Knowledge Kriging is a commonly used spatial interpolati...\n",
217 | "Dataset Description dataset/Temperature.geojson: Geojson file that...\n",
218 | "Human Designed Workflow 1. Load dataset\\n2. Interpolate(Temperature)\\n...\n",
219 | "Task Length 7\n",
220 | "CodeString import numpy as np\\nimport geopandas as gpd\\nf...\n",
221 | "Name: 0, dtype: object"
222 | ]
223 | },
224 | "execution_count": 8,
225 | "metadata": {},
226 | "output_type": "execute_result"
227 | }
228 | ],
229 | "source": [
230 | "# Use the first task as an example\n",
231 | "ID = 1 # Use your own task id\n",
232 | "\n",
233 | "row = data.iloc[ID-1]\n",
234 | "if row['Open Source'] == 'T':\n",
235 | " Arcpy = False\n",
236 | "else:\n",
237 | " Arcpy = True\n",
238 | "row"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": 10,
244 | "metadata": {},
245 | "outputs": [],
246 | "source": [
247 | "def add_line_breaks(long_string, char_limit=80): #limit char from each line to 80\n",
248 | " words = long_string.split()\n",
249 | " new_string = \"\"\n",
250 | " char_count = 0\n",
251 | " for word in words:\n",
252 | " new_string += word + \" \"\n",
253 | " char_count += len(word)\n",
254 | " if char_count > char_limit:\n",
255 | " new_string += \"\\n\"\n",
256 | " char_count = 0\n",
257 | " return new_string\n",
258 | "\n",
259 | "def long_line_break(long_string): #limit string for being too long per line\n",
260 | " result = \"\"\n",
261 | "\n",
262 | " if isinstance(long_string, str):\n",
263 | " for line in long_string.split(\"\\n\"):\n",
264 | " new_line = add_line_breaks(line)\n",
265 | " result += new_line + \"\\n\"\n",
266 | " else:\n",
267 | " result = str(long_string)\n",
268 | " return result\n",
269 | "\n",
270 | "\n",
271 | "task = row[\"Task\"]\n",
272 | "instruction = row[\"Instruction\"]\n",
273 | "domainKnowledge = row[\"Domain Knowledge\"]\n",
274 | "dataset = row[\"Dataset Description\"]\n",
275 | "\n",
276 | "instruction = add_line_breaks(instruction)\n",
277 | "task = add_line_breaks(task)\n",
278 | "domainKnowledge = add_line_breaks(domainKnowledge)\n",
279 | "dataset = long_line_break(dataset)\n"
280 | ]
281 | },
282 | {
283 | "cell_type": "code",
284 | "execution_count": 13,
285 | "metadata": {},
286 | "outputs": [],
287 | "source": [
288 | "def workflowTemplate(IDs=None, tasks=None, instructions=None, zeroShot=False, domainKnowledges=None, datasets=None):\n",
289 | " if task is None and instruction is None:\n",
290 | " print(\"Task or Instruction is necessary\")\n",
291 | " return None\n",
292 | " prompt = {\n",
293 | " \"Task\": tasks,\n",
294 | " \"Instruction\": instructions,\n",
295 | " \"Domain Knowledge\": domainKnowledges,\n",
296 | " \"Dataset Description\": datasets\n",
297 | " }\n",
298 | "\n",
299 | " template = \"\"\"As a Geospatial data scientist, you will generate a workflow to a proposed task.\\n\"\"\" #Define role\n",
300 | " for key, value in prompt.items():\n",
301 | " if value is not None:\n",
302 | " template += f\"\\n[{key}]: \\n{value}\" #include information\n",
303 | "\n",
304 | " #one shot sample\n",
305 | " sample = \"\"\" \\n\\\"\\\"\\\"\n",
306 | " tasks = [“task1”, “task2”, “task3”]\n",
307 | "\n",
308 | " G = nx.DiGraph()\n",
309 | " for i in range(len(tasks) - 1):\n",
310 | " G.add_edge(tasks[i], tasks[i + 1])\n",
311 | " pos = nx.drawing.nx_pydot.graphviz_layout(G, prog=\"dot\")\n",
312 | " plt.figure(figsize=(15, 8))\n",
313 | " nx.draw(G, pos, with_labels=True, node_size=3000, node_color='lightblue', font_size=10, font_weight='bold', arrowsize=20)\n",
314 | " plt.title(\"Workflow for Analyzing Urban Heat Using Kriging Interpolation\", fontsize=14)\n",
315 | " plt.show()\\n\\\"\\\"\\\"\n",
316 | " \"\"\"\n",
317 | "\n",
318 | " #Key Notes\n",
319 | " template += '\\n[Key Notes]:'\n",
320 | " template += '\\n1.Use **automatic reasoning** and clearly explain each step (Chain of Thoughts approach).'\n",
321 | " template += '\\n2.Using **NetworkX* package for visualization.'\n",
322 | " template += '\\n3.Using \\'dot\\' for graph visualization layout.'\n",
323 | " template += '\\n4.Multiple subtasks can be proceeded correspondingly because'\n",
324 | " template += '\\nall of their outputs will be inputs for the next subtask.'\n",
325 | " template += \"\\n5.Limiting your output to code, no extra information.\"\n",
326 | " template += '\\n6.Only codes for workflow, no implementation.'\n",
327 | " template += '\\n'\n",
328 | " if zeroShot is False:\n",
329 | " template += \"\\n[Expected Sample Output Begin]\"\n",
330 | " template += \"\\n\" + sample\n",
331 | " template += \"[Expected Sample Output End]\"\n",
332 | " return template\n"
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "execution_count": 11,
338 | "metadata": {},
339 | "outputs": [],
340 | "source": [
341 | "def codeTemplate(IDs=None, tasks=None, instructions=None, zeroShot=False, domainKnowledges=None, datasets=None, Arcpy=False):\n",
342 | " if task is None and instruction is None:\n",
343 | " print(\"Task or Instruction is necessary\")\n",
344 | " return None\n",
345 | " prompt = {\n",
346 | " \"Task\": tasks,\n",
347 | " \"Instruction\": instructions,\n",
348 | " \"Domain Knowledge\": domainKnowledges,\n",
349 | " \"Dataset Description\": datasets\n",
350 | " }\n",
351 | "\n",
352 | " template = \"\"\"As a Geospatial data scientist, generate a python file to solve the proposed task.\\n\"\"\"\n",
353 | " for key, value in prompt.items():\n",
354 | " if value is not None:\n",
355 | " template += f\"\\n[{key}]: \\n{value}\"\n",
356 | "\n",
357 | " sample = \"\"\" \\\"\\\"\\\"\n",
358 | " import packages\n",
359 | "\n",
360 | " def main():\n",
361 | " path = \"path\"\n",
362 | " data = loaddata()\n",
363 | " #code for subtask1\n",
364 | " #code for subtask2\n",
365 | " #code for final task\n",
366 | "\n",
367 | " if __name__ == \"__main__\":\n",
368 | " main()\n",
369 | " \\\"\\\"\\\"\n",
370 | " \"\"\"\n",
371 | " template += '\\n\\n[Key Notes]:'\n",
372 | " template += '\\n1.Use **automatic reasoning** and clearly explain each subtask before performing it (ReAct approach).'\n",
373 | " template += '\\n2.Using latest python packages for code generation'\n",
374 | " template += \"\\n3.Put all code under main function, no helper functions\"\n",
375 | " template += \"\\n4.Limit your output to code, no extra information.\"\n",
376 | " if Arcpy is True:\n",
377 | " template += \"\\n5.Use latest **Arcpy** functions only\"\n",
378 | " else:\n",
379 | " template += \"\\n5.Use latest open source python packages only\"\n",
380 | " template += '\\n'\n",
381 | " if zeroShot is False:\n",
382 | " template += \"\\n[Expected Sample Output Begin]\"\n",
383 | " template += \"\\n\" + sample\n",
384 | " template += \"[Expected Sample Output End]\"\n",
385 | " return template"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": 14,
391 | "metadata": {},
392 | "outputs": [
393 | {
394 | "name": "stdout",
395 | "output_type": "stream",
396 | "text": [
397 | "As a Geospatial data scientist, generate a python file to solve the proposed task.\n",
398 | "\n",
399 | "[Task]: \n",
400 | "Find heat islands and at-risk populations in Madison, Wisconsin \n",
401 | "[Instruction]: \n",
402 | "Your task is analyzing urban heat using Kriging interpolation techniques in Python. The analysis \n",
403 | "should focus on understanding spatial patterns of urban heat islands by using point temperature \n",
404 | "data and interpolating these values across a city. You will have to use a demographic layer to extract \n",
405 | "and enhance the data visualization on the elder group(Age>65). The goal is to load the temperature \n",
406 | "sample data, apply the Kriging method to predict temperature across the urban area, and generate \n",
407 | "a choropleth map showing the average interpolated temperature surface in each census block group. \n",
408 | "Highlighting the area with high interpolated area as well as high density of the elder population. \n",
409 | "The final output should be saved as \"pred_results/interpolated_urban_heat.png\". \n",
410 | "\n",
411 | "[Key Notes]:\n",
412 | "1.Use **automatic reasoning** and clearly explain each subtask before performing it (ReAct approach).\n",
413 | "2.Using latest python packages for code generation\n",
414 | "3.Put all code under main function, no helper functions\n",
415 | "4.Limit your output to code, no extra information.\n",
416 | "5.Use latest open source python packages only\n",
417 | "\n"
418 | ]
419 | }
420 | ],
421 | "source": [
422 | "sample_code_prompt = codeTemplate(tasks=task, instructions=instruction, zeroShot=True, Arcpy=False)\n",
423 | "sample_workflow_prompt = workflowTemplate(tasks=task, instructions=instruction, zeroShot=False)\n",
424 | "\n",
425 | "print(sample_code_prompt)"
426 | ]
427 | },
428 | {
429 | "cell_type": "code",
430 | "execution_count": 17,
431 | "metadata": {},
432 | "outputs": [],
433 | "source": [
434 | "#Generate prompts for first 50 tasks\n",
435 | "with open('code_prompts.csv', 'w') as f:\n",
436 | " writer = csv.writer(f)\n",
437 | " writer.writerow(['task_id', 'type', 'domain_knowledge', 'dataset', 'Arcpy', 'prompt_content'])\n",
438 | "\n",
439 | "with open('workflow_prompts.csv', 'w') as f:\n",
440 | " writer = csv.writer(f)\n",
441 | " writer.writerow(['task_id', 'type', 'domain_knowledge', 'dataset', 'Arcpy', 'prompt_content'])\n",
442 | "\n",
443 | "for id in range(50):\n",
444 | " row = data.iloc[id]\n",
445 | " task = row[\"Task\"]\n",
446 | " instruction = row[\"Instruction\"]\n",
447 | " domainKnowledge = row[\"Domain Knowledge\"]\n",
448 | " dataset = row[\"Dataset Description\"]\n",
449 | " if row[\"Open Source\"] != 'T':\n",
450 | " Arcpy = True\n",
451 | " else:\n",
452 | " Arcpy = False\n",
453 | "\n",
454 | " instruction = add_line_breaks(instruction)\n",
455 | " task = add_line_breaks(task)\n",
456 | " domainKnowledge = add_line_breaks(domainKnowledge)\n",
457 | " dataset = long_line_break(dataset)\n",
458 | "\n",
459 | " # Generate prompts with different combinations of domain knowledge and dataset descriptions\n",
460 | " combinations = [ #each task has 4 combinations of domain knowledge and dataset descriptions\n",
461 | " (False, False),\n",
462 | " (True, False),\n",
463 | " (False, True),\n",
464 | " (True, True)\n",
465 | " ]\n",
466 | "\n",
467 | " for domain, dataset_included in combinations:\n",
468 | " # Build parameters for template functions\n",
469 | " # Build params for code template\n",
470 | " code_params = {\n",
471 | " 'tasks': task,\n",
472 | " 'instructions': instruction,\n",
473 | " 'zeroShot': True,\n",
474 | " 'Arcpy': Arcpy\n",
475 | " }\n",
476 | "\n",
477 | " # Build params for workflow template (without Arcpy)\n",
478 | " workflow_params = {\n",
479 | " 'tasks': task,\n",
480 | " 'instructions': instruction,\n",
481 | " 'zeroShot': False\n",
482 | " }\n",
483 | "\n",
484 | " if domain:\n",
485 | " code_params['domainKnowledges'] = domainKnowledge\n",
486 | " workflow_params['domainKnowledges'] = domainKnowledge\n",
487 | " if dataset_included:\n",
488 | " code_params['datasets'] = dataset\n",
489 | " workflow_params['datasets'] = dataset\n",
490 | "\n",
491 | " # Generate code and workflow prompts\n",
492 | " code_prompt = codeTemplate(**code_params)\n",
493 | " workflow_prompt = workflowTemplate(**workflow_params)\n",
494 | "\n",
495 | " # Write to CSV files\n",
496 | " with open('code_prompts.csv', 'a', newline='') as f:\n",
497 | " writer = csv.writer(f)\n",
498 | " writer.writerow([id+1, 'code', domain, dataset_included, Arcpy, code_prompt])\n",
499 | " with open('workflow_prompts.csv', 'a', newline='') as f:\n",
500 | " writer = csv.writer(f)\n",
501 | " writer.writerow([id+1, 'workflow', domain, dataset_included, Arcpy, workflow_prompt])\n"
502 | ]
503 | }
504 | ],
505 | "metadata": {
506 | "kernelspec": {
507 | "display_name": "d2l",
508 | "language": "python",
509 | "name": "python3"
510 | },
511 | "language_info": {
512 | "codemirror_mode": {
513 | "name": "ipython",
514 | "version": 3
515 | },
516 | "file_extension": ".py",
517 | "mimetype": "text/x-python",
518 | "name": "python",
519 | "nbconvert_exporter": "python",
520 | "pygments_lexer": "ipython3",
521 | "version": "3.9.18"
522 | }
523 | },
524 | "nbformat": 4,
525 | "nbformat_minor": 2
526 | }
527 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GeoAnalystBench
2 | GeoAnalystBench: A GeoAI benchmark for assessing large language models for spatial analysis workflow and code generation
3 |
4 | ## Automating GIS Workflows with Large Language Models (LLMs)
5 |
6 | ## Reference
7 |
8 | Zhang, Q., Gao, S., Wei, C., Zhao, Y., Nie, Y., Chen, Z., Chen, S., Su, Y., & Sun, H. (2025). [GeoAnalystBench: A GeoAI benchmark for assessing large language models for spatial analysis workflow and code generation.](https://onlinelibrary.wiley.com/doi/10.1111/tgis.70135) Transactions in GIS, 29(7), e70135.
9 |
10 |
11 | ```
12 | @article{zhang2025geoanalystbench,
13 | title={GeoAnalystBench: A GeoAI benchmark for assessing large language models for spatial analysis workflow and code generation},
14 | author={Zhang, Qianheng and Gao, Song and Wei, Chen and Zhao, Yibo and Nie, Ying and Chen, Ziru and Chen, Shijie and Su, Yu and Sun, Huan},
15 | journal={Transactions in GIS},
16 | volume={29},
17 | number={7},
18 | pages={e70135},
19 | year={2025}
20 | }
21 | ```
22 |
23 | Recent advances in Geospatial Artificial Intelligence (GeoAI) have been driven by generative AI and foundation models. While powerful geoprocessing tools are widely available in Geographic Information Systems (GIS), automating these workflows using AI-driven Python scripting remains a challenge, especially for non-expert users.
24 |
25 | This project explores the capabilities of Large Language Models (LLMs) such as ChatGPT, Claude, Gemini, Llama, and DeepSeek in automating GIS workflows. We introduce a benchmark of 50 well-designed real-world geoprocessing tasks, carefully validated by GIS domain experts, to evaluate these models' ability to generate spatial analysis workflows and Python functions from natural language instructions.
26 |
27 | Our findings reveal that proprietary LLMs achieve higher success rates (>90%) and produce workflows more closely aligned with human-designed implementations than open-source models with fewer parameters. The results suggest that integrating proprietary LLMs with ArcPy is a more effective approach for specialized GIS workflows.
28 |
29 | By providing benchmarks and insights, this study contributes to the development of optimized prompting strategies, future GIS automation tools, and hybrid GeoAI workflows that combine LLMs with human expertise.
30 | 
31 | ## Key Features:
32 | - **Benchmark for GIS Automation**: Evaluation of LLMs on 50 real-world geoprocessing tasks.
33 | - **LLM Performance Comparison**: Validity and similarity analysis of generated workflows.
34 | - **Open-source Versus Proprietary Models**: Comparison of performance and reliability.
35 |
36 | ## Dataset
37 |
38 | This research developed 50 Python-based real-world geoprocessing tasks derived
39 | from GIS platforms, software, online tutorials, and academic literature. Each task comprises 3 to 10 subtasks, because
40 | the simplest task still involves data loading, applying at least one spatial analysis tool, and saving the final outputs. The
41 | list of those tasks with their sources is included in the [Tasks](#tasks) section below.
42 |
43 | The geoprocessing task dataset includes the following information:
44 | | Key Column | Description |
45 | |---------------------------|-------------|
46 | | ID | Unique identifier for each task |
47 | | Open or Closed Source | Whether the task uses open-source libraries or the closed-source ArcPy library |
48 | | Task | Brief description of the task |
49 | | Instruction/Prompt | Natural language instruction for completing the task |
50 | | Domain Knowledge | Domain-specific knowledge related to the task |
51 | | Dataset Description | Data name, format, descriptions, and key columns |
52 | | Human Designed Workflow | Numbered list of steps in the human-designed workflow |
53 | | Task Length | Number of steps in the human-designed workflow |
54 | | Code | Human-designed Python code for the task and dataset |
55 |
56 | The geoprocessing task dataset is available to download at [GeoAnalystBench](https://github.com/GeoDS/GeoAnalystBench/blob/master/dataset/GeoAnalystBench.csv).
57 |
58 | The data used in this research is available to download at [Google Drive](https://drive.google.com/drive/u/0/folders/1GhgxWkNVh4FTgS1RETgvbstBqx0Q9ezp).
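
A minimal sketch for loading the benchmark table with pandas (assuming the CSV has been downloaded to `dataset/GeoAnalystBench.csv` as in the repository layout):

```python
import pandas as pd

# Load the 50-task benchmark table
data = pd.read_csv("dataset/GeoAnalystBench.csv")

# 'Open Source' is 'T' for open-source tasks; the remaining tasks target ArcPy,
# mirroring the flag used in codes/prompt_generation.ipynb
arcpy_needed = data["Open Source"] != "T"
print(f"{len(data)} tasks total, {arcpy_needed.sum()} require ArcPy")

# Inspect one task's human-designed workflow and its step count
row = data.iloc[0]
print(row["Task"])
print(row["Human Designed Workflow"])
print("Task Length:", row["Task Length"])
```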
59 |
60 | ## Tasks
61 | There are 50 tasks in the dataset, and this section covers all tasks and their sources. For more details, please refer to the [GeoAnalystBench](https://github.com/GeoDS/GeoAnalystBench/blob/master/dataset/GeoAnalystBench.csv).
62 |
63 | Note that some tasks share the same name but have different IDs. This typically happens when the tasks differ slightly, or when one task is a subset of a larger task.
64 |
65 |
67 |
68 | | ID | Task Name | Source |
69 | |----|-----------|--------|
70 | | 1 | Find heat islands and at-risk populations in Madison, Wisconsin | [Analyze urban heat using kriging](https://learn.arcgis.com/en/projects/analyze-urban-heat-using-kriging/) |
71 | | 2 | Find future bus stop locations in Hamilton | [Assess access to public transit](https://learn.arcgis.com/en/projects/assess-access-to-public-transit/) |
72 | | 3 | Assess burn scars and wildfire impact in Montana using satellite imagery | [Assess burn scars with satellite imagery](https://learn.arcgis.com/en/projects/assess-burn-scars-with-satellite-imagery/) |
73 | | 4 | Identify groundwater vulnerable areas that need protection | [Identify groundwater vulnerable areas](https://learn.arcgis.com/en/projects/identify-groundwater-vulnerable-areas/) |
74 | | 5 | Visualize data on children with elevated blood lead levels while protecting privacy | [De-identify health data for visualization and sharing](https://learn.arcgis.com/en/projects/de-identify-health-data-for-visualization-and-sharing/) |
75 | | 6 | Use animal GPS tracks to model home range and movement over time | [Model animal home range](https://learn.arcgis.com/en/projects/model-animal-home-range/) |
76 | | 7 | Analyze the impacts of land subsidence on flooding | [Model how land subsidence affects flooding](https://learn.arcgis.com/en/projects/model-how-land-subsidence-affects-flooding/) |
77 | | 8 | Find gaps in Toronto fire station service coverage | [Get started with Python in ArcGIS Pro](https://learn.arcgis.com/en/projects/get-started-with-python-in-arcgis-pro/) |
78 | | 9 | Find the deforestation rate for Rondônia | [Predict deforestation in the Amazon rain forest](https://learn.arcgis.com/en/projects/predict-deforestation-in-the-amazon-rain-forest/) |
79 | | 10 | Analyze the impact of proposed roads on the local environment | [Predict deforestation in the Amazon rain forest](https://learn.arcgis.com/en/projects/predict-deforestation-in-the-amazon-rain-forest/) |
80 | | 11 | Create charts in Python to explore coral and sponge distribution around Catalina Island | [Chart coral and sponge distribution](https://learn.arcgis.com/en/projects/chart-coral-and-sponge-distribution-factors-with-python/) |
81 | | 12 | Find optimal corridors to connect dwindling mountain lion populations | [Build a model to connect mountain lion habitat](https://learn.arcgis.com/en/projects/build-a-model-to-connect-mountain-lion-habitat/) |
82 | | 13 | Understand the relationship between ocean temperature and salinity at various depths in the South Atlantic Ocean | [SciTools Iris](https://github.com/SciTools/iris) |
83 | | 14 | Detect persistent periods of high temperature over the past 240 years | [SciTools Iris](https://github.com/SciTools/iris) |
84 | | 15 | Understand the geographical distribution of Total Electron Content (TEC) in the ionosphere | [SciTools Iris](https://github.com/SciTools/iris) |
85 | | 16 | Analyze climate change trends in North America using spatiotemporal data | [SciTools Iris](https://github.com/SciTools/iris) |
86 | | 17 | Analyze the geographical distribution of fatal car crashes in New York City during 2016 | [Pointplot of NYC fatal and injurious traffic collisions](https://github.com/ResidentMario/geoplot/blob/master/examples/plot_nyc_collisions_map.py) |
87 | | 18 | Analyze street tree species data in San Francisco | [Quadtree of San Francisco street trees](https://github.com/ResidentMario/geoplot/blob/master/examples/plot_san_francisco_trees.py) |
88 | | 19 | Model spatial patterns of water quality | [Model water quality](https://learn.arcgis.com/en/projects/model-water-quality-using-interpolation/) |
89 | | 20 | Predict the likelihood of tin-tungsten deposits in Tasmania | [Geospatial ML Challenges: A prospectivity analysis example](https://github.com/Solve-Geosolutions/transform_2022) |
90 | | 21 | Find optimal corridors to connect dwindling mountain lion populations(2) | [Build a model to connect mountain lion habitat](https://learn.arcgis.com/en/projects/build-a-model-to-connect-mountain-lion-habitat/) |
91 | | 22 | Find optimal corridors to connect dwindling mountain lion populations(3) | [Build a model to connect mountain lion habitat](https://learn.arcgis.com/en/projects/build-a-model-to-connect-mountain-lion-habitat/) |
92 | | 23 | Assess Open Space to Lower Flood Insurance Cost | [Assess open space to lower flood insurance cost](https://learn.arcgis.com/en/projects/assess-open-space-to-lower-flood-insurance-cost/) |
93 | | 24 | Provide a de-identified point-level dataset that includes all the variables of interest for each child, as well as their general location | [De-identify health data for visualization and sharing](https://learn.arcgis.com/en/projects/de-identify-health-data-for-visualization-and-sharing/) |
94 | | 25 | Create risk maps for transmission, susceptibility, and resource scarcity. Then create a map of risk profiles to help pinpoint targeted intervention areas | [Analyze COVID-19 risk using ArcGIS Pro](https://learn.arcgis.com/en/projects/analyze-covid-19-risk-using-arcgis-pro/) |
95 | | 26 | Use drainage conditions and water depth to calculate groundwater vulnerable areas | [Identify groundwater vulnerable areas](https://learn.arcgis.com/en/projects/identify-groundwater-vulnerable-areas/) |
96 | | 27 | Identify undeveloped areas from groundwater risk zones | [Identify groundwater vulnerable areas](https://learn.arcgis.com/en/projects/identify-groundwater-vulnerable-areas/) |
97 | | 28 | Estimate the origin-destination (OD) flows between regions based on the socioeconomic attributes of regions and the mobility data | [ScienceDirect - OD Flow Estimation](https://www.sciencedirect.com/science/article/pii/S2210670724008382) |
98 | | 29 | Calculate Travel Time for a Tsunami | [Calculate travel time for a tsunami](https://learn.arcgis.com/en/projects/calculate-travel-time-for-a-tsunami/) |
99 | | 30 | Designate bike routes for commuting professionals | [Designate bike routes](https://desktop.arcgis.com/en/analytics/case-studies/designate-bike-routes-for-commuters.htm) |
100 | | 31 | Detect aggregation scales of geographical flows | [Geographical Flow Aggregation](https://www.tandfonline.com/doi/full/10.1080/13658816.2020.1749277) |
101 | | 32 | Find optimal corridors to connect dwindling mountain lion populations | [Build a model to connect mountain lion habitat](https://learn.arcgis.com/en/projects/build-a-model-to-connect-mountain-lion-habitat/) |
102 | | 33 | Analyze the impacts of land subsidence on flooding | [Model how land subsidence affects flooding](https://learn.arcgis.com/en/projects/model-how-land-subsidence-affects-flooding/) |
103 | | 34 | Estimate the accessibility of roads to rural areas in Japan | [Estimate access to infrastructure](https://learn.arcgis.com/en/projects/estimate-access-to-infrastructure/) |
104 | | 35 | Calculate landslide potential for communities affected by wildfires | [Landslide Potential Calculation](https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/overview-of-spatial-analyst.htm) |
105 | | 36 | Compute the change in vegetation before and after a hailstorm with the SAVI index | [Assess hail damage in cornfields with satellite imagery](https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/ndvi.htm) |
106 | | 37 | Analyze human sentiments of heat exposure using social media data | [National-level Analysis using Twitter Data](https://platform.i-guide.io/notebooks/6c518fed-0a65-4858-949e-24ee8dc4d85b) |
107 | | 38 | Calculate travel time from one location to others in a neighborhood | [Intro to OSM Network Data](https://platform.i-guide.io/notebooks/02f9b712-f4ac-47bc-9382-3c1e0f37b4e3) |
108 | | 39 | Train a Geographically Weighted Regression model to predict Georgia's Bachelor's degree rate | [Geographically Weighted Regression Demo](https://platform.i-guide.io/notebooks/d8926bb3-864d-4542-8027-02fc6edc868f) |
109 | | 40 | Calculate and visualize changes in malaria prevalence | [Visualizing Shrinking Malaria Rates](https://www.esri.com/arcgis-blog/products/arcgis-pro/mapping/visualize-shrinking-malaria-rates-in-africa/) |
110 | | 41 | Improve campsite data quality using a relationship class | [Improve campsite data](https://learn.arcgis.com/en/projects/improve-campsite-data-quality-using-a-relationship-class/) |
111 | | 42 | Investigate spatial patterns for Airbnb prices in Berlin | [Determine dangerous roads for drivers](https://learn.arcgis.com/en/projects/determine-the-most-dangerous-roads-for-drivers/) |
112 | | 43 | Use animal GPS tracks to model home range to understand where they are and how they move over time | [Model animal home range](https://learn.arcgis.com/en/projects/model-animal-home-range/) |
113 | | 44 | Find gap for Toronto fire station service coverage | [Get started with Python in ArcGIS Pro](https://pro.arcgis.com/en/pro-app/latest/arcpy/get-started/what-is-arcpy-.htm) |
114 | | 45 | Find optimal corridors to connect dwindling mountain lion populations | [Build a model to connect mountain lion habitat](https://learn.arcgis.com/en/projects/build-a-model-to-connect-mountain-lion-habitat/) |
115 | | 46 | Identify hot spots for peak crashes | [Determine the most dangerous roads for drivers](https://learn.arcgis.com/en/projects/determine-the-most-dangerous-roads-for-drivers/) |
116 | | 47 | Calculate impervious surface area | [Calculate impervious surfaces](https://learn.arcgis.com/en/projects/calculate-impervious-surfaces-from-spectral-imagery/) |
117 | | 48 | Determine how location impacts interest rates | [Impact of Location on Interest Rates](https://learn.arcgis.com/en/projects/determine-how-location-impacts-interest-rates/) |
118 | | 49 | Mapping the Impact of Housing Shortage on Oil Workers | [Homeless in the Badlands](https://learn.arcgis.com/en/projects/homeless-in-the-badlands/arcgis-pro/) |
119 | | 50 | Predict seagrass habitats | [Predict seagrass habitats with machine learning](https://learn.arcgis.com/en/projects/predict-seagrass-habitats-with-machine-learning/#prepare-training-data) |
120 |
121 |
122 |
123 |
124 | ## Case Study 1 (Task 43): Identification of Home Range and Spatial Clusters from Animal Movements
126 | Understanding elk movement patterns is critical for wildlife conservation and management in the field of animal ecology. This task identifies elk home ranges in Southwestern Alberta in 2009 using GPS tracking locations. In doing so, researchers can analyze space use and movement clusters for elk populations. Understanding the elk population's home range is essential for ensuring the sustainability and stability of the wildlife.
127 |
128 | 
129 |
130 | ### Dataset
131 | • dataset/Elk_in_Southwestern_Alberta_2009.geojson: Geojson file storing point locations of elk movements in Southwestern Alberta in 2009; key columns include 'timestamp', 'long', 'lat', 'summer_indicator', and 'geometry'.
134 |
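A minimal open-source sketch of the convex-hull and DBSCAN steps described in this case study (Kernel Density Estimation omitted). It assumes geopandas, shapely, and scikit-learn; the input path follows the dataset description above, and the eps/min_samples values are illustrative placeholders rather than the benchmark's reference settings:

```python
import geopandas as gpd
from shapely.geometry import MultiPoint
from sklearn.cluster import DBSCAN

def main():
    # Load elk GPS fixes (path from the dataset description above)
    elk = gpd.read_file("dataset/Elk_in_Southwestern_Alberta_2009.geojson")

    # Minimum Bounding Geometry (convex hull) as a simple home-range estimate
    home_range = MultiPoint(list(elk.geometry)).convex_hull
    print("Convex-hull home range area (square degrees):", home_range.area)

    # DBSCAN on raw lon/lat to find movement clusters; eps is in degrees here,
    # so tune it (or project the data to meters) for real analyses
    coords = list(zip(elk.geometry.x, elk.geometry.y))
    elk["cluster"] = DBSCAN(eps=0.01, min_samples=10).fit_predict(coords)
    elk.to_file("dataset/elk_clusters.geojson", driver="GeoJSON")

if __name__ == "__main__":
    main()
```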
135 |
136 | ### Prompts
137 |
138 | #### Workflow Prompt
139 |
140 | > As a Geospatial data scientist, you will generate a workflow for a proposed task.
141 | >
142 | > [Task]:
143 | > Use animal GPS tracks to model home range to understand where they are and how they move over time.
144 | >
145 | > [Instruction]:
146 | > Your task is to analyze and visualize elk movements using the provided dataset. The goal is to estimate
147 | > home ranges and assess habitat preferences using spatial analysis techniques, including Minimum
148 | > Bounding Geometry (Convex Hull), Kernel Density Estimation, and Density-Based Clustering (DBSCAN).
149 | > The analysis will generate spatial outputs stored in "dataset/elk_home_range.gdb" and "dataset/".
150 | >
151 | > [Domain Knowledge]:
152 | > "Home range" can be defined as the area within which an animal normally lives and finds what it needs
153 | > for survival. Basically, the home range is the area that an animal travels for its normal daily activities.
154 | > "Minimum Bounding Geometry" creates a feature class containing polygons which represent a specified
155 | > minimum bounding geometry enclosing each input feature or each group of input features. "Convex
156 | > hull" is the smallest convex polygon that can enclose a group of objects, such as a group of points.
157 | > "Kernel Density Mapping" calculates and visualizes features's density in a given area. "DBSCAN",
158 | > Density-Based Spatial Clustering of Applications with Noise that cluster the points based on density
159 | > criterion.
160 | > [Dataset Description]:
161 | > dataset/Elk_in_Southwestern_Alberta_2009.geojson: geojson files for storing points of Elk
162 | > movements in Southwestern Alberta 2009.
163 | >
164 | > Columns of dataset/Elk_in_Southwestern_Alberta_2009.geojson:
165 | > 'OBJECTID', 'timestamp', 'long', 'lat', 'comments', 'external_t', 'dop',
166 | > 'fix_type_r', 'satellite_', 'height', 'crc_status', 'outlier_ma',
167 | > 'sensor_typ', 'individual', 'tag_ident', 'ind_ident', 'study_name',
168 | > 'date', 'time', 'timestamp_Converted', 'summer_indicator', 'geometry'
169 | >
170 | >
171 | > [Key Notes]:
172 | > 1. Use **automatic reasoning** and clearly explain each step (Chain-of-Thought approach).
173 | >
174 | > 2. Use the **NetworkX** package for visualization.
175 | >
176 | > 3. Use 'dot' for the graph visualization layout.
177 | >
178 | > 4. Multiple subtasks can proceed in sequence because
179 | > all of their outputs will be inputs for the next subtask.
180 | >
181 | > 5. Limit your output to code, with no extra information.
182 | >
183 | > 6. Provide only code for the workflow, with no implementation.
184 | >
185 | > [Expected Sample Output Begin]
186 | >
187 | > """
188 | >
189 | > tasks = [Task1, Task2, Task3]
190 | >
191 | > G = nx.DiGraph()
192 | >
193 | > for i in range(len(tasks) - 1):
194 | >
195 | > G.add_edge(tasks[i], tasks[i + 1])
196 | >
197 | > pos = nx.drawing.nx_pydot.graphviz_layout(G, prog="dot")
198 | >
199 | > plt.figure(figsize=(15, 8))
200 | >
201 | > nx.draw(G, pos, with_labels=True, node_size=3000, node_color='lightblue', font_size=10, font_weight='bold', arrowsize=20)
202 | >
203 | > plt.title("Workflow for Analyzing Urban Heat Using Kriging Interpolation", fontsize=14)
204 | >
205 | > plt.show()
206 | >
207 | > """
208 | >
209 | > [Expected Sample Output End]
210 |
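To make the expected output concrete, the sketch below instantiates the sample workflow graph for this elk task. It is an illustration only: the subtask names are our own reading of the [Instruction] above, not the benchmark's reference workflow, and the 'dot' layout requires Graphviz and pydot to be installed.

```python
import networkx as nx
import matplotlib.pyplot as plt

# Illustrative subtasks inferred from the instruction; not the reference answer
tasks = [
    "Load Elk GPS Points",
    "Minimum Bounding Geometry (Convex Hull)",
    "Kernel Density Estimation",
    "DBSCAN Clustering",
    "Export Results to elk_home_range.gdb",
]

# Chain the subtasks into a directed workflow graph
G = nx.DiGraph()
for i in range(len(tasks) - 1):
    G.add_edge(tasks[i], tasks[i + 1])

# 'dot' layout via Graphviz/pydot, as the prompt requires
pos = nx.drawing.nx_pydot.graphviz_layout(G, prog="dot")

plt.figure(figsize=(15, 8))
nx.draw(G, pos, with_labels=True, node_size=3000, node_color="lightblue",
        font_size=10, font_weight="bold", arrowsize=20)
plt.title("Workflow for Elk Home Range Analysis", fontsize=14)
plt.show()
```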
211 |
212 |
213 | **Code Generation Prompts**
214 |
215 | > As a Geospatial data scientist, generate a Python file to solve the proposed task.
216 | >
217 | > [Task]:
218 | > Use animal GPS tracks to model home range to understand where they are and how they move over time.
219 | >
220 | > [Instruction]:
221 | > Your task is to analyze and visualize elk movements using the provided dataset. The goal is to estimate
222 | > home ranges and assess habitat preferences using spatial analysis techniques, including Minimum
223 | > Bounding Geometry (Convex Hull), Kernel Density Estimation, and Density-Based Clustering (DBSCAN).
224 | > The analysis will generate spatial outputs stored in "dataset/elk_home_range.gdb" and "dataset/".
225 | >
226 | > [Domain Knowledge]:
227 | > "Home range" can be defined as the area within which an animal normally lives and finds what it needs
228 | > for survival. Basically, the home range is the area that an animal travels for its normal daily activities.
229 | >
230 | > "Minimum Bounding Geometry" creates a feature class containing polygons which represent a specified
231 | > minimum bounding geometry enclosing each input feature or each group of input features.
232 | >
233 | > "Convex hull" is the smallest convex polygon that can enclose a group of objects, such as a group of points.
234 | >
235 | > "Kernel Density Mapping" calculates and visualizes features's density in a given area. "DBSCAN",
236 | > Density-Based Spatial Clustering of Applications with Noise that cluster the points based on density
237 | > criterion.
238 | >
239 | > [Dataset Description]:
240 | > dataset/Elk_in_Southwestern_Alberta_2009.geojson: geojson files for storing points of Elk
241 | > movements in Southwestern Alberta 2009.
242 | >
243 | > Columns of dataset/Elk_in_Southwestern_Alberta_2009.geojson:
244 | > 'OBJECTID', 'timestamp', 'long', 'lat', 'comments', 'external_t', 'dop',
245 | > 'fix_type_r', 'satellite_', 'height', 'crc_status', 'outlier_ma',
246 | > 'sensor_typ', 'individual', 'tag_ident', 'ind_ident', 'study_name',
247 | > 'date', 'time', 'timestamp_Converted', 'summer_indicator', 'geometry'
248 | >
249 | >
250 | >
251 | > [Key Notes]:
252 | > 1. Use **automatic reasoning** and clearly explain each subtask before performing it (ReAct approach).
253 | >
254 | > 2. Use the latest Python packages for code generation.
255 | >
256 | > 3. Put all code under a main function; no helper functions.
257 | >
258 | > 4. Limit your output to code, with no extra information.
259 | >
260 | > 5. Use the latest **ArcPy** functions only.
262 |
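The prompt above mandates ArcPy, which requires an ArcGIS license. For orientation, a rough open-source analogue of the same pipeline (per-animal convex-hull home ranges plus DBSCAN movement clusters) can be sketched with GeoPandas and scikit-learn; the output paths, the projection choice, and the DBSCAN parameters below are illustrative assumptions, not benchmark settings.

```python
import geopandas as gpd
import numpy as np
from sklearn.cluster import DBSCAN

# Load elk GPS fixes (path taken from the prompt's dataset description)
gdf = gpd.read_file("dataset/Elk_in_Southwestern_Alberta_2009.geojson")

# Convex-hull "home range" per animal: an open-source stand-in for ArcPy's
# Minimum Bounding Geometry (CONVEX_HULL), grouped by the 'individual' column
hulls = gdf.dissolve(by="individual").convex_hull
hulls.to_file("dataset/elk_home_range_hulls.geojson", driver="GeoJSON")

# DBSCAN movement clusters on projected coordinates; a local UTM zone would be
# more accurate than Web Mercator, and eps/min_samples are assumed values
projected = gdf.to_crs(epsg=3857)
coords = np.column_stack([projected.geometry.x, projected.geometry.y])
gdf["cluster"] = DBSCAN(eps=500, min_samples=10).fit_predict(coords)
print(gdf["cluster"].value_counts())
```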
263 |
264 |
265 | ### Results
266 |
267 | 
268 |
269 |
270 | ## Case Study 2 (Task 46): Spatial Hotspot Analysis of Car Accidents
271 | The second case study concerns spatial hotspot analysis of car accidents. Brevard County, Florida is home to one of the
272 | deadliest interstate highways in the United States. This case study aims to identify spatially clustered hot spots
273 | along the road network. The dataset includes the road network, crash locations from 2010 to 2015, and a network spatial
274 | weights matrix. Understanding the hot spots for car accidents is essential for the local transportation department
275 | when making policy and responding quickly to future accidents.
276 |
277 | 
278 | ### Dataset
279 | • roads.shp: The road network of Brevard County.
280 |
281 | • crashes.shp: The locations of crashes in Brevard County, Florida between 2010 and 2015.
282 |
283 | • nwswm360ft.swm: Spatial weights matrix file created using the Generate Network Spatial Weights tool and a street network built from Brevard County road polylines.
284 |
285 |
286 | ### Prompts
287 |
288 | **Workflow Prompts**
289 |
290 | > As a Geospatial data scientist, you will generate a workflow for a proposed task.
291 | >
292 | > [Task]:
293 | > Identify hot spots for peak crashes
294 | >
295 | > [Instruction]:
296 | > Your task is to identify hot spots for peak-period crashes in Brevard County, Florida, 2010-2015. The first
297 | > step is to select all crashes that occurred during the peak period and create a copy of the selected crash data. Then,
298 | > snap the crash points to the road network and spatially join them with the roads. Calculate the crash rate
299 | > based on the joined data and use hot spot analysis to produce a crash hot spot map as the result.
300 | >
301 | > [Domain Knowledge]:
302 | > We consider weekday traffic between 3 pm and 5 pm as peak. For the snapping process, the recommended
303 | > buffer along roads is 0.25 miles. Because hot spot analysis looks for high crash rates that cluster close
304 | > together, accurate distance measurements based on the road network are essential.
305 | >
306 | > [Dataset Description]:
307 | > dataset/crashes.shp: The locations of crashes in Brevard County, Florida between 2010 and 2015.
308 | >
309 | > dataset/roads.shp: The road network of Brevard County.
310 | >
311 | > dataset/nwswm360ft.swm: Spatial weights matrix file created using the Generate Network Spatial
312 | > Weights tool and a street network built from Brevard County road polylines.
313 | >
315 | > [Key Notes]:
316 | > 1. Use **automatic reasoning** and clearly explain each step (Chain-of-Thought approach).
317 | >
318 | > 2. Use the **NetworkX** package for visualization.
319 | >
320 | > 3. Use 'dot' for the graph visualization layout.
321 | >
322 | > 4. Multiple subtasks can proceed in sequence because
323 | > all of their outputs will be inputs for the next subtask.
324 | >
325 | > 5. Limit your output to code, with no extra information.
326 | >
327 | > 6. Provide only code for the workflow, with no implementation.
328 | >
329 | > [Expected Sample Output Begin]
330 | >
331 | > ```python
332 | > tasks = [Task1, Task2, Task3]
333 | >
334 | > G = nx.DiGraph()
335 | > for i in range(len(tasks) - 1):
336 | >     G.add_edge(tasks[i], tasks[i + 1])
337 | >
338 | > pos = nx.drawing.nx_pydot.graphviz_layout(G, prog="dot")
339 | > plt.figure(figsize=(15, 8))
340 | > nx.draw(G, pos, with_labels=True, node_size=3000, node_color='lightblue', font_size=10, font_weight='bold', arrowsize=20)
341 | > plt.title("Workflow for Analyzing Urban Heat Using Kriging Interpolation", fontsize=14)
342 | > plt.show()
343 | > ```
344 | >
345 | > [Expected Sample Output End]
354 |
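Note that the first step in the [Instruction], selecting peak-period crashes, is a plain attribute filter before any spatial tooling. The sketch below shows one way to express it with GeoPandas; the timestamp field name ("DATETIME") is hypothetical, since the crash attribute schema is not listed in the prompt.

```python
import geopandas as gpd
import pandas as pd

crashes = gpd.read_file("dataset/crashes.shp")
# "DATETIME" is a hypothetical field name; inspect crashes.shp for the real one
ts = pd.to_datetime(crashes["DATETIME"])

# Peak period per the [Domain Knowledge]: weekdays, 3 pm to 5 pm
is_weekday = ts.dt.dayofweek < 5          # Monday=0 ... Friday=4
is_peak_hour = ts.dt.hour.isin([15, 16])  # 15:00-16:59, i.e. before 5 pm
peak_crashes = crashes[is_weekday & is_peak_hour].copy()
peak_crashes.to_file("dataset/peak_crashes.shp")
```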
355 |
356 |
357 |
358 | **Code Generation Prompts**
359 |
360 | > As a Geospatial data scientist, generate a Python file to solve the proposed task.
361 | >
362 | > [Task]:
363 | > Identify hot spots for peak crashes
364 | >
365 | > [Instruction]:
366 | > Your task is to identify hot spots for peak-period crashes in Brevard County, Florida, 2010-2015. The first
367 | > step is to select all crashes that occurred during the peak period and create a copy of the selected crash data. Then,
368 | > snap the crash points to the road network and spatially join them with the roads. Calculate the crash rate
369 | > based on the joined data and use hot spot analysis to produce a crash hot spot map as the result.
370 | >
371 | > [Domain Knowledge]:
372 | > We consider weekday traffic between 3 pm and 5 pm as peak. For the snapping process, the recommended
373 | > buffer along roads is 0.25 miles. Because hot spot analysis looks for high crash rates that cluster close
374 | > together, accurate distance measurements based on the road network are essential.
375 | >
376 | > [Dataset Description]:
377 | > dataset/crashes.shp: The locations of crashes in Brevard County, Florida between 2010 and 2015.
378 | >
379 | > dataset/roads.shp: The road network of Brevard County.
380 | >
381 | > dataset/nwswm360ft.swm: Spatial weights matrix file created using the Generate Network Spatial
382 | > Weights tool and a street network built from Brevard County road polylines.
383 | >
384 | > [Key Notes]:
385 | > 1. Use **automatic reasoning** and clearly explain each subtask before performing it (ReAct approach).
386 | >
387 | > 2. Use the latest Python packages for code generation.
388 | >
389 | > 3. Put all code under a main function; no helper functions.
390 | >
391 | > 4. Limit your output to code, with no extra information.
392 | >
393 | > 5. Use the latest **ArcPy** functions only.
394 |
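For orientation, the final step this prompt targets maps naturally onto ArcPy's Hot Spot Analysis (Getis-Ord Gi*) tool driven by the provided .swm file. The sketch below is a hedged illustration rather than the benchmark's reference solution: the joined feature class ("crashes_joined.shp") and the crash-rate field ("Crash_Rate") are hypothetical names for outputs of the earlier selection and join steps.

```python
import arcpy

arcpy.env.workspace = "dataset"
arcpy.env.overwriteOutput = True

# Getis-Ord Gi* hot spot analysis using the network spatial weights matrix;
# input feature class and field names are assumed intermediate outputs
arcpy.stats.HotSpots(
    Input_Feature_Class="crashes_joined.shp",
    Input_Field="Crash_Rate",
    Output_Feature_Class="crash_hot_spots.shp",
    Conceptualization_of_Spatial_Relationships="GET_SPATIAL_WEIGHTS_FROM_FILE",
    Distance_Method="EUCLIDEAN_DISTANCE",
    Standardization="NONE",
    Weights_Matrix_File="nwswm360ft.swm",
)
```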
395 |
396 | ### Results
397 |
398 | 
399 |
400 |
401 |
402 | ## Acknowledgement
403 | We acknowledge the funding support from the National Science Foundation-funded AI Institute for Intelligent Cyberinfrastructure with Computational Learning in the Environment (ICICLE) [Grant No. 2112606]. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the funder(s).
404 |
--------------------------------------------------------------------------------