├── .gitignore ├── .ipynb_checkpoints ├── 4_visualize_imagery_over_time-checkpoint.ipynb └── 6_mosaicing-and-masking-checkpoint.ipynb ├── Analytics ├── .gitignore ├── 01_checking_available_feeds_and_subscriptions.ipynb ├── 02_fetching_feed_results.ipynb ├── 02_fetching_vector_feed_results.ipynb ├── 03_visualizing_raster_results.ipynb ├── api_concepts_deep_dive.ipynb ├── feed_results_deep_dive.ipynb └── plnbstyles.py ├── Data_API ├── .ipynb_checkpoints │ └── data_api_introductions-checkpoint.ipynb ├── data_api_introductions.ipynb ├── interactive_training │ ├── README.md │ └── data-interactive.ipynb ├── planet-explorer.png └── requirements.txt ├── GEE_delivery ├── .ipynb_checkpoints │ └── gee-notebook-checkpoint.ipynb ├── gee-notebook.ipynb └── gee.json ├── LICENSE ├── Misc ├── .ipynb_checkpoints │ ├── mosaicking_and_masking-checkpoint.ipynb │ └── viz_imagery_over_time-checkpoint.ipynb ├── Forestry │ ├── .ipynb_checkpoints │ │ └── 6_compositing_demo-checkpoint.ipynb │ ├── 6_compositing_demo.ipynb │ ├── drc_roads_classification.ipynb │ ├── drc_roads_download.ipynb │ ├── drc_roads_mosaic.ipynb │ ├── drc_roads_temporal_analysis.ipynb │ └── drc_roads_udm2.ipynb ├── data │ └── mt-dana-small.geojson ├── how-to-ql.md ├── images │ ├── explorer-data-order.png │ ├── explorer-mount-dana.gif │ ├── final_in_qgis.png │ ├── pe-mtdana.gif │ └── planet-explorer.png ├── mosaicking_and_masking.ipynb ├── ndvi_ps_sr.ipynb ├── udm_download_viz.ipynb └── viz_imagery_over_time.ipynb ├── OrdersAPI ├── .ipynb_checkpoints │ ├── gee-notebook-checkpoint.ipynb │ ├── ordering_and_delivery-checkpoint.ipynb │ └── tools_and_toolchains-checkpoint.ipynb ├── ordering_and_delivery.ipynb ├── orders-delivery.js └── tools_and_toolchains.ipynb ├── PlanetCLI ├── cli_workshop.md ├── imgs │ ├── qgis-aoi.png │ ├── qgis-auth.png │ ├── qgis-repro.png │ ├── qgis-results.png │ ├── qgis-search.png │ └── qgis-viz.png ├── reproject.json └── savoy.geojson ├── PlanetIntegrations └── Planet-for-QGIS-Plugin.zip ├── 
Planetary_Variables_API ├── .ipynb_checkpoints │ └── Planetary Variables Subscriptions API-checkpoint.ipynb ├── Files_for_subscription_demo │ └── demo_field_1.geojson ├── Gridded_Data_API.ipynb ├── Planetary Variables Subscriptions API.ipynb └── Time_Series_Data_API.ipynb ├── README.md ├── RemoteSensing101 ├── Inspecting_Satellite_Imagery.ipynb ├── Visualizing_Satellite_Imagery.ipynb ├── example.tif └── pixels2.png ├── Subscription_API └── Subscription_API.ipynb ├── Tasking ├── .ipynb_checkpoints │ └── tasking_intro_python_training-checkpoint.ipynb └── tasking_intro_python_training.ipynb ├── TilesAPI ├── .ipynb_checkpoints │ └── WMTS_URL_generation-checkpoint.ipynb └── WMTS_URL_generation.ipynb ├── one2many ├── Basemaps │ └── Basemap_streaming.ipynb ├── Data & Orders API training │ ├── planet_python_client_introduction.ipynb │ ├── sf_84.geojson │ ├── sf_UTM.geojson │ └── sf_all.geojson ├── REST APIs │ ├── REST_API_Intro.ipynb │ └── sf.geojson └── Tools and Toolchains │ └── tools_and_toolchains.ipynb └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Binary or system files 2 | *.csv 3 | *.xlsx 4 | *.pyc 5 | .~lock* 6 | *.docx 7 | .idea/ 8 | __pycache__/ 9 | .DS_Store -------------------------------------------------------------------------------- /Analytics/.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary files 2 | *.md 3 | Tester* 4 | -------------------------------------------------------------------------------- /Analytics/01_checking_available_feeds_and_subscriptions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Summarizing Feeds and Subscriptions\n", 8 | "This notebook demonstrates how to describe available [Analytics Feeds](https://developers.planet.com/docs/analytics/#analytic-feeds) and 
[Subscriptions](https://developers.planet.com/docs/analytics/#subscriptions) with the Planet Analytic API." 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "## Setup\n", 16 | "To use this notebook, you need to have the following:\n", 17 | "- A Planet account with access to the Analytics API\n", 18 | "- A Planet API Key" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "#### Set API Key" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "import os\n", 35 | "import folium\n", 36 | "import requests\n", 37 | "import pandas as pd\n", 38 | "\n", 39 | "# if your Planet API Key is not set as an environment variable, you can paste it below\n", 40 | "API_KEY = os.environ.get('PL_API_KEY')\n", 41 | "# construct auth tuple for use in the requests library\n", 42 | "BASIC_AUTH = (API_KEY, '')" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "#### Set the base url for the Planet Analytic Feeds product\n", 50 | "See the [Analytics API Docs](https://developers.planet.com/docs/analytics/) for more details." 
51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "BASE_URL = \"https://api.planet.com/analytics/\"" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "#### Test API Connection" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "resp = requests.get(BASE_URL, auth=BASIC_AUTH)\n", 76 | "if resp.status_code == 200:\n", 77 | " print('Yay, you can access the Analytics API')\n", 78 | "else:\n", 79 | " print('Something is wrong:', resp.content)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "## Summarizing Feeds\n", 87 | "In this section, we will see describe the available feeds." 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "#### How many feeds are there?" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "limit = 1000\n", 104 | "feed_list_url = f'{BASE_URL}feeds?limit={limit}'\n", 105 | "print(f'Feeds endpoint: {feed_list_url}')\n", 106 | "resp = requests.get(feed_list_url, auth=BASIC_AUTH)\n", 107 | "feeds = resp.json()['data']\n", 108 | "feed_count = len(feeds)\n", 109 | "print(f'Available feeds: {feed_count}')\n", 110 | "if feed_count >= limit:\n", 111 | " print('More feeds are probably available through pagination links')\n", 112 | " print(resp.json()['links'])" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "#### Inspecting feed metadata" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "from pprint import pprint\n", 129 | "pprint(feeds[0])" 130 | ] 131 | }, 132 | { 133 | "cell_type": "markdown", 
134 | "metadata": {}, 135 | "source": [ 136 | "**Some of the fields include:**\n", 137 | "- id: this is a unique identifier for a feed\n", 138 | "- title: a human friendly name for the feed\n", 139 | "- description: more detail text about the feed\n", 140 | "- created: timestamp for when the feed was originally created\n", 141 | "- updated: timestamp for when the feed was last modified\n", 142 | "- source: a blob describing the imagery on which the feed is based\n", 143 | "- target: a blob describing the feed's output format\n", 144 | "- links: a list of blobs containing urls to related resources" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "#### Showing the first ten feeds in a table\n", 152 | "We can use a pandas DataFrame to summarize the available feeds." 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "\n", 162 | "# bump this up in case there are many available feeds to display\n", 163 | "pd.options.display.max_rows = 1000\n", 164 | "# make a dataframe from the feeds json data\n", 165 | "df = pd.DataFrame(feeds[:10])\n", 166 | "# instead of including the entire source and target dicts, make columns for the types\n", 167 | "df['targetType'] = df['target'].map(lambda t: t['type'])\n", 168 | "df['sourceType'] = df['source'].map(lambda t: t[0]['type'])\n", 169 | "df[['id', 'title', 'description', 'sourceType', 'targetType', 'created', 'updated']]\n" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "## Summarizing Subscriptions\n", 177 | "Now that we know about available feeds, let's check out available subscriptions." 
178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "limit = 1000\n", 187 | "subscriptions_url = f'{BASE_URL}subscriptions?limit={limit}'\n", 188 | "print(f'Subscriptions endpoint: {subscriptions_url}')\n", 189 | "resp = requests.get(subscriptions_url, auth=BASIC_AUTH)\n", 190 | "subs = resp.json()['data']\n", 191 | "sub_count = len(subs)\n", 192 | "print(f'Available subscriptions: {sub_count}')\n", 193 | "if sub_count >= limit:\n", 194 | " print('More subscriptions are probably available through pagination links')\n", 195 | " print(resp.json()['links'])" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "#### What's in a subscription?" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "pprint(subs[0])" 212 | ] 213 | }, 214 | { 215 | "cell_type": "markdown", 216 | "metadata": {}, 217 | "source": [ 218 | "Subscriptions also have id, title, description, created, and updated fields.\n", 219 | "Additionally, there are fields for:\n", 220 | "- feedID: which feed this subscription is for\n", 221 | "- startTime: timestamp for the subscription's beginning\n", 222 | "- endTime: timestamp for the subscription's ending\n", 223 | "- geometry: spatial area of interest to which the subscription has access\n", 224 | "\n", 225 | "**Important:** \n", 226 | "Subscriptions will only get results for source imagery observed between the `startTime` and `endTime` within the specified `geometry`." 227 | ] 228 | }, 229 | { 230 | "cell_type": "markdown", 231 | "metadata": {}, 232 | "source": [ 233 | "`created` and `updated` refer to when the subscription itself was set up or modified and **do not** impact results that show up for the subscription. \n", 234 | "\n", 235 | "`startTime` and `endTime` **do** limit feed results for the subscription." 
236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "#### Showing all available subscriptions" 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "df = pd.DataFrame(subs[:10])\n", 252 | "df[['id', 'title', 'description', 'feedID', 'startTime', 'endTime', 'created', 'updated']]" 253 | ] 254 | }, 255 | { 256 | "cell_type": "markdown", 257 | "metadata": {}, 258 | "source": [ 259 | "#### Filtering subscriptions by feed" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": null, 265 | "metadata": {}, 266 | "outputs": [], 267 | "source": [ 268 | "feed_id = feeds[0]['id']\n", 269 | "feed_title = feeds[0]['title']\n", 270 | "print(feed_title)\n", 271 | "print('id:', feed_id)" 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "metadata": {}, 278 | "outputs": [], 279 | "source": [ 280 | "filtered_subscriptions_url = f'{BASE_URL}subscriptions?feedID={feed_id}'\n", 281 | "print('url:', filtered_subscriptions_url)\n", 282 | "resp = requests.get(filtered_subscriptions_url, auth=BASIC_AUTH)\n", 283 | "filtered_subs = resp.json()['data']\n", 284 | "filtered_sub_count = len(filtered_subs)\n", 285 | "print(f'You have access to {filtered_sub_count} subscriptions for feed {feed_id} ({feed_title})')" 286 | ] 287 | }, 288 | { 289 | "cell_type": "markdown", 290 | "metadata": {}, 291 | "source": [ 292 | "#### Inspecting a subscription's geometry\n", 293 | "Subscriptions have a spatial area of interest described by a geojson geometry. We can visualize the area of interest for a subscription on a map." 
294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "metadata": {}, 300 | "outputs": [], 301 | "source": [ 302 | "# get the latest subscription's geometry\n", 303 | "subscription = subs[0]\n", 304 | "geom = subscription['geometry']\n", 305 | "\n", 306 | "print(geom['coordinates'][0][0][0])\n", 307 | "lon, lat = geom['coordinates'][0][0][0]\n", 308 | "m = folium.Map(location=[lat, lon], zoom_start=5)\n", 309 | "\n", 310 | "geo_json_data = geom\n", 311 | "\n", 312 | "folium.GeoJson(geo_json_data).add_to(m)\n", 313 | "m" 314 | ] 315 | } 316 | ], 317 | "metadata": { 318 | "kernelspec": { 319 | "display_name": "Python 3 (ipykernel)", 320 | "language": "python", 321 | "name": "python3" 322 | }, 323 | "language_info": { 324 | "codemirror_mode": { 325 | "name": "ipython", 326 | "version": 3 327 | }, 328 | "file_extension": ".py", 329 | "mimetype": "text/x-python", 330 | "name": "python", 331 | "nbconvert_exporter": "python", 332 | "pygments_lexer": "ipython3", 333 | "version": "3.9.5" 334 | } 335 | }, 336 | "nbformat": 4, 337 | "nbformat_minor": 4 338 | } 339 | -------------------------------------------------------------------------------- /Analytics/02_fetching_feed_results.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Planet Analytics API Tutorial\n", 8 | "#### This notebook is an extended version of the official quickstart guide in [here](https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/analytics/quickstart/02_fetching_feed_results.ipynb) as was customized to download Change Detection feeds into monthly vector files. 
To have a look at more Analytic guides, visit our DevRel [notebooks](https://github.com/planetlabs/notebooks/tree/master/jupyter-notebooks).\n", 9 | "\n", 10 | "# Getting Analytic Feed Results\n", 11 | "This notebook shows how to paginate through Planet Analytic Feed Results for an existing analytics [Subscription](https://developers.planet.com/docs/analytics/#subscriptions) to construct a combined [geojson](https://geojson.org/) feature collection that can be imported into geospatial analysis tools." 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "## Setup \n", 19 | "To use this notebook, you need an api key for a Planet account with access to the Analytics API.\n", 20 | "#### API Key and Test Connection\n", 21 | "Set `API_KEY` below if it is not already in your notebook as an environment variable.\n", 22 | "See the [Analytics API Docs](https://developers.planet.com/docs/analytics/) for more details on authentication." 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import os\n", 32 | "import requests\n", 33 | "\n", 34 | "# construct auth tuple for use in the requests library\n", 35 | "BASIC_AUTH = ('YOUR_API_KEY', '')\n", 36 | "BASE_URL = \"https://api.planet.com/analytics/\"\n", 37 | "\n", 38 | "subscriptions_list_url = BASE_URL + 'subscriptions' + '?‚limit=1000'\n", 39 | "resp = requests.get(subscriptions_list_url, auth=BASIC_AUTH)\n", 40 | "if resp.status_code == 200:\n", 41 | " print('Yay, you can access the Analytics API')\n", 42 | " subscriptions = resp.json()['data']\n", 43 | " print('Available subscriptions:', len(subscriptions))\n", 44 | "else:\n", 45 | " print('Something is wrong:', resp.content)" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "#### Specify Analytics Subscription of Interest\n", 53 | "Below we will list your available subscription ids and some 
metadata in a dataframe and then select a subscription of interest." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "import pandas as pd\n", 63 | "pd.options.display.max_rows = 1000\n", 64 | "df = pd.DataFrame(subscriptions)\n", 65 | "df['start'] = pd.to_datetime(df['startTime']).dt.date\n", 66 | "df['end'] = pd.to_datetime(df['endTime']).dt.date\n", 67 | "df[['id', 'title', 'description', 'start', 'end']]" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "Pick a subscription from which to pull results, copy its ID and replace below." 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "# This example ID is for a subscription of building change detections in Kabul\n", 84 | "# You can replace this ID with your own subscription ID\n", 85 | "SUBSCRIPTION_ID = 'ea5180c6-7846-4e64-a8b1-b7fc112f800d'" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "## Getting subscription results\n", 93 | "In this section, we will make sure that we can get data from the subscription of interest by fetching the latest page of results." 
94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "import json\n", 103 | "\n", 104 | "# Construct the url for the subscription's results collection\n", 105 | "subscription_results_url = BASE_URL + 'collections/' + SUBSCRIPTION_ID + '/items'\n", 106 | "print(\"Request URL: {}\".format(subscription_results_url))\n", 107 | "\n", 108 | "# Get subscription results collection\n", 109 | "resp = requests.get(subscription_results_url, auth=BASIC_AUTH)\n", 110 | "if resp.status_code == 200:\n", 111 | " print('Yay, you can access analytic feed results!')\n", 112 | " subscription_results = resp.json()\n", 113 | " print(json.dumps(subscription_results, sort_keys=True, indent=4))\n", 114 | "else:\n", 115 | " print('Something is wrong:', resp.content)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "## Pagination" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "The response json above will only include the most recent 250 detections by default. For subscriptions with many results, you can page through " 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "print(len(subscription_results['features']))" 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": {}, 144 | "source": [ 145 | "More results can be fetched by following the `next` link. 
Let's look at the links section of the response:" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "subscription_results['links']" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "To get more results, we will want the link with a `rel` of `next`" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": null, 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "def get_next_link(results_json):\n", 171 | " \"\"\"Given a response json from one page of subscription results, get the url for the next page of results.\"\"\"\n", 172 | " for link in results_json['links']:\n", 173 | " if link['rel'] == 'next':\n", 174 | " return link['href']\n", 175 | " return None" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "next_link = get_next_link(subscription_results)\n", 185 | "print('next page url: {}'.format(next_link))" 186 | ] 187 | }, 188 | { 189 | "cell_type": "markdown", 190 | "metadata": {}, 191 | "source": [ 192 | "If the result above is `None`, it means your subscription has less than 250 results and you don't need to run the following cell. If there are more than 250 detections, the return will be a URL. Using this url, we can fetch the next page of results." 
193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": null, 198 | "metadata": {}, 199 | "outputs": [], 200 | "source": [ 201 | "next_results = requests.get(next_link, auth=BASIC_AUTH).json()\n", 202 | "print(json.dumps(next_results, sort_keys=True, indent=4))" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "## Aggregating results" 210 | ] 211 | }, 212 | { 213 | "cell_type": "markdown", 214 | "metadata": {}, 215 | "source": [ 216 | "Each page of results comes as one feature collection. We can combine the features from different pages of results into one big feature collection. Below we will page through all results in the subscription from the past 12 months and make a combined feature collection.\n", 217 | "\n", 218 | "Results in the API are ordered by a `created` timestamp. This corresponds the time that the feature was published to a Feed and does not necessarily match the `observed` timestamp in the feature's properties, which corresponds to when the source imagery for a feature was collected." 
219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": null, 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [ 227 | "latest_feature = subscription_results['features'][0]\n", 228 | "creation_datestring = latest_feature['created']\n", 229 | "print('latest feature creation date:', creation_datestring)" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "from dateutil.parser import parse\n", 239 | "# this date string can be parsed as a datetime and converted to a date\n", 240 | "latest_date = parse(creation_datestring).date()\n", 241 | "latest_date" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": null, 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [ 250 | "from datetime import timedelta\n", 251 | "min_date = latest_date - timedelta(days=365)\n", 252 | "print('Aggregate all detections from after this date:', min_date)" 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": null, 258 | "metadata": {}, 259 | "outputs": [], 260 | "source": [ 261 | "feature_collection = {'type': 'FeatureCollection', 'features': []}\n", 262 | "next_link = subscription_results_url\n", 263 | "\n", 264 | "while next_link:\n", 265 | " results = requests.get(next_link, auth=BASIC_AUTH).json()\n", 266 | " next_features = results['features']\n", 267 | " if next_features:\n", 268 | " latest_feature_creation = parse(next_features[0]['created']).date()\n", 269 | " earliest_feature_creation = parse(next_features[-1]['created']).date()\n", 270 | " print('Fetched {} features fetched ({}, {})'.format(\n", 271 | " len(next_features), earliest_feature_creation, latest_feature_creation))\n", 272 | " feature_collection['features'].extend(next_features)\n", 273 | " next_link = get_next_link(results)\n", 274 | " else:\n", 275 | " next_link = None\n", 276 | "\n", 277 | "print('Total features: 
{}'.format(len(feature_collection['features'])))" 278 | ] 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "metadata": {}, 283 | "source": [ 284 | "## Saving Results\n", 285 | "We can now save the combined geojson feature collection to a file." 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "metadata": {}, 292 | "outputs": [], 293 | "source": [ 294 | "from IPython.display import FileLink, FileLinks\n", 295 | "os.makedirs('data', exist_ok=True)\n", 296 | "filename = 'data/combined_collection_{}.geojson'.format(SUBSCRIPTION_ID)\n", 297 | "with open(filename, 'w') as file:\n", 298 | " json.dump(feature_collection, file)\n", 299 | "\n", 300 | "FileLink(filename)" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "Or we can save the detections in separated files according to the `observed` dates." 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": null, 313 | "metadata": {}, 314 | "outputs": [], 315 | "source": [ 316 | "# Filter unique Observed datas in the Feature Collection\n", 317 | "observed_dates = list(sorted(set([i['properties']['observed'] for i in feature_collection['features']])))\n", 318 | "\n", 319 | "# Save all detections per month\n", 320 | "for date in observed_dates:\n", 321 | " filename = 'data/collection_{}_{}.geojson'.format(SUBSCRIPTION_ID, date.split(\"T\")[0])\n", 322 | " with open(filename, 'w') as file:\n", 323 | " monthly_features = [i for i in feature_collection['features'] if i['properties']['observed'] == date]\n", 324 | " fc = {'type': 'FeatureCollection', 'features': monthly_features}\n", 325 | " json.dump(fc, file)\n", 326 | " print(filename)\n", 327 | " \n" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "metadata": {}, 333 | "source": [ 334 | "After downloading the aggregated geojson file with the file link above, try importing the data into a geojson-compatible tool for visualization and 
exploration:\n", 335 | "- [geojson.io](http://geojson.io/)\n", 336 | "- [kepler gl](https://kepler.gl/demo)" 337 | ] 338 | }, 339 | { 340 | "cell_type": "markdown", 341 | "metadata": {}, 342 | "source": [ 343 | "The saved combined geojson file can also be used to make a geopandas dataframe." 344 | ] 345 | }, 346 | { 347 | "cell_type": "code", 348 | "execution_count": null, 349 | "metadata": {}, 350 | "outputs": [], 351 | "source": [ 352 | "import geopandas as gpd\n", 353 | "gpd.read_file(filename)" 354 | ] 355 | }, 356 | { 357 | "cell_type": "code", 358 | "execution_count": null, 359 | "metadata": {}, 360 | "outputs": [], 361 | "source": [] 362 | } 363 | ], 364 | "metadata": { 365 | "kernelspec": { 366 | "display_name": "Python 3", 367 | "language": "python", 368 | "name": "python3" 369 | }, 370 | "language_info": { 371 | "codemirror_mode": { 372 | "name": "ipython", 373 | "version": 3 374 | }, 375 | "file_extension": ".py", 376 | "mimetype": "text/x-python", 377 | "name": "python", 378 | "nbconvert_exporter": "python", 379 | "pygments_lexer": "ipython3", 380 | "version": "3.6.9" 381 | } 382 | }, 383 | "nbformat": 4, 384 | "nbformat_minor": 2 385 | } 386 | -------------------------------------------------------------------------------- /Analytics/02_fetching_vector_feed_results.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Planet Analytics API Tutorial\n", 8 | "\n", 9 | "# Getting Vector Analytic Feed Results\n", 10 | "This notebook shows how to paginate through Planet Analytic Feed Results for an existing Change or Object Deteciton analytics [Subscription](https://developers.planet.com/docs/analytics/#subscriptions) to construct a combined [geojson](https://geojson.org/) feature collection that can be imported into geospatial analysis tools." 
11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "## Setup \n", 18 | "To use this notebook, you need an api key for a Planet account with access to the Analytics API.\n", 19 | "#### API Key and Test Connection\n", 20 | "Set `API_KEY` below if it is not already in your notebook as an environment variable.\n", 21 | "See the [Analytics API Docs](https://developers.planet.com/docs/analytics/) for more details on authentication." 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "import os\n", 31 | "import json\n", 32 | "import requests\n", 33 | "import pandas as pd\n", 34 | "import geopandas as gpd\n", 35 | "\n", 36 | "# construct auth tuple for use in the requests library\n", 37 | "API_KEY = os.environ.get('PL_API_KEY')\n", 38 | "BASIC_AUTH = (API_KEY, '')\n", 39 | "BASE_URL = \"https://api.planet.com/analytics/\"\n", 40 | "\n", 41 | "subscriptions_list_url = BASE_URL + 'subscriptions' + '?‚limit=1000'\n", 42 | "resp = requests.get(subscriptions_list_url, auth=BASIC_AUTH)\n", 43 | "if resp.status_code == 200:\n", 44 | " print('Yay, you can access the Analytics API')\n", 45 | " subscriptions = resp.json()['data']\n", 46 | " print('Available subscriptions:', len(subscriptions))\n", 47 | "else:\n", 48 | " print('Something is wrong:', resp.content)" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "#### Specify Analytics Subscription of Interest\n", 56 | "Below we will list your available subscription ids and some metadata in a dataframe and then select a subscription of interest." 
57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "pd.options.display.max_rows = 1000\n", 66 | "df = pd.DataFrame(subscriptions[:10])\n", 67 | "df['start'] = pd.to_datetime(df['startTime']).dt.date\n", 68 | "df['end'] = pd.to_datetime(df['endTime']).dt.date\n", 69 | "df[['id', 'title', 'description', 'start', 'end']]" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "Pick a subscription from which to pull results, copy its ID and replace below." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "# This example ID is for a subscription of building change detections in Kabul\n", 86 | "# You can replace this ID with your own subscription ID\n", 87 | "SUBSCRIPTION_ID = '9b80bb51-ee89-48f2-a03c-c59398444915'" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "## Getting subscription results\n", 95 | "In this section, we will make sure that we can get data from the subscription of interest by fetching the latest page of results." 
96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "# Construct the url for the subscription's results collection\n", 105 | "subscription_results_url = BASE_URL + 'collections/' + SUBSCRIPTION_ID + '/items'\n", 106 | "print(\"Request URL: {}\".format(subscription_results_url))\n", 107 | "\n", 108 | "# Get subscription results collection, print the first one\n", 109 | "resp = requests.get(subscription_results_url, auth=BASIC_AUTH)\n", 110 | "if resp.status_code == 200:\n", 111 | " print('Yay, you can access analytic feed results!')\n", 112 | " subscription_results = resp.json()\n", 113 | " #Printing the ID from the first subscription\n", 114 | " print(json.dumps(subscription_results['features'][0]['id'], sort_keys=True, indent=2))\n", 115 | "else:\n", 116 | " print('Something is wrong:', resp.content)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": {}, 122 | "source": [ 123 | "## Pagination" 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "metadata": {}, 129 | "source": [ 130 | "The response json above will only include the most recent 250 detections by default. For subscriptions with many results, you can page through " 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "print(len(subscription_results['features']))" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "More results can be fetched by following the `next` link. 
Let's look at the links section of the response:" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "subscription_results['links']" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "To get more results, we will want the link with a `rel` of `next`" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": null, 168 | "metadata": {}, 169 | "outputs": [], 170 | "source": [ 171 | "def get_next_link(results_json):\n", 172 | " \"\"\"Given a response json from one page of subscription results, get the url for the next page of results.\"\"\"\n", 173 | " for link in results_json['links']:\n", 174 | " if link['rel'] == 'next':\n", 175 | " return link['href']\n", 176 | " return None" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "next_link = get_next_link(subscription_results)\n", 186 | "print('next page url: {}'.format(next_link))" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "If the result above is `None`, it means your subscription has less than 250 results and you don't need to run the following cell. If there are more than 250 detections, the return will be a URL. Using this url, we can fetch the next page of results." 
194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "next_results = requests.get(next_link, auth=BASIC_AUTH).json()\n", 203 | "print(json.dumps(next_results['features'][0]['id'], sort_keys=True, indent=2))" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "## Aggregating results" 211 | ] 212 | }, 213 | { 214 | "cell_type": "markdown", 215 | "metadata": {}, 216 | "source": [ 217 | "Each page of results comes as one feature collection. We can combine the features from different pages of results into one big feature collection. Below we will page through all results in the subscription from the past 12 months and make a combined feature collection.\n", 218 | "\n", 219 | "Results in the API are ordered by a `created` timestamp. This corresponds to the time that the feature was published to a Feed and does not necessarily match the `observed` timestamp in the feature's properties, which corresponds to when the source imagery for a feature was collected.\n", 220 | "\n", 221 | "This means that if your subscription was created with backfill you will have a ton of detections on the same day. In this case you might want to add an additional filter by date." 
222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "latest_feature = subscription_results['features'][0]\n", 231 | "creation_datestring = latest_feature['created']\n", 232 | "print('latest feature creation date:', creation_datestring)" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": null, 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "from dateutil.parser import parse\n", 242 | "# this date string can be parsed as a datetime and converted to a date\n", 243 | "latest_date = parse(creation_datestring).date()\n", 244 | "latest_date" 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": null, 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "from datetime import timedelta\n", 254 | "min_date = latest_date - timedelta(days=365)\n", 255 | "print('Aggregate all detections from after this date:', min_date)" 256 | ] 257 | }, 258 | { 259 | "cell_type": "markdown", 260 | "metadata": {}, 261 | "source": [ 262 | "If you have a ton of detections this might run for a long time. Think about setting a max number of detections, note that they will be increasing by increments of 250." 
263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "metadata": {}, 269 | "outputs": [], 270 | "source": [ 271 | "feature_collection = {'type': 'FeatureCollection', 'features': []}\n", 272 | "next_link = subscription_results_url\n", 273 | "max_number_detections = 2000\n", 274 | "\n", 275 | "while next_link and len(feature_collection['features']) < max_number_detections:\n", 276 | " results = requests.get(next_link, auth=BASIC_AUTH).json()\n", 277 | " next_features = results['features']\n", 278 | " if next_features:\n", 279 | " latest_feature_creation = parse(next_features[0]['created']).date()\n", 280 | " earliest_feature_creation = parse(next_features[-1]['created']).date()\n", 281 | " print('Fetched {} features fetched ({}, {})'.format(\n", 282 | " len(next_features), earliest_feature_creation, latest_feature_creation))\n", 283 | " feature_collection['features'].extend(next_features)\n", 284 | " next_link = get_next_link(results)\n", 285 | " else:\n", 286 | " next_link = None\n", 287 | "\n", 288 | "print('Total features: {}'.format(len(feature_collection['features'])))" 289 | ] 290 | }, 291 | { 292 | "cell_type": "markdown", 293 | "metadata": {}, 294 | "source": [ 295 | "## Saving Results\n", 296 | "We can now save the combined geojson feature collection to a file." 297 | ] 298 | }, 299 | { 300 | "cell_type": "code", 301 | "execution_count": null, 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "from IPython.display import FileLink, FileLinks\n", 306 | "os.makedirs('data', exist_ok=True)\n", 307 | "filename = 'data/combined_collection_{}.geojson'.format(SUBSCRIPTION_ID)\n", 308 | "with open(filename, 'w') as file:\n", 309 | " json.dump(feature_collection, file)\n", 310 | "\n", 311 | "FileLink(filename)" 312 | ] 313 | }, 314 | { 315 | "cell_type": "markdown", 316 | "metadata": {}, 317 | "source": [ 318 | "Or we can save the detections in separated files according to the `observed` dates." 
319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": null, 324 | "metadata": {}, 325 | "outputs": [], 326 | "source": [ 327 | "# Filter unique observed dates in the Feature Collection\n", 328 | "observed_dates = list(sorted(set([i['properties']['observed'] for i in feature_collection['features']])))\n", 329 | "\n", 330 | "# Save all detections per observed date\n", 331 | "for date in observed_dates:\n", 332 | " filename = 'data/collection_{}_{}.geojson'.format(SUBSCRIPTION_ID, date.split(\"T\")[0])\n", 333 | " with open(filename, 'w') as file:\n", 334 | " monthly_features = [i for i in feature_collection['features'] if i['properties']['observed'] == date]\n", 335 | " fc = {'type': 'FeatureCollection', 'features': monthly_features}\n", 336 | " json.dump(fc, file)\n", 337 | " print(filename)\n", 338 | " \n" 339 | ] 340 | }, 341 | { 342 | "cell_type": "markdown", 343 | "metadata": {}, 344 | "source": [ 345 | "After downloading the aggregated geojson file with the file link above, try importing the data into a geojson-compatible tool for visualization and exploration:\n", 346 | "- [geojson.io](http://geojson.io/)\n", 347 | "- [kepler gl](https://kepler.gl/demo)" 348 | ] 349 | }, 350 | { 351 | "cell_type": "markdown", 352 | "metadata": {}, 353 | "source": [ 354 | "The saved combined geojson file can also be used to make a geopandas dataframe." 
355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": null, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "gpd.read_file(filename)[:10]" 364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": null, 369 | "metadata": {}, 370 | "outputs": [], 371 | "source": [] 372 | } 373 | ], 374 | "metadata": { 375 | "kernelspec": { 376 | "display_name": "Python 3 (ipykernel)", 377 | "language": "python", 378 | "name": "python3" 379 | }, 380 | "language_info": { 381 | "codemirror_mode": { 382 | "name": "ipython", 383 | "version": 3 384 | }, 385 | "file_extension": ".py", 386 | "mimetype": "text/x-python", 387 | "name": "python", 388 | "nbconvert_exporter": "python", 389 | "pygments_lexer": "ipython3", 390 | "version": "3.9.5" 391 | } 392 | }, 393 | "nbformat": 4, 394 | "nbformat_minor": 4 395 | } 396 | -------------------------------------------------------------------------------- /Analytics/plnbstyles.py: -------------------------------------------------------------------------------- 1 | from IPython.core.display import HTML 2 | 3 | def style_nb(path): 4 | styles = open(path, "r").read() 5 | return HTML("""