├── .github └── workflows │ ├── binder-badge.yml │ ├── on-issue-create.yml │ ├── publish.yml │ ├── scan_notebooks.py │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── binder ├── Dockerfile ├── apt.txt ├── conda-linux-64.lock ├── conda-lock.yml ├── conda-osx-64.lock ├── conda-win-64.lock ├── env-lock.yml ├── environment.yml └── postBuild ├── contributor_guide.md ├── docker-compose.dev.yml ├── docker-compose.yml ├── images ├── cloning_nsidc_data_tutorials.png ├── fetch_upstream_nsidc_data_tutorials.png └── forking_nsidc_data_tutorials.png ├── notebooks ├── ICESat-2_Cloud_Access │ ├── ATL06-direct-access.ipynb │ ├── ATL06-direct-access_rendered.ipynb │ ├── ATL10-h5coro.ipynb │ ├── ATL10-h5coro_rendered.ipynb │ ├── ICESat2-CryoSat2-Coincident.ipynb │ ├── README.md │ ├── SIR_SAR_L2_E_download_script.py │ ├── environment │ │ └── environment.yml │ ├── h5cloud │ │ ├── read_atl10.py │ │ ├── readme.md │ │ └── workflow.py │ ├── img │ │ ├── ATL10_CS2_L2_SAR_query_med.png │ │ ├── atl06_example_end_product.png │ │ ├── icesat2-cryosat2.png │ │ ├── icesat2.atl10.gridded.count_segments.ross_sea.png │ │ └── nsidc_logo.png │ ├── ross_sea.json │ └── trouble_shooting_resampling.ipynb ├── ICESat-2_MODIS_Arctic_Sea_Ice │ ├── Customize and Access Data Rendered.ipynb │ ├── Customize and Access Data.ipynb │ ├── Introduction.ipynb │ ├── NSIDC_logo_2018_web.jpg │ ├── README.md │ ├── Visualize and Analyze Data-Rendered.ipynb │ ├── Visualize and Analyze Data.ipynb │ ├── environment │ │ ├── conda-linux-64.lock │ │ ├── conda-osx-64.lock │ │ ├── conda-win-64.lock │ │ └── environment.yml │ └── tutorial_helper_functions.py ├── SMAP │ ├── 01_download_smap_data.ipynb │ ├── 01_download_smap_data_rendered.ipynb │ ├── 02_read_and_plot_smap_data.ipynb │ ├── 02_read_and_plot_smap_data_rendered.ipynb │ ├── 03_smap_quality_flags.ipynb │ ├── 03_smap_quality_flags_rendered.ipynb │ ├── EASE2_M36km.lats.964x406x1.double │ ├── EASE2_M36km.lons.964x406x1.double │ ├── README.md │ ├── environment │ │ └── environment.yml │ ├── img │ │ └── nsidc_logo.png │ └── working_with_smap_in_xarray.ipynb ├── SnowEx_ASO_MODIS_Snow │ ├── Data-download-polygon-export.png │ ├── Data │ │ └── nsidc-polygon.json │ ├── README.md │ ├── Snow-tutorial.ipynb │ ├── Snow-tutorial_rendered.ipynb │ ├── environment │ │ ├── conda-linux-64.lock │ │ ├── conda-osx-64.lock │ │ ├── conda-win-64.lock │ │ └── environment.yml │ └── tutorial_helper_functions.py ├── iceflow │ ├── .flake8 │ ├── 0_introduction.ipynb │ ├── 1_widget.ipynb │ ├── 2_api.ipynb │ ├── 3_dataviz.ipynb │ ├── 4_time_series_tutorial.ipynb │ ├── README.md │ ├── corrections.ipynb │ ├── data │ │ ├── .placeholder │ │ ├── atm1b_data_2020-11-15T20-05.hdf5 │ │ ├── processed_ATL06_20181015100401_02560110_003_01.h5 │ │ └── twaties-test-GLAH06-2000-2010.h5 │ ├── environment │ │ ├── conda-linux-64.lock │ │ ├── conda-osx-64.lock │ │ ├── conda-win-64.lock │ │ └── environment.yml │ ├── iceflow │ │ ├── __init__.py │ │ ├── client.py │ │ ├── files │ │ │ ├── ib_north.json │ │ │ └── ib_south.json │ │ ├── is2.py │ │ ├── layers.py │ │ ├── processing.py │ │ └── ui.py │ └── img │ │ ├── iceflow-coverage.jpg │ │ ├── log-icons.png │ │ ├── nsidc_logo.png │ │ ├── unfinished_horse.jpg │ │ ├── vaex.png │ │ └── vaex_ib.png ├── itslive │ ├── data │ │ └── .placeholder │ ├── datacube_tools.py │ ├── environment │ │ ├── conda-linux-64.lock │ │ ├── conda-osx-64.lock │ │ ├── conda-win-64.lock │ │ └── environment.yml │ ├── its_live_antarctic_vel.jpg │ ├── itslive-notebook.ipynb │ ├── 
itslive-widget.ipynb │ └── velocity_widget.py └── measures │ ├── README.md │ ├── Search_Download_and_Plot_GeoTIFFs.ipynb │ ├── Search_Download_and_Plot_GeoTIFFs_rendered.ipynb │ ├── environment │ └── environment.yml │ └── img │ ├── example_geotiff_plot.png │ └── nsidc_logo.png ├── pedagogy_guide.md └── tutorial_templates ├── img ├── example_end_product.png └── nsidc_logo.png └── tutorial_template.ipynb /.github/workflows/binder-badge.yml: -------------------------------------------------------------------------------- 1 | name: AddBinderBadge 2 | on: 3 | pull_request_target: 4 | 5 | jobs: 6 | badge: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: manics/action-binderbadge@main 10 | with: 11 | githubToken: ${{ secrets.GITHUB_TOKEN }} 12 | -------------------------------------------------------------------------------- /.github/workflows/on-issue-create.yml: -------------------------------------------------------------------------------- 1 | name: GitHub Issue to JIRA 2 | on: 3 | issues: 4 | types: 5 | - opened 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | name: Jira issue 11 | steps: 12 | - name: Login 13 | uses: atlassian/gajira-login@v3 # Required for authenticating to Jira 14 | env: 15 | JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} 16 | JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} 17 | JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} 18 | 19 | - name: Create 20 | id: create 21 | uses: atlassian/gajira-create@v3 22 | with: 23 | project: CRYO 24 | issuetype: Story 25 | summary: "GitHub Issue: ${{ github.event.issue.title }}" 26 | description: "${{ github.event.issue.body }}.\n\nCreated on GitHub by user ${{ github.actor }}" 27 | 28 | - name: Log created issue 29 | run: echo "Issue ${{ steps.create.outputs.issue }} was created" 30 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | 8 | jobs: 9 | docker: 10 | permissions: 11 | contents: read 12 | packages: write 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | - name: Set up QEMU 18 | uses: docker/setup-qemu-action@v3 19 | - name: Set up Docker Buildx 20 | uses: docker/setup-buildx-action@v3 21 | - name: Login to Docker Hub 22 | uses: docker/login-action@v3 23 | with: 24 | username: ${{ secrets.DOCKER_USER }} 25 | password: ${{ secrets.DOCKER_PASS }} 26 | - name: Login to GitHub Container Registry 27 | uses: docker/login-action@v3 28 | with: 29 | registry: ghcr.io 30 | username: ${{ github.repository_owner }} 31 | password: ${{ secrets.GITHUB_TOKEN }} 32 | - name: Build and push 33 | uses: docker/build-push-action@v5 34 | with: 35 | context: binder 36 | platforms: linux/amd64,linux/arm64 37 | push: true 38 | tags: | 39 | nsidc/tutorials:latest 40 | ghcr.io/nsidc/tutorials:latest 41 | -------------------------------------------------------------------------------- /.github/workflows/scan_notebooks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import importlib 5 | import subprocess 6 | import sys 7 | 8 | 9 | def ensure_environment(notebook, libraries): 10 | for lib_name in libraries: 11 | try: 12 | importlib.import_module(lib_name) 13 | 14 | except Exception: 15 | try: 16 | 17 | importlib.import_module(lib_name, f".{lib_name}") 18 | except ModuleNotFoundError or ImportError as ee: 
19 | 20 | print( 21 | f"{notebook} uses a library that is not present in the current environment: {ee.msg}", 22 | file=sys.stderr, 23 | ) 24 | exit(1) 25 | 26 | 27 | def scan_notebook(notebook_path): 28 | notebook_path = notebook_path.replace(" ", "\\ ") 29 | command = f"pipreqsnb {notebook_path} --print --debug" 30 | 31 | libraries = set() 32 | p = subprocess.Popen( 33 | command, 34 | shell=True, 35 | stdout=subprocess.PIPE, 36 | ) 37 | 38 | while True: 39 | inline = p.stdout.readline() 40 | if not inline: 41 | break 42 | parsed_out = inline.decode("UTF-8").lower() 43 | if "==" in parsed_out: 44 | library = parsed_out.split("==")[0] 45 | if library.lower() == "gdal": 46 | library = "osgeo" 47 | libraries.add(library) 48 | print(list(libraries)) 49 | 50 | ensure_environment(notebook_path, libraries) 51 | 52 | 53 | if __name__ == "__main__": 54 | parser = argparse.ArgumentParser() 55 | parser.add_argument("-n", "--notebook", help=" Set flag", required=True) 56 | args = parser.parse_args() 57 | scan_notebook(args.notebook) 58 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test Notebooks 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - notebooks/** 7 | - binder/** 8 | - '.github/workflows/' 9 | types: [opened, synchronize] 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | linux: 17 | name: Linux tests 18 | runs-on: ubuntu-latest 19 | strategy: 20 | fail-fast: true 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: "Install Conda environment with Micromamba" 24 | uses: mamba-org/setup-micromamba@v1 25 | with: 26 | cache-env: true 27 | environment-file: binder/conda-linux-64.lock 28 | environment-name: nsidc-tutorials 29 | 30 | - name: "Verify micromamba" 31 | shell: bash -l {0} 32 | run: | 33 | micromamba info 34 | jupyter kernelspec list 35 | which python 36 | conda list | sort 37 | 38 | - name: "Verify that libraries used in notebooks are present in the environment" 39 | shell: bash -l {0} 40 | run: | 41 | find notebooks -type f -iname \*.ipynb -not -path '*\.ipynb_checkpoints/*' -not -path '*/iceflow/*' | xargs -I % python .github/workflows/scan_notebooks.py -n % 42 | 43 | - name: "Setup Quarto" 44 | uses: quarto-dev/quarto-actions/setup@v2 45 | with: 46 | # version: 1.1.251 47 | version: 1.3.361 48 | 49 | - name: "Execute notebooks with Quarto" 50 | env: 51 | EARTHDATA_USERNAME: ${{ secrets.EARTHDATA_USERNAME }} 52 | EARTHDATA_PASSWORD: ${{ secrets.EARTHDATA_PASSWORD }} 53 | shell: bash -l {0} 54 | run: | 55 | find notebooks -type f -name "*.ipynb" -not -path '*\.ipynb_checkpoints/*' -not -path '*endered*' -not -path '*SnowEx*' | xargs -I F quarto render "F" --execute-daemon-restart 56 | 57 | 58 | win-osx: 59 | name: Runtime (${{ matrix.os }}, micromamba) 60 | runs-on: ${{ matrix.os }}-latest 61 | strategy: 62 | fail-fast: false 63 | matrix: 64 | os: ["windows", "macos"] 65 | include: 66 | - os: windows 67 | environment-file: binder/conda-win-64.lock 68 | - os: macos 69 | environment-file: binder/conda-osx-64.lock 70 | steps: 71 | - uses: actions/checkout@v3 72 | - name: "Install Conda environment with Micromamba" 73 | uses: mamba-org/setup-micromamba@v1 74 | with: 75 | cache-env: true 76 | environment-file: ${{ matrix.environment-file }} 77 | environment-name: nsidc-tutorials 78 | 79 | - name: "Verify micromamba" 80 | shell: bash -l {0} 81 | run: | 82 | 
micromamba info 83 | jupyter kernelspec list 84 | printenv | sort 85 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | MANIFEST 2 | notebook/Outputs 3 | build 4 | bin 5 | dist 6 | _build 7 | docs/man/*.gz 8 | docs/source/api/generated 9 | docs/source/config.rst 10 | docs/gh-pages 11 | notebook/i18n/*/LC_MESSAGES/*.mo 12 | notebook/i18n/*/LC_MESSAGES/nbjs.json 13 | notebook/static/components 14 | notebook/static/style/*.min.css* 15 | notebook/static/*/js/built/ 16 | notebook/static/*/built/ 17 | notebook/static/built/ 18 | notebook/static/*/js/main.min.js* 19 | notebook/static/lab/*bundle.js 20 | notebooks/itslive/data/*.nc 21 | notebooks/iceflow/data 22 | notebooks/*/Outputs 23 | .python-version 24 | node_modules 25 | *.py[co] 26 | __pycache__ 27 | *.egg-info 28 | *~ 29 | *.bak 30 | .ipynb_checkpoints 31 | .tox 32 | .DS_Store 33 | \#*# 34 | .#* 35 | .coverage 36 | .pytest_cache 37 | src 38 | 39 | *.swp 40 | *.map 41 | .idea/ 42 | Read the Docs 43 | config.rst 44 | *.iml 45 | /.project 46 | /.pydevproject 47 | 48 | package-lock.json 49 | geckodriver.log 50 | *.iml 51 | 52 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 1.0.1 (2021-01-21) 2 | 3 | * Add Changelog. 4 | * Improve IceFlow notebook. 5 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## 1. Our Commitment 4 | 5 | We are dedicated to fostering a respectful environment for everyone contributing to this project. We expect all participants to treat each other with respect, professionalism, and kindness. 6 | 7 | ## 2. Expected Behavior 8 | 9 | - Be respectful and considerate of others. 10 | - Engage in constructive discussions and offer helpful feedback. 11 | - Gracefully accept constructive criticism. 12 | 13 | ## 3. Unacceptable Behavior 14 | 15 | The following behaviors will not be tolerated: 16 | 17 | - Harassment, discrimination, or intimidation of any kind. 18 | - Offensive, abusive, or derogatory language and actions. 19 | - Personal attacks or insults. 20 | - Trolling or disruptive conduct. 21 | - Sharing inappropriate content. 22 | 23 | ## 4. Reporting Violations 24 | If you experience or witness any behavior that violates this Code of Conduct, please report it by contacting the project maintainers. All reports will be reviewed confidentially. 25 | 26 | ## 5. Enforcement 27 | Violations of this Code of Conduct may result in actions such as warnings, temporary bans, or permanent exclusion from participation at the discretion of the maintainers. 28 | 29 | ## Contact Info 30 | Email: 31 | Organization: National Snow and Ice Data Center¹ 32 | Website: 33 | Date last modified: 01-22-2025 34 | 35 | ¹Work performed under NASA contract 80GSFC23CA035. 
36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 National Snow and Ice Data Center 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # NSIDC-Data-Tutorials 3 | 4 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/nsidc/NSIDC-Data-Tutorial/main?urlpath=lab/tree/notebooks) 5 | 6 | [![Test Notebooks](https://github.com/nsidc/NSIDC-Data-Tutorials/actions/workflows/test.yml/badge.svg)](https://github.com/nsidc/NSIDC-Data-Tutorials/actions/workflows/test.yml) 7 | 8 | ## Summary 9 | 10 | This combined repository includes tutorials and code resources provided by the NASA National Snow and Ice Data Center Distributed Active Archive Center (NSIDC DAAC). These tutorials are provided as Python-based Jupyter notebooks that provide guidance on working with various data products, including how to access, subset, transform, and visualize data. Each tutorial can be accessed by navigating to the /notebooks folder of this repository. Please see the README files associated with each individual tutorial folder for more information on each tutorial and their learning objectives. Please note that all branches outside of `Main` should be considered in development and are not supported. 11 | 12 | ## Tutorials 13 | 14 | ### [ICESat-2_Cloud_Access Notebooks](./notebooks/ICESat-2_Cloud_Access) 15 | 16 | These notebooks demonstrate how to search and access ICESat-2 from the NASA Earthdata Cloud: 17 | 18 | **Accessing and working with ICESat-2 Data in the Cloud** 19 | 20 | Originally presented to the UWG (User Working Group) in May 2022, this tutorial demonstrates how to search for ICESat-2 data hosted in the Earthdata Cloud and how to directly access it from an Amazon Web Services (AWS) Elastic Compute Cloud (EC2) instance using the `earthaccess` package. 21 | 22 | **Plotting ICESat-2 and CryoSat-2 Freeboards** 23 | 24 | This notebook demonstrates plotting ICESat-2 and CryoSat-2 data in the same map from within an AWS ec2 instance. ICESat-2 data are accessed via "direct S3 access" using `earthaccess`. CryoSat-2 data are downloaded to our cloud instance from their ftp storage lcoation and accessed locally. 
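
For the ICESat-2 side, the direct-access pattern used throughout these notebooks looks roughly like the sketch below (the bounding box, date range, and beam group are placeholders; the CryoSat-2 files are fetched separately by `SIR_SAR_L2_E_download_script.py`):

```python
import earthaccess
import xarray as xr

# Authenticate with Earthdata Login (interactive prompt or ~/.netrc)
auth = earthaccess.login()

# Placeholder query: ATL10 freeboards over a small Ross Sea box for one month
granules = earthaccess.search_data(
    short_name="ATL10",
    cloud_hosted=True,
    bounding_box=(-180, -78, -160, -74),
    temporal=("2019-11-01", "2019-11-30"),
    count=2,
)

# Inside AWS us-west-2 this opens the granules directly from S3;
# outside the cloud, earthaccess falls back to HTTPS access.
files = earthaccess.open(granules)

# Read one beam group into xarray (the group name is an example)
ds = xr.open_dataset(files[0], group="gt1r/freeboard_segment", engine="h5netcdf")
```
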
25 | 26 | **Processing Large-scale Time Series of ICESat-2 Sea Ice Height in the Cloud** 27 | 28 | This notebook utilizes several libraries to performantly search, access, read, and grid ATL10 data over the Ross Sea, Antarctica including `earthaccess`, `h5coro`, and `geopandas`. The notebook provides further guidance on how to scale this analysis to the entire continent, running the same workflow from a script that can be run from your laptop using [Coiled](https://www.coiled.io/). 29 | 30 | ### [MEaSUREs](./notebooks/measures) 31 | 32 | **Download, crop, resample, and plot multiple GeoTIFFs** 33 | 34 | This tutorial guides you through programmatically accessing and downloading GeoTIFF files from the NSIDC DAAC to your local computer. We then crop and resample one GeoTIFF based on the extent and pixel size of another GeoTIFF, then plot one on top of the other. 35 | 36 | We will use two data sets from the NASA [MEaSUREs](https://nsidc.org/data/measures) (Making Earth System data records for Use in Research Environments) program as an example: 37 | 38 | * [MEaSUREs Greenland Ice Mapping Project (GrIMP) Digital Elevation Model from GeoEye and WorldView Imagery, Version 2 (NSIDC-0715)](https://nsidc.org/data/nsidc-0715/versions/2) 39 | * [MEaSUREs Greenland Ice Velocity: Selected Glacier Site Velocity Maps from InSAR, Version 4 (NSIDC-0481)](https://nsidc.org/data/nsidc-0481/versions/4) 40 | 41 | ### [SnowEx_ASO_MODIS_Snow](./notebooks/SnowEx_ASO_MODIS_Snow) 42 | 43 | **Snow Depth and Snow Cover Data Exploration** 44 | 45 | Originally demonstrated through the NASA Earthdata Webinar "Let It Snow! Accessing and Analyzing Snow Data at the NSIDC DAAC" on May 6, 2020, this tutorial provides guidance on how to discover, access, and couple snow data across varying geospatial scales from NASA's SnowEx, Airborne Snow Observatory, and Moderate Resolution Imaging Spectroradiometer (MODIS) missions. The tutorial highlights the ability to search and access data by a defined region, and combine and compare snow data across different data formats and scales using a Python-based Jupyter Notebook. 46 | 47 | ### [ICESat-2_MODIS_Arctic_Sea_Ice](./notebooks/ICESat-2_MODIS_Arctic_Sea_Ice) 48 | 49 | **Getting the most out of NSIDC DAAC data: Discovering, Accessing, and Harmonizing Arctic Remote Sensing Data** 50 | 51 | Originally presented during the 2019 AGU Fall Meeting, this tutorial demonstrates the NSIDC DAAC's data discovery, access, and subsetting services, along with basic open source resources used to harmonize and analyze data across multiple products. The tutorial is provided as a series of Python-based Jupyter Notebooks, focusing on sea ice height and ice surface temperature data from NASA’s ICESat-2 and MODIS missions, respectively, to characterize Arctic sea ice. 52 | 53 | ### [ITS_LIVE](./notebooks/itslive) 54 | 55 | **Global land ice velocities.** 56 | The Inter-mission Time Series of Land Ice Velocity and Elevation (ITS_LIVE) project facilitates ice sheet, ice shelf and glacier research by providing a globally comprehensive and temporally dense multi-sensor record of land ice velocity and elevation with low latency. Scene-pair velocities were generated from satellite optical and radar imagery. 57 | 58 | The notebooks on this project demonstrate how to search and access ITS_LIVE velocity pairs and provide a simple example on how to build a data cube. 
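
The datacubes themselves are cloud-optimized Zarr stores, so a cube can be opened lazily with `xarray` without downloading anything up front. A minimal sketch, where the store path is a placeholder — real cube locations are looked up from the ITS_LIVE catalog (see `datacube_tools.py` in the tutorial folder):

```python
import fsspec
import xarray as xr

# Hypothetical cube path; actual URLs come from the ITS_LIVE datacube catalog
cube_url = "s3://its-live-data/datacubes/v2/<zone>/<cube_name>.zarr"

# Open the Zarr store with anonymous S3 access; data are only read when a
# variable is actually subset and computed
dc = xr.open_zarr(fsspec.get_mapper(cube_url, anon=True))
print(dc)  # inspect the velocity variables and their time/space dimensions
```
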
59 | 60 | ### [IceFlow](./notebooks/iceflow) 61 | 62 | > [!CAUTION] 63 | > The IceFlow notebooks and supporting code have some known problems and users 64 | > should exercise caution. It is likely that users will run into errors while 65 | > interacting with the notebooks. Requests for ITRF transformations are not 66 | > currently working as expected. We recommend users look at the `corrections` 67 | > notebook for information about how to apply ITRF transformations to data 68 | > themselves. IceFlow is currently under maintenence, and we hope to resolve 69 | > some of these issues soon. 70 | 71 | **Harmonized data for pre-IceBridge, ICESat and IceBridge data sets.** 72 | These Jupyter notebooks are interactive documents to teach students and researchers interested in cryospheric sciences how to access and work with airborne altimetry and related data sets from NASA’s [IceBridge](https://www.nasa.gov/mission_pages/icebridge/index.html) mission, and satellite altimetry data from [ICESat](https://icesat.gsfc.nasa.gov/icesat/) and [ICESat-2](https://icesat-2.gsfc.nasa.gov/) missions using the NSIDC **IceFlow API** 73 | 74 | ## Usage with Binder 75 | 76 | The Binder button above allows you to explore and run the notebook in a shared cloud computing environment without the need to install dependencies on your local machine. Note that this option will not directly download data to your computer; instead the data will be downloaded to the cloud environment. 77 | 78 | ## Usage with Docker 79 | 80 | ### On Mac OSX or Linux 81 | 82 | 83 | 1. Install [Docker](https://docs.docker.com/install/). Use the left-hand navigation to select the appropriate install depending on operating system. 84 | 85 | 2. Download the [NSIDC-Data-Tutorials repository from Github](https://github.com/nsidc/NSIDC-Data-Tutorials/archive/master.zip). 86 | 87 | 3. Unzip the file, and open a terminal window in the `NSIDC-Data-Tutorials` folder's location. 88 | 89 | 4. From the terminal window, launch the docker container using the following command, replacing [path/notebook_folder] with your path and notebook folder name: 90 | 91 | ```bash 92 | docker run --name tutorials -p 8888:8888 -v [path/notebook_folder]:/home/jovyan/work nsidc/tutorials 93 | ``` 94 | 95 | Example: 96 | 97 | ```bash 98 | docker run --name tutorials -p 8888:8888 -v /Users/name/Desktop/NSIDC-Data-Tutorials:/home/jovyan/work nsidc/tutorials 99 | ``` 100 | 101 | Or, with docker-compose: 102 | 103 | ```bash 104 | docker-compose up 105 | ``` 106 | 107 | If you want to mount a directory with write permissions, you need to grant the container the same permissions as the one on the directory to be mounted and tell it that has "root" access (within the container). This is important if you want to persist your work or download data to a local directory and not just the docker container. Run the example command below for this option: 108 | 109 | ```bash 110 | docker run --name tutorials -e NB_UID=$(id -u) --user root -p 8888:8888 -v /Users/name/Desktop/NSIDC-Data-Tutorials:/home/jovyan/work nsidc/tutorials 111 | ``` 112 | 113 | The initialization will take some time and will require 2.6 GB of space. 
Once the startup is complete you will see a line of output similar to this: 114 | 115 | ``` 116 | To access the notebook, open this file in a browser: 117 | file:///home/jovyan/.local/share/jupyter/runtime/nbserver-6-open.html 118 | Or copy and paste one of these URLs: 119 | http://4dc97ddd7a0d:8888/?token=f002a50e25b6f623aa775312737ba8a23ffccfd4458faa6f 120 | or http://127.0.0.1:8888/?token=f002a50e25b6f623aa775312737ba8a23ffccfd4458faa6f 121 | ``` 122 | 123 | If you started your container with the `-d`/`--detach` option, check `docker logs tutorials` for this output. 124 | 125 | 5. Open up a web browser and copy one of the URLs as instructed above. 126 | 127 | 6. You will be brought to a Jupyter Notebook interface running through the Docker container. The left side of the interface displays your local directory structure. Navigate to the **`work`** folder of the `NSIDC-Data-Tutorials` repository folder. You can now interact with the notebooks to explore and access data. 128 | 129 | 130 | ### On Windows 131 | 132 | 1. Install [Docker](https://docs.docker.com/docker-for-windows/install/). 133 | 134 | 2. Download the [NSIDC-Data-Tutorials repository from Github](https://github.com/nsidc/NSIDC-Data-Tutorials/archive/master.zip). 135 | 136 | 3. Unzip the file, and open a terminal window (use Command Prompt or PowerShell, not PowerShell ISE) in the `NSIDC-Data-Tutorials` folder's location. 137 | 138 | 5. From the terminal window, launch the docker container using the following command, replacing [path\notebook_folder] with your path and notebook folder name: 139 | 140 | ```bash 141 | docker run --name tutorials -p 8888:8888 -v [path\notebook_folder]:/home/jovyan/work nsidc/tutorials 142 | ``` 143 | 144 | Example: 145 | 146 | ```bash 147 | docker run --name tutorials -p 8888:8888 -v C:\notebook_folder:/home/jovyan/work nsidc/tutorials 148 | ``` 149 | 150 | Or, with docker-compose: 151 | 152 | ```bash 153 | docker-compose up 154 | ``` 155 | 156 | If you want to mount a directory with write permissions you need to grant the container the same permissions as the one on the directory to be mounted and tell it that has "root" access (within the container) 157 | 158 | ```bash 159 | docker run --name tutorials --user root -p 8888:8888 -v C:\notebook_folder:/home/jovyan/work nsidc/tutorials 160 | ``` 161 | 162 | The initialization will take some time and will require 2.6 GB of space. Once the startup is complete you will see a line of output similar to this: 163 | 164 | ``` 165 | To access the notebook, open this file in a browser: 166 | file:///home/jovyan/.local/share/jupyter/runtime/nbserver-6-open.html 167 | Or copy and paste one of these URLs: 168 | http://(6a8bfa6a8518 or 127.0.0.1):8888/?token=2d72e03269b59636d9e31937fcb324f5bdfd0c645a6eba3f 169 | ``` 170 | 171 | If you started your container with the `-d`/`--detach` option, check `docker logs tutorials` for this output. 172 | 173 | 6. Follow the instructions and copy one of the URLs into a web browser and hit return. The address should look something like this: 174 | 175 | `http://127.0.0.1:8888/?token=2d72e03269b59636d9e31937fcb324f5bdfd0c645a6eba3f` 176 | 177 | 7. You will now see the NSIDC-Data-Tutorials repository within the Jupyter Notebook interface. Navigate to **/work** to open the notebooks. 178 | 179 | 8. You can now interact with the notebooks to explore and access data. 180 | 181 | ## Usage with Mamba/Conda 182 | 183 | > Note: If we already have conda or mamba installed we can skip the first step. 184 | 185 | 1. 
Install mambaforge (Python 3.9+) for your platform from [mamba documentation](https://mamba.readthedocs.io/en/latest/installation.html) 186 | 187 | 188 | 2. Download the [NSIDC-Data-Tutorials](https://github.com/nsidc/NSIDC-Data-Tutorials) repository from Github by clicking the green 'Code' button located at the top right of the repository page and clicking 'Download Zip'. Unzip the file, and open a command line or terminal window in the NSIDC-Data-Tutorials folder's location. 189 | 190 | 3. From a command line or terminal window, install the required environment with the following commands: 191 | 192 | Linux 193 | ```bash 194 | mamba create -n nsidc-tutorials --file binder/conda-linux-64.lock 195 | ``` 196 | 197 | OSX 198 | ```bash 199 | mamba create -n nsidc-tutorials --file binder/conda-osx-64.lock 200 | ``` 201 | 202 | Windows 203 | ```bash 204 | mamba create -n nsidc-tutorials --file binder/conda-win-64.lock 205 | ``` 206 | 207 | You should now see that the dependencies were installed and our environment is ready to be used. 208 | 209 | 210 | Activate the environment with 211 | 212 | ``` 213 | conda activate nsidc-tutorials 214 | ``` 215 | 216 | Launch the notebook locally with the following command: 217 | 218 | ```bash 219 | jupyter lab 220 | ``` 221 | 222 | This should open a browser window with the JupyterLab IDE, showing your current working directory on the left-hand navigation. Navigate to the tutorial folder of choice and click on their associated *.ipynb files to get started. 223 | 224 | 225 | ### Tutorial Environments 226 | 227 | Although the nsidc-tutorial environment should run all the notebooks in this repository, we also include tutorial-specific environments that will only contain the dependencies for them. If we don't want to "pollute" our conda environments and we are only going to work with one of the tutorials we recommend to use them instead of the `nsidc-tutorial` environment. The steps to install them are exactly the same but the environment files are inside the environment folders in each of the tutorials. e.g. for ITS_LIVE 228 | 229 | ```bash 230 | cd notebooks/itslive 231 | mamba create -n nsidc-itslive --file environment/conda-linux-64.lock 232 | conda activate nsidc-itslive 233 | jupyter lab 234 | ``` 235 | 236 | This should create a pinned environment that should be fully reproducible across platforms. 237 | 238 | > **NOTE:** Sometimes Conda environments change (break) even with pinned down dependencies. If you run into an issue with dependencies for the tutorials please open an issue and we'll try to fix it as soon as possible. 239 | 240 | 241 | ## Credit 242 | 243 | This software is developed by the National Snow and Ice Data Center with funding from multiple sources. 244 | 245 | ## License 246 | 247 | This repository is licensed under the MIT license. 
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 248 | -------------------------------------------------------------------------------- /binder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pangeo/base-image:2023.01.13 2 | 3 | 4 | USER $NB_UID 5 | 6 | ENTRYPOINT ["jupyter", "lab","--ip=0.0.0.0","--allow-root"] 7 | -------------------------------------------------------------------------------- /binder/apt.txt: -------------------------------------------------------------------------------- 1 | curl 2 | git 3 | jq 4 | htop 5 | tmux 6 | tree 7 | vim 8 | neovim 9 | net-tools 10 | iproute2 11 | ffmpeg 12 | -------------------------------------------------------------------------------- /binder/env-lock.yml: -------------------------------------------------------------------------------- 1 | name: env-lock 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | # https://github.com/conda/conda-lock/issues/381 7 | - conda=22.9 8 | - conda-lock=1.2.1 9 | - mamba>=1.0 10 | - lockfile 11 | - pip 12 | -------------------------------------------------------------------------------- /binder/environment.yml: -------------------------------------------------------------------------------- 1 | name: nsidc-tutorials 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - fsspec>=2022.10 7 | - gdal=3.6 8 | - xarray 9 | - rioxarray 10 | - matplotlib-base>=3.5 11 | - ipyleaflet 12 | - shapely 13 | - numpy=1.23.5 # datasharder bug 14 | - geopandas 15 | - h5py 16 | - h5netcdf 17 | - netcdf4=1.6.2 18 | - geopy 19 | - pyresample 20 | - earthaccess>=0.9.0 21 | - fiona 22 | - zarr 23 | - ipympl 24 | - descartes 25 | - rasterio~=1.3.6 26 | - plotly 27 | - joblib 28 | - cartopy 29 | - icepyx 30 | - h5coro 31 | - pystac-client 32 | - vaex 33 | - intake-xarray 34 | - intake-stac 35 | - jupyter-offlinenotebook 36 | - sidecar 37 | - geoviews 38 | - pipreqsnb 39 | - conda-lock>=1.2.1 40 | - mamba>=1.0 41 | - coiled>=0.9.30 42 | - pip 43 | - pip: 44 | - awscliv2 45 | platforms: 46 | - linux-64 47 | - osx-64 48 | - win-64 49 | -------------------------------------------------------------------------------- /binder/postBuild: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/binder/postBuild -------------------------------------------------------------------------------- /contributor_guide.md: -------------------------------------------------------------------------------- 1 | # Contribution Guide 2 | 3 | ## Ways to contribute 4 | 5 | We welcome contributions in many forms; 6 | - Share a use case, example or idea for a new tutorial 7 | - Submit a new tutorial 8 | - Update an existing tutorial 9 | - Submit bug reports and feature requests 10 | - Fix typos 11 | - Add documentation 12 | 13 | If you would like to contribute, check the _Issues_ by clicking on the _Issues_ tab. 14 | Someone might already have a similar idea. If not, then create a new issue and submit your suggestion. 15 | 16 | Please read our [code of conduct](CODE_OF_CONDUCT.md) 17 | 18 | ## How to contribute 19 | 20 | We follow a __Forking Workflow__. This approach allows contributions from outside of NSIDC. 21 | 22 | A detailed recipe for contributing using the __Forking Workflow__ is given below. However, to orient you and provide a roadmap, the workflow is described here briefly. 
23 |
24 | In a __Forking Workflow__ the `nsidc/NSIDC-Data-Tutorials` repository is _Forked_, creating a "copy" under your
25 | private repository; e.g. `<username>/NSIDC-Data-Tutorials`, where `<username>` is your GitHub username. This "copy" is just a `git clone` of
26 | `nsidc/NSIDC-Data-Tutorials`. To add content or make changes to existing content, you follow a __Branching Workflow__: create a branch,
27 | do some work, commit the changes, and push the work to your private repository
28 | (`<username>/NSIDC-Data-Tutorials`). Once you have completed the work, you then create a pull request, which, once approved, is
29 | merged into the central `nsidc/NSIDC-Data-Tutorials`.
30 |
31 | An alternative description of a __Forking Workflow__ can be found [here](https://www.asmeurer.com/git-workflow/).
32 | A more in-depth discussion of the __Forking Workflow__ can be found
33 | [here](https://www.atlassian.com/git/tutorials/comparing-workflows/forking-workflow).
34 |
35 | ### Fork the `nsidc/NSIDC-Data-Tutorials` repository
36 |
37 | Forking creates a copy of the official `NSIDC-Data-Tutorials` repository as a personal repository with the same name. You can add new content and make changes to this _forked_ repository. These changes will not be made in the official repository until you create a _pull request_.
38 |
39 | To _Fork_ `nsidc/NSIDC-Data-Tutorials`, click on the Fork button in the top right corner of the main repo web page. This will automatically create a repo `<username>/NSIDC-Data-Tutorials`.
40 |
41 | ![Forking Button](images/forking_nsidc_data_tutorials.png)
42 |
43 | ### Clone your private repository
44 |
45 | The forked repository `<username>/NSIDC-Data-Tutorials` needs to be cloned to your local machine. To do this, click on the green _Code_ button in the forked repository and copy the repository url.
46 |
47 | ![Cloning repo](images/cloning_nsidc_data_tutorials.png)
48 |
49 | This url will be `git@github.com:<username>/NSIDC-Data-Tutorials.git`
50 |
51 | Then, on your local machine, type the following (you can paste in the repository url you copied):
52 |
53 | ```
54 | $ git clone git@github.com:<username>/NSIDC-Data-Tutorials.git
55 | ```
56 |
57 | This creates a directory `NSIDC-Data-Tutorials`. `cd` into the `NSIDC-Data-Tutorials` directory. You are now ready to create new content or make changes.
58 |
59 |
60 | ### Get any changes made to the remote repo
61 |
62 | Each time you start a new feature or notebook, it is __best practice__ to pull any changes from the official remote repository. This is done in two steps.
63 |
64 | - _Fetch_ changes to the official repo by clicking on the _Fetch upstream_ button. If there are changes to fetch, the green _Compare and merge_ button will be highlighted. Click this button. Your private remote repo is now up-to-date with the official repo.
65 |
66 | ![Fetch upstream button](images/fetch_upstream_nsidc_data_tutorials.png)
67 |
68 | - Now you have to __pull__ those changes to your local repo.
69 |
70 | ```
71 | $ git checkout main # Make sure you are on the main branch
72 | $ git pull # Fetch and merge changes
73 | ```
74 |
75 | ### Create a new branch
76 |
77 | Creating a new local branch ensures that development work is kept separate from the `main` branch. Once the new feature or tutorial is complete and free from error, it can be merged with the main branch.
78 |
79 | A step-by-step sequence of commands to create a new branch is below.
80 |
81 | ```
82 | $ git branch # this lists branches
83 | $ git branch new_branch # create a new branch
84 | $ git checkout new_branch # switch to new_branch
85 | $ git branch # You should see the new branch highlighted in green or with an asterisk to show that it is the
86 | # branch you are on
87 | ```
88 |
89 | This can also be done in a single command.
90 |
91 | ```
92 | $ git checkout -b new_branch
93 | ```
94 |
95 | You are now ready to add a new notebook or do some other work.
96 |
97 | ### Do some work
98 |
99 | Creating a new notebook, adding new code or documentation, and making changes follow a sequence of steps.
100 |
101 | ```
102 |
103 | $ git add <file or files> # Stage a file or files
104 | $ git commit -m 'a short note saying what was done and why' # commit those files
105 |
106 | $ git add <more files>
107 | $ git commit -m 'a short note saying what was done and why'
108 | .
109 | .
110 | .
111 | ```
112 |
113 | The idea is that commits provide a checkpoint for work done to a repo. The messages tell other developers what has been done. If necessary, those commits can be discarded or the repo __rewound__ to undo those changes. Because of this, it is best to keep commits relatively small and atomic.
114 |
115 | ### Push work to private repository
116 | When you are ready to share your work, you push it to your forked copy of the main repo.
117 |
118 | ```
119 | $ git push origin new_branch
120 | ```
121 |
122 | ### Create a pull request
123 | Creating a pull request notifies other developers that you have new work to merge into the main branch.
124 |
125 | Usually you will see a _Make Pull Request_ button in the branch of your forked repo.
126 |
127 | Click this button. You will be given options of branches to compare your new_branch with. Usually this will be the `main` branch. Add a description of what you have done to help other developers review your work. You can also assign reviewers.
128 |
129 | Reviewers then review your work. They may suggest some changes or fixes.
130 |
131 | You can continue to push changes to the pull request until it is merged. That way you can make changes, fix typos, etc.
132 |
133 | Once everyone is happy, the pull request is merged.
134 |
135 | It is good practice to delete new_branch once the pull request has been merged.
136 |
137 | Happy Coding!
138 |
--------------------------------------------------------------------------------
/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 | services:
3 |
4 | tutorials:
5 | build: .
6 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | 4 | tutorials: 5 | image: nsidc/tutorials 6 | container_name: tutorials 7 | ports: 8 | - 8888:8888 9 | volumes: 10 | - ./notebooks:/home/jovyan/work 11 | -------------------------------------------------------------------------------- /images/cloning_nsidc_data_tutorials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/images/cloning_nsidc_data_tutorials.png -------------------------------------------------------------------------------- /images/fetch_upstream_nsidc_data_tutorials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/images/fetch_upstream_nsidc_data_tutorials.png -------------------------------------------------------------------------------- /images/forking_nsidc_data_tutorials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/images/forking_nsidc_data_tutorials.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/README.md: -------------------------------------------------------------------------------- 1 | # ICESat-2 Cloud Access 2 | 3 | ## Summary 4 | We provide several notebooks showcasing how to search and access ICESat-2 from the NASA Earthdata Cloud. NASA data "in the cloud" are stored in Amazon Web Services (AWS) Simple Storage Service (S3) Buckets. **Direct Access** is an efficient way to work with data stored in an S3 Bucket when you are working in the cloud. Cloud-hosted granules can be opened and loaded into memory without the need to download them first. This allows you take advantage of the scalability and power of cloud computing. 5 | 6 | ### [Accessing and working with ICESat-2 data in the cloud](./ATL06-direct-access_rendered.ipynb) 7 | This notebook demonstrates searching for cloud-hosted ICESat-2 data and directly accessing Land Ice Height (ATL06) granules from an Amazon Compute Cloud (EC2) instance using the `earthaccess` package. 8 | 9 | #### Key Learning Objectives 10 | 1. Use `earthaccess` to search for ICESat-2 data using spatial and temporal filters and explore search results; 11 | 2. Open data granules using direct access to the ICESat-2 S3 bucket; 12 | 3. Load a HDF5 group into an `xarray.DataTree`; 13 | 4. Visualize the land ice heights using `hvplot`. 14 | 15 | ### [Plotting ICESat-2 and CryoSat-2 Freeboards](./ICESat2-CryoSat2-Coincident.ipynb) 16 | This notebook demonstrates plotting coincident ICESat-2 and CryoSat-2 data in the same map from within an AWS ec2 instance. ICESat-2 data are accessed via "direct S3 access" using `earthaccess`. CryoSat-2 data are downloaded to our cloud instance from their ftp storage lcoation and accessed locally. 17 | 18 | #### Key Learning Objectives 19 | 1. use `earthaccess` to search for ICESat-2 ATL10 data using a spatial filter 20 | 2. open cloud-hosted files using direct access to the ICESat-2 S3 bucket; 21 | 3. use cs2eo script to download files into your hub instance 22 | 3. 
load an HDF5 group into an `xarray.Dataset`; 23 | 4. visualize freeboards using `hvplot`. 24 | 5. map the locations of ICESat-2 and CryoSat-2 freeboards using `cartopy` 25 | 26 | ### [Processing Large-scale Time Series of ICESat-2 Sea Ice Height in the Cloud](./ATL10-h5coro_rendered.ipynb) 27 | This notebook utilizes several libraries to performantly search, access, read, and grid ATL10 data over the Ross Sea, Antarctica including `earthaccess`, `h5coro`, and `geopandas`. The notebook provides further guidance on how to scale this analysis to the entire continent, running the same workflow from a script that can be run from your laptop using [Coiled](https://www.coiled.io/). 28 | 29 | #### Key Learning Objectives 30 | 1. Use earthaccess to authenticate with Earthdata Login, search for ICESat-2 data using spatial and temporal filters, and directly access files in the cloud. 31 | 2. Open data granules using h5coro to efficiently read HDF5 data from the NSIDC DAAC S3 bucket. 32 | 3. Load data into a geopandas.DataFrame containing geodetic coordinates, ancillary variables, and date/time converted from ATLAS Epoch. 33 | 4. Grid track data to EASE-Grid v2 6.25 km projected grid using drop-in-the-bucket resampling. 34 | 5. Calculate mean statistics and assign aggregated data to grid cells. 35 | 6. Visualize aggregated sea ice height data on a map. 36 | 37 | ## Set up 38 | To run the notebooks provided in this folder in the Amazon Web Services (AWS) cloud, there are a couple of options: 39 | * An EC2 instance already set up with the necessary software installed to run a Jupyter notebook, and the environment set up using the provided environment.yml file. **Note:** If you are running these notebooks on your own AWS EC2 instance using the environment set up using the environment.yml file in the NSIDC-Data-Tutorials/notebooks/ICESat-2_Cloud_Access/environment folder, you may need to run the following command before running the notebook to ensure the notebook executes properly: 40 | 41 | `jupyter nbextension enable --py widgetsnbextension` 42 | 43 | You do NOT need to do this if you are using the environment set up using the environment.yml file from the NSIDC-Data-Tutorials/binder folder. 44 | 45 | * Alternatively, if you have access to one, it can be run in a managed cloud-based Jupyter hub. Just make sure all the necessary libraries are installed (e.g. `earthaccess`,`xarray`,`hvplot`, etc.). 46 | 47 | For further details on the prerequisites, see the 'Prerequisites' section in each notebook. 
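
As background for the gridding step listed in the ATL10 notebook's learning objectives above: "drop-in-the-bucket" resampling simply assigns each along-track point to the grid cell it falls in and then aggregates per cell. A minimal, self-contained sketch — the projection handling and the EASE-Grid 2.0 origin/cell-size values used by the notebook are simplified here to placeholder parameters:

```python
import numpy as np
import pandas as pd

def drop_in_the_bucket(x, y, freeboard, cell_size=6250.0, x0=-4_000_000.0, y0=4_000_000.0):
    """Aggregate point freeboards onto a regular grid.

    x, y: point coordinates already projected to the grid CRS (meters).
    x0, y0: upper-left grid corner -- placeholder values, not the official
    EASE-Grid 2.0 South parameters.
    """
    col = np.floor((x - x0) / cell_size).astype(int)
    row = np.floor((y0 - y) / cell_size).astype(int)
    points = pd.DataFrame({"row": row, "col": col, "freeboard": freeboard})
    # one mean freeboard (and a point count) per occupied grid cell
    return points.groupby(["row", "col"])["freeboard"].agg(["mean", "count"]).reset_index()
```
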
48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/SIR_SAR_L2_E_download_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | from ftplib import FTP 4 | import sys 5 | 6 | 7 | download_file_obj = None 8 | read_byte_count = None 9 | total_byte_count = None 10 | 11 | 12 | def get_padded_count(count, max_count): 13 | return str(count).zfill(len(str(max_count))) 14 | 15 | 16 | def file_byte_handler(data): 17 | global download_file_obj, read_byte_count, total_byte_count 18 | download_file_obj.write(data) 19 | read_byte_count = read_byte_count + len(data) 20 | progress_bar(read_byte_count, total_byte_count) 21 | 22 | 23 | def progress_bar(progress, total, prefix="", size=60, file=sys.stdout): 24 | if total != 0: 25 | x = int(size * progress / total) 26 | x_percent = int(100 * progress / total) 27 | file.write(f" {prefix} [{'='*x}{' '*(size-x)}] {x_percent} % \r") 28 | file.flush() 29 | 30 | 31 | def download_files(user_email, esa_files): 32 | global download_file_obj, read_byte_count, total_byte_count 33 | print("About to connect to ESA science server") 34 | with FTP("science-pds.cryosat.esa.int") as ftp: 35 | try: 36 | ftp.login("anonymous", user_email) 37 | print("Downloading {} files".format(len(esa_files))) 38 | 39 | for i, filename in enumerate(esa_files): 40 | padded_count = get_padded_count(i + 1, len(esa_files)) 41 | print("{}/{}. Downloading file {}".format(padded_count, len(esa_files), os.path.basename(filename))) 42 | 43 | with open(os.path.basename(filename), 'wb') as download_file: 44 | download_file_obj = download_file 45 | total_byte_count = ftp.size(filename) 46 | read_byte_count = 0 47 | ftp.retrbinary('RETR ' + filename, file_byte_handler, 1024) 48 | print("\n") 49 | finally: 50 | print("Exiting FTP.") 51 | ftp.quit() 52 | 53 | 54 | if __name__ == '__main__': 55 | 56 | esa_files = ['SIR_SAR_L2/2019/12/CS_LTA__SIR_SAR_2__20191227T110305_20191227T111751_E001.nc', 'SIR_SAR_L2/2020/03/CS_LTA__SIR_SAR_2__20200329T163208_20200329T164044_E001.nc', 'SIR_SAR_L2/2020/01/CS_LTA__SIR_SAR_2__20200114T203033_20200114T204440_E001.nc', 'SIR_SAR_L2/2019/11/CS_LTA__SIR_SAR_2__20191103T134759_20191103T135125_E001.nc', 'SIR_SAR_L2/2020/02/CS_LTA__SIR_SAR_2__20200204T191657_20200204T192558_E001.nc', 'SIR_SAR_L2/2019/12/CS_LTA__SIR_SAR_2__20191216T215645_20191216T220909_E001.nc', 'SIR_SAR_L2/2020/03/CS_LTA__SIR_SAR_2__20200315T065755_20200315T071241_E001.nc', 'SIR_SAR_L2/2019/10/CS_LTA__SIR_SAR_2__20191030T135252_20191030T135600_E001.nc', 'SIR_SAR_L2/2020/02/CS_LTA__SIR_SAR_2__20200219T081800_20200219T083303_E001.nc', 'SIR_SAR_L2/2020/01/CS_LTA__SIR_SAR_2__20200110T203717_20200110T204612_E001.nc', 'SIR_SAR_L2/2020/04/CS_LTA__SIR_SAR_2__20200409T053748_20200409T054151_E001.nc', 'SIR_SAR_L2/2020/04/CS_LTA__SIR_SAR_2__20200413T053254_20200413T053659_E001.nc', 'SIR_SAR_L2/2020/02/CS_LTA__SIR_SAR_2__20200208T191154_20200208T192117_E001.nc', 'SIR_SAR_L2/2020/03/CS_LTA__SIR_SAR_2__20200319T065300_20200319T070802_E001.nc', 'SIR_SAR_L2/2020/03/CS_LTA__SIR_SAR_2__20200304T175209_20200304T180102_E001.nc', 'SIR_SAR_L2/2019/11/CS_LTA__SIR_SAR_2__20191128T122800_20191128T123212_E001.nc', 'SIR_SAR_L2/2019/10/CS_LTA__SIR_SAR_2__20191009T150801_20191009T151142_E001.nc', 'SIR_SAR_L2/2019/11/CS_LTA__SIR_SAR_2__20191121T231659_20191121T232817_E001.nc', 'SIR_SAR_L2/2020/02/CS_LTA__SIR_SAR_2__20200215T082253_20200215T083741_E001.nc', 
'SIR_SAR_L2/2020/01/CS_LTA__SIR_SAR_2__20200121T094259_20200121T095800_E001.nc', 'SIR_SAR_L2/2019/10/CS_LTA__SIR_SAR_2__20191005T151255_20191005T151621_E001.nc', 'SIR_SAR_L2/2020/04/CS_LTA__SIR_SAR_2__20200427T150701_20200427T151544_E001.nc', 'SIR_SAR_L2/2019/10/CS_LTA__SIR_SAR_2__20191024T004201_20191024T005059_E001.nc', 'SIR_SAR_L2/2020/03/CS_LTA__SIR_SAR_2__20200308T174708_20200308T175621_E001.nc', 'SIR_SAR_L2/2020/04/CS_LTA__SIR_SAR_2__20200402T162707_20200402T163602_E001.nc'] 57 | 58 | if int(platform.python_version_tuple()[0]) < 3: 59 | exit("Your Python version is {}. Please use version 3.0 or higher.".format(platform.python_version())) 60 | 61 | email = input("Please enter your e-mail: ") 62 | 63 | download_files(email, esa_files) 64 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: ICESat2_cloud_access 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python ~=3.10 6 | 7 | ################################################### 8 | # Imported dependencies and extensions # 9 | ################################################### 10 | 11 | - jupyterlab 12 | - earthaccess 13 | - hvplot 14 | - xarray 15 | - ipywidgets 16 | - h5netcdf 17 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/h5cloud/read_atl10.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #import coiled 4 | 5 | import geopandas as gpd 6 | import numpy as np 7 | import pandas as pd 8 | from rich import print as rprint 9 | from itertools import product 10 | from pqdm.threads import pqdm 11 | 12 | 13 | import earthaccess 14 | from h5coro import s3driver, webdriver 15 | import h5coro 16 | 17 | 18 | 19 | 20 | def get_strong_beams(f): 21 | """Returns ground track for strong beams based on IS2 orientation""" 22 | orient = f['orbit_info/sc_orient'][0] 23 | 24 | if orient == 0: 25 | return [f"gt{i}l" for i in [1, 2, 3]] 26 | elif orient == 1: 27 | return [f"gt{i}r" for i in [1, 2, 3]] 28 | else: 29 | raise KeyError("Spacecraft orientation neither forward nor backward") 30 | 31 | 32 | 33 | 34 | 35 | 36 | def read_atl10(files, bounding_box=None, executors=4, environment="local", credentials=None): 37 | """Returns a consolidated GeoPandas dataframe for a set of ATL10 file pointers. 
38 | 39 | Parameters: 40 | files (list[S3FSFile]): list of authenticated fsspec file references to ATL10 on S3 (via earthaccess) 41 | executors (int): number of threads 42 | 43 | """ 44 | if environment == "local": 45 | driver = webdriver.HTTPDriver 46 | else: 47 | driver = s3driver.S3Driver 48 | 49 | GPS_EPOCH = pd.to_datetime('1980-01-06 00:00:00') 50 | 51 | def read_h5coro(file): 52 | """Reads datasets required for creating gridded freeboard from a single ATL10 file 53 | 54 | file: an authenticated fsspec file reference on S3 (returned by earthaccess) 55 | 56 | returns: a list of geopandas dataframes 57 | """ 58 | # Open file object 59 | h5 = h5coro.H5Coro(file, driver, credentials=credentials) 60 | 61 | # Get strong beams based on orientation 62 | ancillary_datasets = ["orbit_info/sc_orient", "ancillary_data/atlas_sdp_gps_epoch"] 63 | f = h5.readDatasets(datasets=ancillary_datasets, block=True) 64 | strong_beams = get_strong_beams(f) 65 | atlas_sdp_gps_epoch = f["ancillary_data/atlas_sdp_gps_epoch"][:] 66 | 67 | # Create list of datasets to load 68 | datasets = ["freeboard_segment/latitude", 69 | "freeboard_segment/longitude", 70 | "freeboard_segment/delta_time", 71 | "freeboard_segment/seg_dist_x", 72 | "freeboard_segment/heights/height_segment_length_seg", 73 | "freeboard_segment/beam_fb_height", 74 | "freeboard_segment/heights/height_segment_type"] 75 | ds_list = ["/".join(p) for p in list(product(strong_beams, datasets))] 76 | # Load datasets 77 | f = h5.readDatasets(datasets=ds_list, block=True) 78 | # rprint(f["gt2l/freeboard_segment/latitude"], type(f["gt2l/freeboard_segment/latitude"])) 79 | 80 | # Create a list of geopandas.DataFrames containing beams 81 | tracks = [] 82 | for beam in strong_beams: 83 | ds = {dataset.split("/")[-1]: f[dataset][:] for dataset in ds_list if dataset.startswith(beam)} 84 | 85 | # Convert delta_time to datetime 86 | ds["delta_time"] = GPS_EPOCH + pd.to_timedelta(ds["delta_time"]+atlas_sdp_gps_epoch, unit='s') 87 | # we don't need nanoseconds to grid daily let alone weekly 88 | ds["delta_time"] = ds["delta_time"].astype('datetime64[s]') 89 | 90 | # Add beam identifier 91 | ds["beam"] = beam 92 | 93 | # Set fill values to NaN - assume 100 m as threshold 94 | ds["beam_fb_height"] = np.where(ds["beam_fb_height"] > 100, np.nan, ds["beam_fb_height"]) 95 | 96 | geometry = gpd.points_from_xy(ds["longitude"], ds["latitude"]) 97 | del ds["longitude"] 98 | del ds["latitude"] 99 | 100 | gdf = gpd.GeoDataFrame(ds, geometry=geometry, crs="EPSG:4326") 101 | gdf.dropna(axis=0, inplace=True) 102 | if bounding_box is not None: 103 | bbox = [float(coord) for coord in bounding_box.split(",")] 104 | gdf = gdf.cx[bbox[0]:bbox[2],bbox[1]:bbox[3]] 105 | tracks.append(gdf) 106 | 107 | df = pd.concat(tracks) 108 | return df 109 | 110 | dfs = pqdm(files, read_h5coro, n_jobs=executors) 111 | combined = pd.concat(dfs) 112 | 113 | return combined 114 | 115 | 116 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/h5cloud/readme.md: -------------------------------------------------------------------------------- 1 | ## Running and scaling Python with [Coiled serverless functions](https://docs.coiled.io/user_guide/usage/functions/index.html). 2 | 3 | This script contains the same code to read ATL10 data as the notebook, the one difference is that we are using a function decorator from Coiled that allows us to execute the function in the cloud with no modifications whatsoever. 
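
In practice that decorator is only a few lines wrapped around the existing reader — a trimmed sketch of the pattern used in `workflow.py` (the region, memory, and keepalive values mirror that script):

```python
import coiled

from read_atl10 import read_atl10

@coiled.function(region="us-west-2", memory="4 GB", keepalive="1 HOUR")
def cloud_runner(files, bounding_box, credentials):
    # Same reader as the local path; Coiled replicates the current Python
    # environment on a cloud VM and executes the function there.
    return read_atl10(files, bounding_box=bounding_box,
                      environment="cloud", credentials=credentials)
```
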
4 | 5 | The only requirement for this workflow is to have an active account with Coiled and execute this from our terminal: 6 | 7 | ```bash 8 | coiled login 9 | ``` 10 | 11 | This will open a browser tab to authenticate ourselves with their APIs 12 | 13 | > Note: If you would like to test this ask us to include you with Openscapes! 14 | 15 | 16 | Our functions can be parallelize, scaling the computation to hundreds of nodes if needed in the same way we could use Amazon lambda functions. Once we install and activate [`nsidc-tutorials`](../../binder/environment.yml) We can run the script with the following python command: 17 | 18 | ```bash 19 | python workflow.py --bbox="-180, -90, 180, -60" --year=2023 --out="test-2023-local" --env=local 20 | 21 | ``` 22 | 23 | This will run the code locally. If we want to run the code in the cloud we'll run: 24 | 25 | ```bash 26 | python workflow.py --bbox="-180, -90, 180, -60" --year=2023 --out="test-2023-local" --env=cloud 27 | 28 | ``` 29 | 30 | The first time we execute this function, the provisioning will take a couple minutes and will sync our current Python environment with the cloud instances executing our code. 31 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/h5cloud/workflow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import coiled 4 | 5 | import geopandas as gpd 6 | import numpy as np 7 | import pandas as pd 8 | from rich import print as rprint 9 | from itertools import product 10 | import argparse 11 | 12 | import earthaccess 13 | from h5coro import h5coro, s3driver 14 | 15 | from read_atl10 import read_atl10 16 | 17 | if __name__ == "__main__": 18 | 19 | rprint(f"executing locally") 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--bbox', help='bbox') 22 | parser.add_argument('--year', help='year to process') 23 | parser.add_argument('--env', help='execute in the cloud or local, default:local') 24 | parser.add_argument('--out', help='output file name') 25 | args = parser.parse_args() 26 | 27 | 28 | auth = earthaccess.login() 29 | 30 | # ross_sea = (-180, -78, -160, -74) 31 | # antarctic = (-180, -90, 180, -60) 32 | 33 | year = int(args.year) 34 | bbox = tuple([float(c) for c in args.bbox.split(",")]) 35 | 36 | print(f"Searching ATL10 data for year {year} ...") 37 | granules = earthaccess.search_data( 38 | short_name = 'ATL10', 39 | version = '006', 40 | cloud_hosted = True, 41 | bounding_box = bbox, 42 | temporal = (f'{args.year}-06-01',f'{args.year}-09-30'), 43 | count=4, 44 | debug=True 45 | ) 46 | 47 | 48 | if args.env == "local": 49 | files = [g.data_links(access="out_of_region")[0] for g in granules] 50 | credentials = earthaccess.__auth__.token["access_token"] 51 | 52 | df = read_atl10(files, bounding_box=args.bbox, environment="local", credentials=credentials) 53 | else: 54 | files = [g.data_links(access="direct")[0].replace("s3://", "") for g in granules] 55 | aws_credentials = earthaccess.get_s3_credentials("NSIDC") 56 | credentials = { 57 | "aws_access_key_id": aws_credentials["accessKeyId"], 58 | "aws_secret_access_key": aws_credentials["secretAccessKey"], 59 | "aws_session_token": aws_credentials["sessionToken"] 60 | } 61 | 62 | @coiled.function(region= "us-west-2", 63 | memory= "4 GB", 64 | keepalive="1 HOUR") 65 | def cloud_runnner(files, bounding_box, credentials): 66 | df = read_atl10(files, bounding_box=bounding_box, environment="cloud", credentials=credentials) 67 | 
return df 68 | 69 | df = cloud_runnner(files, args.bbox, credentials=credentials) 70 | 71 | 72 | df.to_parquet(f"{args.out}.parquet") 73 | rprint(df) 74 | 75 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/img/ATL10_CS2_L2_SAR_query_med.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_Cloud_Access/img/ATL10_CS2_L2_SAR_query_med.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/img/atl06_example_end_product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_Cloud_Access/img/atl06_example_end_product.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/img/icesat2-cryosat2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_Cloud_Access/img/icesat2-cryosat2.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/img/icesat2.atl10.gridded.count_segments.ross_sea.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_Cloud_Access/img/icesat2.atl10.gridded.count_segments.ross_sea.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/img/nsidc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_Cloud_Access/img/nsidc_logo.png -------------------------------------------------------------------------------- /notebooks/ICESat-2_Cloud_Access/ross_sea.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "FeatureCollection", 3 | "features": [ 4 | { 5 | "type": "Feature", 6 | "properties": {}, 7 | "geometry": { 8 | "coordinates": [ 9 | [ 10 | [ 11 | -191.09608556432593, 12 | -73.84648052492354 13 | ], 14 | [ 15 | -196.32896148365322, 16 | -76.01486891873441 17 | ], 18 | [ 19 | -201.3333475059275, 20 | -79.45419249238772 21 | ], 22 | [ 23 | -195.62738051734928, 24 | -82.20681871096693 25 | ], 26 | [ 27 | -189.41756278781764, 28 | -84.1511348270979 29 | ], 30 | [ 31 | -167.0795373869447, 32 | -84.71222453066771 33 | ], 34 | [ 35 | -154.94650971884352, 36 | -84.47077199426083 37 | ], 38 | [ 39 | -147.87987772139172, 40 | -83.76551904624706 41 | ], 42 | [ 43 | -138.89031336546202, 44 | -83.16126208208007 45 | ], 46 | [ 47 | -139.89760391715487, 48 | -81.81509135152459 49 | ], 50 | [ 51 | -145.07462138020958, 52 | -75.8454713912678 53 | ], 54 | [ 55 | -145.2859453568654, 56 | -73.60545521193768 57 | ], 58 | [ 59 | -155.7529050321871, 60 | -71.77435794070743 61 | ], 62 | [ 63 | -173.60352774698885, 64 | -71.50777786832501 65 | ], 66 | [ 67 | -187.08441940129651, 68 | -71.32576778967325 69 | ], 70 | [ 71 | -191.09608556432593, 72 | -73.84648052492354 73 | ] 74 | ] 75 | ], 
76 | "type": "Polygon" 77 | }, 78 | "id": 0 79 | } 80 | ] 81 | } -------------------------------------------------------------------------------- /notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/NSIDC_logo_2018_web.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/NSIDC_logo_2018_web.jpg -------------------------------------------------------------------------------- /notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/README.md: -------------------------------------------------------------------------------- 1 | # AGU-2019-NSIDC-Data-Tutorial 2 | 3 | 4 | ## Presenters 5 | 6 | Amy Steiker, Walt Meier: NASA National Snow and Ice Data Center Distributed Active Archive Center (NSIDC DAAC) 7 | 8 | ## Authors 9 | 10 | Amy Steiker, Bruce Wallin, Andy Barrett, Walt Meier, Luis Lopez, Marin Klinger: NASA National Snow and Ice Data Center Distributed Active Archive Center (NSIDC DAAC) 11 | 12 | ## Summary 13 | 14 | The NSIDC DAAC provides a wide variety of remote sensing data on the cryosphere, often with disparate coverage and resolution. This tutorial will demonstrate our data discovery, access, and subsetting services, along with basic open source resources used to harmonize and analyze data across these diverse products. The tutorial will be presented as a series of Python-based Jupyter Notebooks, focusing on sea ice height and ice surface temperature data from NASA’s ICESat-2 and MODIS missions, respectively, to characterize Arctic sea ice. No coding experience or computing prerequisites are required, though some familiarity with Python and Jupyter Notebooks is recommended. The in-person tutorial utilized a JupyterHub environment that was preconfigured with the dependencies needed to run each operation in the series of notebooks. For those of you interested in running the notebooks outside of the in-person event, see the README in this NSIDC-Data-Tutorials repository for details on how to run using Binder and Conda. 15 | 16 | 17 | ## Key Learning Objectives 18 | 19 | 1) Become familiar with NSIDC resources, including user support documents, data access options, and data subsetting services. 20 | 21 | 2) Learn how to access and subset data programmatically using the NSIDC DAAC's API service. 22 | 23 | 3) Learn about the coverage, resolution, and structure of sea ice data from the new NASA ICESat-2 mission. 24 | 25 | 4) Interact with ICESat-2 and MODIS data using basic Python science libraries to visualize, filter, and plot concurrent data. 
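As a first taste of that last objective, a downloaded ICESat-2 granule can be explored with `h5py` alone before any of the tutorial machinery is involved. The sketch below only lists what is inside a granule; the file name is a placeholder for whichever ATL07/ATL10 HDF5 file you have downloaded, not a file shipped with this repository:

```python
# Minimal sketch: inspect the contents of an ICESat-2 HDF5 granule.
# "ATL07_example_granule.h5" is a placeholder file name (an assumption).
import h5py

with h5py.File("ATL07_example_granule.h5", "r") as f:
    # Top-level groups: ground-track groups (gt1l, gt1r, ...) plus metadata groups
    print(list(f.keys()))

    # Walk the full hierarchy and print every dataset's path, shape, and dtype
    def show(name, obj):
        if isinstance(obj, h5py.Dataset):
            print(name, obj.shape, obj.dtype)

    f.visititems(show)
```

The notebooks in this folder build on the same idea: `tutorial_helper_functions.py` (below) wraps this `h5py` access pattern into functions that assemble beam-level variables into pandas and GeoPandas data frames.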
26 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: nsidc-icesat2 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - pangeo-notebook 7 | - xarray 8 | - matplotlib-base 9 | - shapely 10 | - pandas 11 | - geopandas 12 | - h5py 13 | - pyresample 14 | - pyproj 15 | - fiona 16 | - cartopy 17 | - icepyx 18 | - pip 19 | platforms: 20 | - linux-64 21 | - osx-64 22 | - win-64 23 | -------------------------------------------------------------------------------- /notebooks/ICESat-2_MODIS_Arctic_Sea_Ice/tutorial_helper_functions.py: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------- 2 | # Functions for AGU tutorial notebooks 3 | # 4 | # In Python a module is just a collection of functions in a file with 5 | # a .py extension. 6 | # 7 | # Functions are defined using: 8 | # 9 | # def function_name(argument1, argument2, ... keyword_arg1=some_variable): 10 | # '''A docstring explaining what the function does and what 11 | # arguments it expects. 12 | # ''' 13 | # 14 | # return some_value # Not required unless you need to return a value 15 | # 16 | #---------------------------------------------------------------------- 17 | 18 | import h5py 19 | from pathlib import Path 20 | import pandas as pd 21 | import numpy as np 22 | import geopandas as gpd 23 | from datetime import datetime, timedelta 24 | import pyproj 25 | import requests 26 | import json 27 | from statistics import mean 28 | from xml.etree import ElementTree as ET 29 | import os 30 | import pprint 31 | import shutil 32 | import zipfile 33 | import io 34 | import time 35 | 36 | 37 | def print_cmr_metadata(entry, fields=['dataset_id', 'version_id']): 38 | ''' 39 | Prints metadata from a query to the CMR collections.json endpoint 40 | 41 | entry - Metadata entry for a dataset 42 | fields - list of metadata fields to print 43 | ''' 44 | print(', '.join([f"{field}: {entry[field]}" for field in fields])) 45 | 46 | 47 | def granule_info(data_dict): 48 | ''' 49 | Prints the number of granules based on the input data set short name, version, bounding box, and temporal range. Queries the CMR and pages over the results. 
50 | 51 | data_dict - a dictionary with the following CMR keywords: 52 | 'short_name', 53 | 'version', 54 | 'bounding_box', 55 | 'temporal' 56 | ''' 57 | # set CMR API endpoint for granule search 58 | granule_search_url = 'https://cmr.earthdata.nasa.gov/search/granules' 59 | 60 | # add page size and page num to dictionary 61 | data_dict['page_size'] = 100 62 | data_dict['page_num'] = 1 63 | 64 | granules = [] 65 | headers={'Accept': 'application/json'} 66 | while True: 67 | response = requests.get(granule_search_url, params=data_dict, headers=headers) 68 | results = json.loads(response.content) 69 | 70 | if len(results['feed']['entry']) == 0: 71 | # Out of results, so break out of loop 72 | break 73 | 74 | # Collect results and increment page_num 75 | granules.extend(results['feed']['entry']) 76 | data_dict['page_num'] += 1 77 | 78 | # calculate granule size 79 | granule_sizes = [float(granule['granule_size']) for granule in granules] 80 | print('There are', len(granules), 'granules of', data_dict['short_name'], 'version', data_dict['version'], 'over my area and time of interest.') 81 | print(f'The average size of each granule is {mean(granule_sizes):.2f} MB and the total size of all {len(granules)} granules is {sum(granule_sizes):.2f} MB') 82 | return len(granules) 83 | 84 | 85 | def print_service_options(data_dict, response): 86 | ''' 87 | Prints the available subsetting, reformatting, and reprojection services available based on inputted data set name, version, and Earthdata Login username and password. 88 | 89 | data_dict - a dictionary with the following keywords: 90 | 'short_name', 91 | 'version', 92 | 'uid', 93 | 'pswd' 94 | ''' 95 | 96 | root = ET.fromstring(response.content) 97 | 98 | #collect lists with each service option 99 | subagent = [subset_agent.attrib for subset_agent in root.iter('SubsetAgent')] 100 | 101 | # variable subsetting 102 | variables = [SubsetVariable.attrib for SubsetVariable in root.iter('SubsetVariable')] 103 | variables_raw = [variables[i]['value'] for i in range(len(variables))] 104 | variables_join = [''.join(('/',v)) if v.startswith('/') == False else v for v in variables_raw] 105 | variable_vals = [v.replace(':', '/') for v in variables_join] 106 | 107 | # reformatting 108 | formats = [Format.attrib for Format in root.iter('Format')] 109 | format_vals = [formats[i]['value'] for i in range(len(formats))] 110 | if format_vals : format_vals.remove('') 111 | 112 | # reprojection options 113 | projections = [Projection.attrib for Projection in root.iter('Projection')] 114 | proj_vals = [] 115 | for i in range(len(projections)): 116 | if (projections[i]['value']) != 'NO_CHANGE' : 117 | proj_vals.append(projections[i]['value']) 118 | 119 | #print service information depending on service availability and select service options 120 | print('Services available for', data_dict['short_name'],':') 121 | print() 122 | if len(subagent) < 1 : 123 | print('No customization services available.') 124 | else: 125 | subdict = subagent[0] 126 | if subdict['spatialSubsetting'] == 'true': 127 | print('Bounding box subsetting') 128 | if subdict['spatialSubsettingShapefile'] == 'true': 129 | print('Shapefile subsetting') 130 | if subdict['temporalSubsetting'] == 'true': 131 | print('Temporal subsetting') 132 | if len(variable_vals) > 0: 133 | print('Variable subsetting') 134 | if len(format_vals) > 0 : 135 | print('Reformatting to the following options:', format_vals) 136 | if len(proj_vals) > 0 : 137 | print('Reprojection to the following options:', proj_vals) 138 | 139 | 140 
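# ----------------------------------------------------------------------
# Example usage of granule_info() -- an illustrative sketch only; the
# short name, version, bounding box, and temporal values below are
# placeholder assumptions, not part of the original tutorial code:
#
#     search_params = {
#         'short_name': 'ATL07',
#         'version': '003',
#         'bounding_box': '140,72,153,80',
#         'temporal': '2019-03-23T00:00:00Z,2019-03-23T23:59:59Z',
#     }
#     n_granules = granule_info(search_params)
#
# granule_info() adds 'page_size' and 'page_num' to the dictionary,
# pages through the CMR granule results, prints the granule count and
# total size, and returns the number of granules found.
# ----------------------------------------------------------------------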
| 141 | 142 | def request_data(param_dict,session): 143 | ''' 144 | Request data from NSIDC's API based on inputted key-value-pairs from param_dict. 145 | Different request methods depending on 'async' or 'sync' options. 146 | 147 | In addition to param_dict, input Earthdata login `uid` and `pswd`. 148 | ''' 149 | 150 | # Create an output folder if the folder does not already exist. 151 | path = str(os.getcwd() + '/Outputs') 152 | if not os.path.exists(path): 153 | os.mkdir(path) 154 | 155 | # Define base URL 156 | base_url = 'https://n5eil02u.ecs.nsidc.org/egi/request' 157 | 158 | # Different access methods depending on request mode: 159 | 160 | if param_dict['request_mode'] == 'async': 161 | request = session.get(base_url, params=param_dict) 162 | print('Request HTTP response: ', request.status_code) 163 | 164 | # Raise bad request: Loop will stop for bad response code. 165 | request.raise_for_status() 166 | print() 167 | print('Order request URL: ', request.url) 168 | print() 169 | esir_root = ET.fromstring(request.content) 170 | #print('Order request response XML content: ', request.content) 171 | 172 | #Look up order ID 173 | orderlist = [] 174 | for order in esir_root.findall("./order/"): 175 | orderlist.append(order.text) 176 | orderID = orderlist[0] 177 | print('order ID: ', orderID) 178 | 179 | #Create status URL 180 | statusURL = base_url + '/' + orderID 181 | print('status URL: ', statusURL) 182 | 183 | #Find order status 184 | request_response = session.get(statusURL) 185 | print('HTTP response from order response URL: ', request_response.status_code) 186 | 187 | # Raise bad request: Loop will stop for bad response code. 188 | request_response.raise_for_status() 189 | request_root = ET.fromstring(request_response.content) 190 | statuslist = [] 191 | for status in request_root.findall("./requestStatus/"): 192 | statuslist.append(status.text) 193 | status = statuslist[0] 194 | #print('Data request is submitting...') 195 | print() 196 | print('Initial request status is ', status) 197 | print() 198 | 199 | #Continue loop while request is still processing 200 | loop_response = session.get(statusURL) 201 | loop_root = ET.fromstring(loop_response.content) 202 | while status == 'pending' or status == 'processing': 203 | print('Status is not complete. Trying again.') 204 | time.sleep(10) 205 | loop_response = session.get(statusURL) 206 | 207 | # Raise bad request: Loop will stop for bad response code. 
208 | loop_response.raise_for_status() 209 | loop_root = ET.fromstring(loop_response.content) 210 | 211 | #find status 212 | statuslist = [] 213 | for status in loop_root.findall("./requestStatus/"): 214 | statuslist.append(status.text) 215 | status = statuslist[0] 216 | print('Retry request status is: ', status) 217 | if status == 'pending' or status == 'processing': 218 | continue 219 | 220 | #Order can either complete, complete_with_errors, or fail: 221 | # Provide complete_with_errors error message: 222 | if status == 'failed': 223 | messagelist = [] 224 | for message in loop_root.findall("./processInfo/"): 225 | messagelist.append(message.text) 226 | print('error messages:') 227 | pprint.pprint(messagelist) 228 | print() 229 | 230 | # Download zipped order if status is complete or complete_with_errors 231 | if status == 'complete' or status == 'complete_with_errors': 232 | downloadURL = 'https://n5eil02u.ecs.nsidc.org/esir/' + orderID + '.zip' 233 | print('Zip download URL: ', downloadURL) 234 | print('Beginning download of zipped output...') 235 | zip_response = session.get(downloadURL) 236 | # Raise bad request: Loop will stop for bad response code. 237 | zip_response.raise_for_status() 238 | with zipfile.ZipFile(io.BytesIO(zip_response.content)) as z: 239 | z.extractall(path) 240 | print('Data request is complete.') 241 | else: print('Request failed.') 242 | 243 | else: 244 | print('Requesting...') 245 | request = session.get(s.url,auth=(uid,pswd)) 246 | print('HTTP response from order response URL: ', request.status_code) 247 | request.raise_for_status() 248 | d = request.headers['content-disposition'] 249 | fname = re.findall('filename=(.+)', d) 250 | dirname = os.path.join(path,fname[0].strip('\"')) 251 | print('Downloading...') 252 | open(dirname, 'wb').write(request.content) 253 | print('Data request is complete.') 254 | 255 | # Unzip outputs 256 | for z in os.listdir(path): 257 | if z.endswith('.zip'): 258 | zip_name = path + "/" + z 259 | zip_ref = zipfile.ZipFile(zip_name) 260 | zip_ref.extractall(path) 261 | zip_ref.close() 262 | os.remove(zip_name) 263 | 264 | 265 | def clean_folder(): 266 | ''' 267 | Cleans up output folder by removing individual granule folders. 268 | 269 | ''' 270 | path = str(os.getcwd() + '/Outputs') 271 | 272 | for root, dirs, files in os.walk(path, topdown=False): 273 | for file in files: 274 | try: 275 | shutil.move(os.path.join(root, file), path) 276 | except OSError: 277 | pass 278 | for name in dirs: 279 | os.rmdir(os.path.join(root, name)) 280 | 281 | 282 | def load_icesat2_as_dataframe(filepath, VARIABLES): 283 | ''' 284 | Load points from an ICESat-2 granule 'gt' groups as DataFrame of points. Uses VARIABLES mapping 285 | to select subset of '/gt/...' variables (Assumes these variables share dimensions) 286 | Arguments: 287 | filepath to ATL0# granule 288 | ''' 289 | 290 | ds = h5py.File(filepath, 'r') 291 | 292 | # Get dataproduct name 293 | dataproduct = ds.attrs['identifier_product_type'].decode() 294 | # Convert variable paths to 'Path' objects for easy manipulation 295 | variables = [Path(v) for v in VARIABLES[dataproduct]] 296 | # Get set of beams to extract individially as dataframes combining in the end 297 | beams = {list(v.parents)[-2].name for v in variables} 298 | 299 | dfs = [] 300 | for beam in beams: 301 | data_dict = {} 302 | beam_variables = [v for v in variables if beam in str(v)] 303 | for variable in beam_variables: 304 | # Use variable 'name' as column name. 
Beam will be specified in 'beam' column 305 | column = variable.name 306 | variable = str(variable) 307 | try: 308 | values = ds[variable][:] 309 | # Convert invalid data to np.nan (only for float columns) 310 | if 'float' in str(values.dtype): 311 | if 'valid_min' in ds[variable].attrs: 312 | values[values < ds[variable].attrs['valid_min']] = np.nan 313 | if 'valid_max' in ds[variable].attrs: 314 | values[values > ds[variable].attrs['valid_max']] = np.nan 315 | if '_FillValue' in ds[variable].attrs: 316 | values[values == ds[variable].attrs['_FillValue']] = np.nan 317 | 318 | data_dict[column] = values 319 | except KeyError: 320 | print(f'Variable {variable} not found in {filepath}. Likely an empty granule.') 321 | raise 322 | 323 | df = pd.DataFrame.from_dict(data_dict) 324 | df['beam'] = beam 325 | dfs.append(df) 326 | 327 | df = pd.concat(dfs, sort=True) 328 | # Add filename column for book-keeping and reset index 329 | df['filename'] = Path(filepath).name 330 | df = df.reset_index(drop=True) 331 | 332 | return df 333 | 334 | 335 | 336 | def convert_to_gdf(df): 337 | ''' 338 | Converts a DataFrame of points with 'longitude' and 'latitude' columns to a 339 | GeoDataFrame 340 | ''' 341 | gdf = gpd.GeoDataFrame( 342 | df, 343 | geometry=gpd.points_from_xy(df.longitude, df.latitude), 344 | crs={'init': 'epsg:4326'}, 345 | ) 346 | 347 | return gdf 348 | 349 | 350 | def convert_delta_time(delta_time): 351 | ''' 352 | Convert ICESat-2 'delta_time' parameter to UTC datetime 353 | ''' 354 | EPOCH = datetime(2018, 1, 1, 0, 0, 0) 355 | 356 | utc_datetime = EPOCH + timedelta(seconds=delta_time) 357 | 358 | return utc_datetime 359 | 360 | 361 | # def compute_distance(df): 362 | # ''' 363 | # Calculates along track distance for each point within the 'gt1l', 'gt2l', and 'gt3l' beams, beginning with first beam index. 
364 | 365 | # Arguments: 366 | # df: DataFrame with icesat-2 data 367 | 368 | # Returns: 369 | # add_dist added as new column to initial df 370 | # ''' 371 | 372 | # beam_1 = df[df['beam'] == 'gt1l'] 373 | # beam_2 = df[df['beam'] == 'gt2l'] 374 | # beam_3 = df[df['beam'] == 'gt3l'] 375 | 376 | # add_dist = [] 377 | # add_dist.append(beam_1.height_segment_length_seg.values[0]) 378 | 379 | # for i in range(1, len(beam_1)): 380 | # add_dist.append(add_dist[i-1] + beam_1.height_segment_length_seg.values[i]) 381 | 382 | # add_dist_se = pd.Series(add_dist) 383 | # beam_1.insert(loc=0, column='add_dist', value=add_dist_se.values) 384 | # beam_1 385 | 386 | # add_dist = [] 387 | # add_dist.append(beam_2.height_segment_length_seg.values[0]) 388 | 389 | # for i in range(1, len(beam_2)): 390 | # add_dist.append(add_dist[i-1] + beam_2.height_segment_length_seg.values[i]) 391 | 392 | # add_dist_se = pd.Series(add_dist) 393 | # beam_2.insert(loc=0, column='add_dist', value=add_dist_se.values) 394 | # beam_2 395 | 396 | # add_dist = [] 397 | # add_dist.append(beam_3.height_segment_length_seg.values[0]) 398 | 399 | # for i in range(1, len(beam_3)): 400 | # add_dist.append(add_dist[i-1] + beam_3.height_segment_length_seg.values[i]) 401 | 402 | # add_dist_se = pd.Series(add_dist) 403 | # beam_3.insert(loc=0, column='add_dist', value=add_dist_se.values) 404 | # beam_3 405 | 406 | # beams = [beam_1,beam_2,beam_3] 407 | # df = pd.concat(beams,ignore_index=True) 408 | 409 | # return df 410 | -------------------------------------------------------------------------------- /notebooks/SMAP/01_download_smap_data.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e86eaecf-a612-4dbb-8bdc-5b5dfddf65b9", 6 | "metadata": {}, 7 | "source": [ 8 | "
\n", 9 | "\n", 10 | "\n", 11 | "# **1.0 Access SMAP data with Python**\n", 12 | "\n", 13 | "
\n", 14 | "---" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "id": "4101ae06-3984-435c-abcc-f6346d15069b", 20 | "metadata": {}, 21 | "source": [ 22 | "## **1. Tutorial Introduction/Overview**\n", 23 | "\n", 24 | "We will use the `earthaccess` library to authenticate with our Earthdata Login credentials and to search for and bulk download SMAP data. For this tutorial we wil use SPL3SMP version 008 as an example, but the same method can be applied to any other SMAP data sets archived at NSIDC. \n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "id": "dd6c0128-efe4-4fab-8721-55fc366e3c7e", 30 | "metadata": {}, 31 | "source": [ 32 | "### **Credits**\n", 33 | "\n", 34 | "This tutorial is based on the notebooks originally provided to NSIDC by Adam Purdy. Jennifer Roebuck of NSIDC updated the tutorials to include the latest version of SMAP data and use `earthaccess` for authentication, seatching for and downloading the data in order to incorporate it into the NSIDC-Data-Tutorials repo. \n", 35 | "\n", 36 | "For questions regarding the notebook, or to report problems, please create a new issue in the [NSIDC-Data-Tutorials repo](https://github.com/nsidc/NSIDC-Data-Tutorials/issues)." 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "id": "a57c664e-76f9-416e-ae03-75dce51b3cb7", 42 | "metadata": {}, 43 | "source": [ 44 | "### **Learning Goals**\n", 45 | "\n", 46 | "After completing this notebook you will be able to use the `earthaccess` library to:\n", 47 | "1. Authenticate with your Earthdata Login credentials.\n", 48 | "2. Search for SMAP data.\n", 49 | "3. Bulk download SMAP data." 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "id": "015703a9-f02a-42f4-8ff0-3b002bf4f2f5", 55 | "metadata": {}, 56 | "source": [ 57 | "### **Prerequisites**\n", 58 | "\n", 59 | "1. An Earthdata Login is required for data access. If you don't have one, you can register for one [here](https://urs.earthdata.nasa.gov/).\n", 60 | "2. A .netrc file, that contains your Earthdata Login credentials, in your home directory. The current recommended practice for authentication is to create a .netrc file in your home directory following these [instructions](https://nsidc.org/data/user-resources/help-center/programmatic-data-access-guide).\n", 61 | "3. The nsidc-tutorials environment is set up and activated. This [README](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/README.md) has setup instructions.\n" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "id": "c45f3276-1172-4bfb-8389-e9d3cbbe88f4", 67 | "metadata": {}, 68 | "source": [ 69 | "### **Time requirement**\n", 70 | "\n", 71 | "Allow 5 to 10 minutes to complete this tutorial." 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "id": "53b77eb5-d5ed-4ddd-8fb1-6c69618d7852", 77 | "metadata": {}, 78 | "source": [ 79 | "## **2. Tutorial steps**" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "7820a737-33f0-4470-b9a4-03c5c4f0354c", 85 | "metadata": {}, 86 | "source": [ 87 | "### **Import libraries**\n", 88 | "We need just two libraries, `os` for creating the directory to store the downloaded data in and `earthaccess` to authenticate, search for and download the data. 
" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "id": "059690ab-7dff-45c9-816a-6060a191f550", 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "#Import libraries \n", 99 | "\n", 100 | "import os # needed to create the directory to store the downloaded data\n", 101 | "import earthaccess # used for authentication and searching for downloading the data" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "id": "1966ffa6-a5f2-4520-a8dc-f37678a2cf7a", 107 | "metadata": {}, 108 | "source": [ 109 | "### **Authenticate**\n", 110 | "\n", 111 | "The first step is to authenticate using our Earthdata Login credentials. The `login` method will automatically search for these credentials as environment variables or in a `.netrc` files, and if those aren't available it will prompt us to enter our username and password. We use a `.netrc` strategy. A `.netrc` file is a text file located in our home directory that contains login information for remote machines. If we don't have a `.netrc` file, `login` can create one for us:\n", 112 | "```\n", 113 | "earthaccess.login(strategy='interactive',persist=True)\n", 114 | "```" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "id": "d47aa955-3d91-4418-85f9-5772f400f712", 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "auth = earthaccess.login()" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "id": "95e2532d-219b-4b9d-b5b9-b43c95b1aa7d", 130 | "metadata": { 131 | "tags": [] 132 | }, 133 | "source": [ 134 | "### **Search for SPL3SMP data using spatial and temporal filters**\n", 135 | "We will use the `search_data` method from the `earthaccess` library and the following variabes to search for granules within the SPL3SMP data set:\n", 136 | "* `short_name` - this is the data set ID e.g. SPL3SMP. It can be found in the data set title on the data set landing page.\n", 137 | "* `version` - data set version number, also included in the data set title.\n", 138 | "* `cloud_hosted` - NSIDC is in the process of migrating data sets to the cloud. The data set we are interested is currently still archived on-premises so we will set this to False.\n", 139 | "* `temporal` - set a temporal filter by specifying a start and end date in the format YYYY-MM-DD. In this tutorial we will look for data for the month of March 2017.\n", 140 | "\n", 141 | "It will output the number of granules that meet the search criteria." 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "id": "d66e54ff-71dc-422c-9e8a-5b154fa0dbf7", 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "#Search for SPL3SMP files \n", 152 | "\n", 153 | "results = earthaccess.search_data(\n", 154 | " short_name = 'SPL3SMP',\n", 155 | " version = '008',\n", 156 | " cloud_hosted = False,\n", 157 | " temporal = ('2017-03-01','2017-03-31')\n", 158 | ")" 159 | ] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "id": "c7307f44-93cd-49b0-aa11-ae85aca29722", 164 | "metadata": {}, 165 | "source": [ 166 | "### **Download the data** \n", 167 | "Now that we have found the granules that meet our search criteria, we can download them using the `download` method from `earthaccess`. First, we will create a new directory to save the files in." 
168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "id": "80d938ed-4fe6-4bff-b71a-cce39e7a9bd4", 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "this_dir = os.getcwd()\n", 178 | "DATA_DIR = os.path.join(this_dir, 'data/L3_SM_P')\n", 179 | "\n", 180 | "if not os.path.exists(DATA_DIR):\n", 181 | " os.makedirs(DATA_DIR)\n" 182 | ] 183 | }, 184 | { 185 | "cell_type": "markdown", 186 | "id": "0c0fc789-ac80-474a-9928-8d9d4564ceac", 187 | "metadata": {}, 188 | "source": [ 189 | "Now we will download the SPL3SMP data for March 2017." 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": null, 195 | "id": "467ece65-932a-46e1-9f4c-1b47b628266b", 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "smap_files = earthaccess.download(results,DATA_DIR)" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "id": "910b2ef6-3e14-475e-b689-77bda4c1814e", 205 | "metadata": {}, 206 | "source": [ 207 | "## **3. Learning outcomes recap (optional)**\n", 208 | "\n", 209 | "1. Authenticate with your Earthdata Login credentials.\n", 210 | "2. Search for SMAP data.\n", 211 | "3. Bulk download SMAP data.\n" 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": null, 217 | "id": "3b6b4172-7ba8-451d-9051-912aea174adf", 218 | "metadata": {}, 219 | "outputs": [], 220 | "source": [] 221 | } 222 | ], 223 | "metadata": { 224 | "kernelspec": { 225 | "display_name": "Python 3 (ipykernel)", 226 | "language": "python", 227 | "name": "python3" 228 | }, 229 | "language_info": { 230 | "codemirror_mode": { 231 | "name": "ipython", 232 | "version": 3 233 | }, 234 | "file_extension": ".py", 235 | "mimetype": "text/x-python", 236 | "name": "python", 237 | "nbconvert_exporter": "python", 238 | "pygments_lexer": "ipython3", 239 | "version": "3.9.15" 240 | } 241 | }, 242 | "nbformat": 4, 243 | "nbformat_minor": 5 244 | } 245 | -------------------------------------------------------------------------------- /notebooks/SMAP/01_download_smap_data_rendered.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e86eaecf-a612-4dbb-8bdc-5b5dfddf65b9", 6 | "metadata": {}, 7 | "source": [ 8 | "
\n", 9 | "\n", 10 | "\n", 11 | "# **1.0 Access SMAP data with Python**\n", 12 | "\n", 13 | "
\n", 14 | "---" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "id": "4101ae06-3984-435c-abcc-f6346d15069b", 20 | "metadata": {}, 21 | "source": [ 22 | "## **1. Tutorial Introduction/Overview**\n", 23 | "\n", 24 | "We will use the `earthaccess` library to authenticate with our Earthdata Login credentials and to search for and bulk download SMAP data. For this tutorial we wil use SPL3SMP version 008 as an example, but the same method can be applied to any other SMAP data sets archived at NSIDC. \n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "id": "dd6c0128-efe4-4fab-8721-55fc366e3c7e", 30 | "metadata": {}, 31 | "source": [ 32 | "### **Credits**\n", 33 | "\n", 34 | "This tutorial is based on the notebooks originally provided to NSIDC by Adam Purdy. Jennifer Roebuck of NSIDC updated the tutorials to include the latest version of SMAP data and use `earthaccess` for authentication, seatching for and downloading the data in order to incorporate it into the NSIDC-Data-Tutorials repo. \n", 35 | "\n", 36 | "For questions regarding the notebook, or to report problems, please create a new issue in the [NSIDC-Data-Tutorials repo](https://github.com/nsidc/NSIDC-Data-Tutorials/issues)." 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "id": "a57c664e-76f9-416e-ae03-75dce51b3cb7", 42 | "metadata": {}, 43 | "source": [ 44 | "### **Learning Goals**\n", 45 | "\n", 46 | "After completing this notebook you will be able to use the `earthaccess` library to:\n", 47 | "1. Authenticate with your Earthdata Login credentials.\n", 48 | "2. Search for SMAP data.\n", 49 | "3. Bulk download SMAP data." 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "id": "015703a9-f02a-42f4-8ff0-3b002bf4f2f5", 55 | "metadata": {}, 56 | "source": [ 57 | "### **Prerequisites**\n", 58 | "\n", 59 | "1. An Earthdata Login is required for data access. If you don't have one, you can register for one [here](https://urs.earthdata.nasa.gov/).\n", 60 | "2. A .netrc file, that contains your Earthdata Login credentials, in your home directory. The current recommended practice for authentication is to create a .netrc file in your home directory following these [instructions](https://nsidc.org/data/user-resources/help-center/programmatic-data-access-guide).\n", 61 | "3. The nsidc-tutorials environment is set up and activated. This [README](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/README.md) has setup instructions.\n" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "id": "c45f3276-1172-4bfb-8389-e9d3cbbe88f4", 67 | "metadata": {}, 68 | "source": [ 69 | "### **Time requirement**\n", 70 | "\n", 71 | "Allow 5 to 10 minutes to complete this tutorial." 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "id": "53b77eb5-d5ed-4ddd-8fb1-6c69618d7852", 77 | "metadata": {}, 78 | "source": [ 79 | "## **2. Tutorial steps**" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "7820a737-33f0-4470-b9a4-03c5c4f0354c", 85 | "metadata": {}, 86 | "source": [ 87 | "### **Import libraries**\n", 88 | "We need just two libraries, `os` for creating the directory to store the downloaded data in and `earthaccess` to authenticate, search for and download the data. 
" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 1, 94 | "id": "059690ab-7dff-45c9-816a-6060a191f550", 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "#Import libraries \n", 99 | "\n", 100 | "import os # needed to create the directory to store the downloaded data\n", 101 | "import earthaccess # used for authentication and searching for downloading the data" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "id": "1966ffa6-a5f2-4520-a8dc-f37678a2cf7a", 107 | "metadata": {}, 108 | "source": [ 109 | "### **Authenticate**\n", 110 | "\n", 111 | "The first step is to authenticate using our Earthdata Login credentials. The `login` method will automatically search for these credentials as environment variables or in a `.netrc` files, and if those aren't available it will prompt us to enter our username and password. We use a `.netrc` strategy. A `.netrc` file is a text file located in our home directory that contains login information for remote machines. If we don't have a `.netrc` file, `login` can create one for us:\n", 112 | "```\n", 113 | "earthaccess.login(strategy='interactive',persist=True)\n", 114 | "```" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 2, 120 | "id": "d47aa955-3d91-4418-85f9-5772f400f712", 121 | "metadata": {}, 122 | "outputs": [ 123 | { 124 | "name": "stdout", 125 | "output_type": "stream", 126 | "text": [ 127 | "EARTHDATA_USERNAME and EARTHDATA_PASSWORD are not set in the current environment, try setting them or use a different strategy (netrc, interactive)\n", 128 | "You're now authenticated with NASA Earthdata Login\n", 129 | "Using token with expiration date: 08/26/2023\n", 130 | "Using .netrc file for EDL\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "auth = earthaccess.login()" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "id": "95e2532d-219b-4b9d-b5b9-b43c95b1aa7d", 141 | "metadata": { 142 | "tags": [] 143 | }, 144 | "source": [ 145 | "### **Search for SPL3SMP data using spatial and temporal filters**\n", 146 | "We will use the `search_data` method from the `earthaccess` library and the following variabes to search for granules within the SPL3SMP data set:\n", 147 | "* `short_name` - this is the data set ID e.g. SPL3SMP. It can be found in the data set title on the data set landing page.\n", 148 | "* `version` - data set version number, also included in the data set title.\n", 149 | "* `cloud_hosted` - NSIDC is in the process of migrating data sets to the cloud. The data set we are interested is currently still archived on-premises so we will set this to False.\n", 150 | "* `temporal` - set a temporal filter by specifying a start and end date in the format YYYY-MM-DD. In this tutorial we will look for data for the month of March 2017.\n", 151 | "\n", 152 | "It will output the number of granules that meet the search criteria." 
153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": 3, 158 | "id": "d66e54ff-71dc-422c-9e8a-5b154fa0dbf7", 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "name": "stdout", 163 | "output_type": "stream", 164 | "text": [ 165 | "Granules found: 31\n" 166 | ] 167 | } 168 | ], 169 | "source": [ 170 | "#Search for SPL3SMP files \n", 171 | "\n", 172 | "results = earthaccess.search_data(\n", 173 | " short_name = 'SPL3SMP',\n", 174 | " version = '008',\n", 175 | " cloud_hosted = False,\n", 176 | " temporal = ('2017-03-01','2017-03-31')\n", 177 | ")" 178 | ] 179 | }, 180 | { 181 | "cell_type": "markdown", 182 | "id": "c7307f44-93cd-49b0-aa11-ae85aca29722", 183 | "metadata": {}, 184 | "source": [ 185 | "### **Download the data** \n", 186 | "Now that we have found the granules that meet our search criteria, we can download them using the `download` method from `earthaccess`. First, we will create a new directory to save the files in." 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 4, 192 | "id": "80d938ed-4fe6-4bff-b71a-cce39e7a9bd4", 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "this_dir = os.getcwd()\n", 197 | "DATA_DIR = os.path.join(this_dir, 'data/L3_SM_P')\n", 198 | "\n", 199 | "if not os.path.exists(DATA_DIR):\n", 200 | " os.makedirs(DATA_DIR)\n" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "id": "0c0fc789-ac80-474a-9928-8d9d4564ceac", 206 | "metadata": {}, 207 | "source": [ 208 | "Now we will download the SPL3SMP data for March 2017." 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 5, 214 | "id": "467ece65-932a-46e1-9f4c-1b47b628266b", 215 | "metadata": {}, 216 | "outputs": [ 217 | { 218 | "name": "stdout", 219 | "output_type": "stream", 220 | "text": [ 221 | " Getting 31 granules, approx download size: 0.93 GB\n" 222 | ] 223 | }, 224 | { 225 | "data": { 226 | "application/vnd.jupyter.widget-view+json": { 227 | "model_id": "bd55adc8cc1b42d19658cbcc885b9c79", 228 | "version_major": 2, 229 | "version_minor": 0 230 | }, 231 | "text/plain": [ 232 | "SUBMITTING | : 0%| | 0/31 [00:00\n", 9 | "\n", 10 | "\n", 11 | "# **3.0 SMAP Quality Flags**\n", 12 | "\n", 13 | "\n", 14 | "---" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "id": "48e87d85-674a-4f38-8af9-14db223b0d96", 20 | "metadata": {}, 21 | "source": [ 22 | "## 1. **Overview**\n", 23 | "\n", 24 | "This provides an overview of the retrieval quality flags and surface quality flags that are used with SMAP data. \n", 25 | "\n", 26 | "* Retrieval Quality Flag (combines all surface flags)\n", 27 | "* Surface Quality Flag (provides information on why certain areas might be flagged) " 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "id": "88418780-4ced-44d3-a54c-01059c25f5f7", 33 | "metadata": {}, 34 | "source": [ 35 | "### **Credits**\n", 36 | "This tutorial is based on the notebooks originally provided to NSIDC by Adam Purdy. Jennifer Roebuck of NSIDC updated the tutorials to include the latest version of SMAP data and use earthaccess for authentication, seatching for and downloading the data in order to incorporate it into the NSIDC-Data-Tutorials repo. 
\n", 37 | "\n", 38 | "For questions regarding the notebook, or to report problems, please create a new issue in the [NSIDC-Data-Tutorials repo](https://github.com/nsidc/NSIDC-Data-Tutorials/issues).\n" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "id": "faef4353-4c94-45fe-9832-3831f3fa37e0", 44 | "metadata": {}, 45 | "source": [ 46 | "### **Learning Goals**\n", 47 | "\n", 48 | "1. Understand the retrieval and surface quality flags and how to use them" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "id": "d21ca604-20de-460d-be82-d504720160d6", 54 | "metadata": {}, 55 | "source": [ 56 | "### **Prerequisites**\n", 57 | "\n", 58 | "1. The nsidc-tutorials environment is set up and activated. This [README](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/README.md) has setup instructions.\n", 59 | "2. SMAP data that were downloaded in the first notebook tutorial - 1.0 Download SMAP data. " 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "id": "bc4ebd76-7580-4c7c-838b-7b0aae2a97a3", 65 | "metadata": {}, 66 | "source": [ 67 | "### **Time Requirement**\n", 68 | "\n", 69 | "Allow approximtely 5 to 10 minutes to complete this tutorial. " 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "id": "5770fce2-57be-4510-bdb6-b071c220d79b", 75 | "metadata": {}, 76 | "source": [ 77 | "## **2. Tutorial Steps**" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "id": "1087c320-3a87-420b-b241-0fbe56620f9b", 83 | "metadata": {}, 84 | "source": [ 85 | "### Import libraries\n" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "a3353e2a-5005-43d8-839a-e9c7e16fcf08", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "import datetime as dt\n", 96 | "import glob\n", 97 | "import h5py\n", 98 | "%matplotlib inline\n", 99 | "import matplotlib.pyplot as plt\n", 100 | "import numpy as np\n", 101 | "import os" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "id": "921ec8d3-5486-48a6-9684-15f66aef587a", 107 | "metadata": {}, 108 | "source": [ 109 | "Read in the SMAP data that we downloaded in the previous notebook." 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "id": "759498fc-2ded-4288-9d59-5bfbaf1c22ff", 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "this_dir = os.getcwd()\n", 120 | "L3_SM_P_dir = os.path.join(this_dir, 'data/L3_SM_P/')\n", 121 | "\n", 122 | "flist = glob.glob(os.path.join(L3_SM_P_dir, '*.h5'))\n", 123 | "filename = flist[0]; \n" 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "id": "358efeb8-49c7-496e-94d6-558178ae4cfb", 129 | "metadata": {}, 130 | "source": [ 131 | "Read in the soil moisture and surface_flag variables from the Soil_Moisture_Retrieval_Data_AM group in each of the files. 
" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "id": "bc77eeae-bfa0-4a3c-88fc-2544b8fbc4e4", 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "f = h5py.File(filename, 'r')\n", 142 | "group_id = 'Soil_Moisture_Retrieval_Data_AM'\n", 143 | "var_id = list(f[group_id].keys())[25] # soil_moisture\n", 144 | "sm_data = f[group_id][var_id][:,:]\n", 145 | "surf_flag_L3_P = f[group_id]['surface_flag'][:,:]" 146 | ] 147 | }, 148 | { 149 | "cell_type": "markdown", 150 | "id": "7cf93589-41b5-44d7-988f-394fbf18c514", 151 | "metadata": {}, 152 | "source": [ 153 | "Now lets look at the two types of flags\n", 154 | "\n", 155 | "### Retrieval Flags \n", 156 | "\n", 157 | "Four different values are possible, as outlined in the cell below. We will plot the retrieval quality flag and in the resulting plot areas that have a value of 0 (black regions) include data of recommended quality. " 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "id": "a85e1545-84ea-485a-8653-e37a27654e40", 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "# Grab the Retrieval Quality Variable\n", 168 | "ret_flag_L3_P = f[group_id]['retrieval_qual_flag'][:,:]\n", 169 | "# Create a definition for the retrieval flags\n", 170 | "ret_flags = {\n", 171 | " 0:'Recommended Quality',\n", 172 | " 1:'Retrieval Attempted',\n", 173 | " 2:'Retrieval Successful',\n", 174 | " 3:'Undefined'\n", 175 | "}\n", 176 | "# SMAP RECOMMENDED QUALITY BIT IS 0\n", 177 | "fig, ax = plt.subplots()\n", 178 | "\n", 179 | "cax = ax.imshow((ret_flag_L3_P>>0)&1, cmap=plt.cm.get_cmap('bone', 2))\n", 180 | "ax.set_title(ret_flags[0])\n", 181 | "\n", 182 | "cbar = fig.colorbar(cax, ticks=[0, 1], orientation='horizontal')\n", 183 | "cbar.ax.set_xticklabels(['Good Data', 'Not Recommended']) # horizontal colorbar\n", 184 | "\n", 185 | "plt.show()\n" 186 | ] 187 | }, 188 | { 189 | "cell_type": "markdown", 190 | "id": "6537954f-7f34-4ce9-8e1d-a4af62dc32ec", 191 | "metadata": {}, 192 | "source": [ 193 | "### Surface Flags \n", 194 | "\n", 195 | "The different values that the flag can have are listed in the cell below " 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "id": "c2e7941b-c392-4c91-be1c-5175b5b507a1", 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "surf_flags = {\n", 206 | " 0:'Static water body',\n", 207 | " 1:'Radar water body detection',\n", 208 | " 2:'Coastal Proximity',\n", 209 | " 3:'Urban Area',\n", 210 | " 4:'Precipitation',\n", 211 | " 5:'Snow or Ice',\n", 212 | " 6:'Permanent Snow or Ice',\n", 213 | " 7:'Frozen Ground (radiometer)',\n", 214 | " 8:'Frozen Ground (model)',\n", 215 | " 9:'Mountainous Terrain',\n", 216 | " 10:'Dense Vegetation',\n", 217 | " 11:'Nadir Region',\n", 218 | " 12:'Undefined'\n", 219 | "}" 220 | ] 221 | }, 222 | { 223 | "cell_type": "markdown", 224 | "id": "d92e6b62-08c2-4267-ae47-f299569dc206", 225 | "metadata": {}, 226 | "source": [ 227 | "Now we will plot the surface flags, where black areas indicate no flag and white areas indicate flagged data." 
228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": null, 233 | "id": "689bfe5b-60e6-4c7c-8a61-63947721537a", 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [ 237 | "for i in np.arange(0,12):\n", 238 | " fig, ax = plt.subplots()\n", 239 | " cax = ax.imshow((surf_flag_L3_P>>i)&1, cmap=plt.cm.get_cmap('bone', 2))\n", 240 | " ax.set_title(surf_flags[i])\n", 241 | " cbar = fig.colorbar(cax, ticks=[0, 1], orientation='horizontal')\n", 242 | " cbar.ax.set_xticklabels(['No Flag', 'Flag Present']) # horizontal colorbar\n", 243 | " plt.show()" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "id": "e544f7dc-15b4-473a-b09a-043b13a94b97", 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [] 253 | } 254 | ], 255 | "metadata": { 256 | "kernelspec": { 257 | "display_name": "Python 3 (ipykernel)", 258 | "language": "python", 259 | "name": "python3" 260 | }, 261 | "language_info": { 262 | "codemirror_mode": { 263 | "name": "ipython", 264 | "version": 3 265 | }, 266 | "file_extension": ".py", 267 | "mimetype": "text/x-python", 268 | "name": "python", 269 | "nbconvert_exporter": "python", 270 | "pygments_lexer": "ipython3", 271 | "version": "3.9.15" 272 | } 273 | }, 274 | "nbformat": 4, 275 | "nbformat_minor": 5 276 | } 277 | -------------------------------------------------------------------------------- /notebooks/SMAP/EASE2_M36km.lats.964x406x1.double: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/SMAP/EASE2_M36km.lats.964x406x1.double -------------------------------------------------------------------------------- /notebooks/SMAP/EASE2_M36km.lons.964x406x1.double: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/SMAP/EASE2_M36km.lons.964x406x1.double -------------------------------------------------------------------------------- /notebooks/SMAP/README.md: -------------------------------------------------------------------------------- 1 | ## SMAP Python Notebooks 2 | 3 | ### Summary 4 | 5 | In this set of three tutorials we demonstrate how to search for, download and plot SMAP data. Tutorial 1 demonstrates how to search for and download SMAP data using the `earthaccess` library. The second tutorial demonstrates how to read in and plot the data downloaded in Tutorial 1. And Tutorial 3 provides information on the surface quality and retrieval quality flags. 6 | 7 | We use the [SMAP L3 Radiometer Global Daily 36 km EASE-Grid Soil Moisture, Version 8](https://nsidc.org/data/SPL3SMP/versions/8) data set as an example. 8 | 9 | **NOTE** these notebooks are an updated version of the notebooks orginially published in this [repo](https://github.com/nsidc/smap_python_notebooks/tree/main). The notebooks are based on notebooks originally provided to NSIDC by Adam Purdy. Jennifer Roebuck of NSIDC applied the following updates: 10 | * Used `earthaccess` instead of `requests` for authentication, searching for and downloading the data. This reduced the code to just a few lines. 11 | * Used a more recent version of SPL3SMP (version 8). 12 | * Replaced the use of `Basemap` with `cartopy`. 13 | * Updated the surface quality flag names to reflect the ones used in the latest version. 
14 | * Minor text edits to provide additional information where necessary. 15 | * Moved the tutorials to the standard NSIDC tutorials template. 16 | 17 | ### Set up 18 | 19 | To run the notebook provided in this folder, please see the [NSIDC-Data-Tutorials repository readme](https://github.com/nsidc/NSIDC-Data-Tutorials#readme) for instructions on several ways (using Binder, Docker, or Conda) to do this. 20 | 21 | ### Key Learning Objectives 22 | 23 | 1. Use the `earthaccess` library to search for and download SMAP data. 24 | 2. Use the `h5py` library to read in the HDF5 files and plot the variables on a map using `cartopy` and `matplotlib`. 25 | 3. Understand the surface quality and retrieval quality flag options. 26 | -------------------------------------------------------------------------------- /notebooks/SMAP/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: smap-tutorial 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.10 6 | - jupyterlab 7 | - earthaccess 8 | - h5py 9 | - xarray 10 | - rioxarray 11 | - h5netcdf 12 | - matplotlib 13 | - cartopy 14 | - affine 15 | - pipreqsnb 16 | - conda-lock>=1.2.1 17 | - mamba>=1.0 18 | platforms: 19 | - linux-64 20 | - osx-64 21 | - win-64 22 | -------------------------------------------------------------------------------- /notebooks/SMAP/img/nsidc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/SMAP/img/nsidc_logo.png -------------------------------------------------------------------------------- /notebooks/SnowEx_ASO_MODIS_Snow/Data-download-polygon-export.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/SnowEx_ASO_MODIS_Snow/Data-download-polygon-export.png -------------------------------------------------------------------------------- /notebooks/SnowEx_ASO_MODIS_Snow/Data/nsidc-polygon.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "Feature", 3 | "geometry": { 4 | "type": "Polygon", 5 | "coordinates": [ 6 | [ 7 | [ 8 | -108.2352445938561, 9 | 38.98556907427165 10 | ], 11 | [ 12 | -107.85284607930835, 13 | 38.978765032966244 14 | ], 15 | [ 16 | -107.85494925720668, 17 | 39.10596902171742 18 | ], 19 | [ 20 | -108.22772795408136, 21 | 39.11294532581687 22 | ], 23 | [ 24 | -108.2352445938561, 25 | 38.98556907427165 26 | ] 27 | ] 28 | ] 29 | }, 30 | "properties": {} 31 | } -------------------------------------------------------------------------------- /notebooks/SnowEx_ASO_MODIS_Snow/README.md: -------------------------------------------------------------------------------- 1 | # Snow Depth and Snow Cover Data Exploration 2 | 3 | ## Summary 4 | 5 | This tutorial demonstrates how to access and compare coincident snow data from the National Snow and Ice Data Center Distributed Active Archive Center (NSIDC DAAC) across in-situ, airborne, and satellite platforms from NASA's SnowEx, ASO, and MODIS data sets, respectively. 6 | 7 | ## Key Learning Objectives 8 | 9 | 1. Learn about the coverage, resolution, and structure of snow data sets from NASA's SnowEx, ASO, and MODIS data sets. 10 | 11 | 2. Learn how to find and download spatiotemporally coincident data across in-situ, airborne, and satellite observations. 
12 | 13 | 3. Learn how to read data into Python from CSV and GeoTIFF formats. 14 | 15 | 4. Learn how to subset data based on a buffered area. 16 | 17 | 5. Learn how to extract and visualize raster values at point locations. 18 | -------------------------------------------------------------------------------- /notebooks/SnowEx_ASO_MODIS_Snow/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: nsidc-tutorials 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - pangeo-notebook 7 | - xarray 8 | - earthaccess 9 | - matplotlib-base 10 | - shapely 11 | - geopandas 12 | - h5py 13 | - pyresample 14 | - fiona 15 | - descartes 16 | - rasterio 17 | - cartopy 18 | platforms: 19 | - linux-64 20 | - osx-64 21 | - win-64 22 | -------------------------------------------------------------------------------- /notebooks/iceflow/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = 3 | F812, # list comprehension redefines 4 | H101 # Use TODO(NAME) 5 | exclude = 6 | .git, 7 | __pycache__, 8 | docs/source/conf.py, 9 | dist 10 | max-line-length = 120 11 | max-complexity = 10 12 | -------------------------------------------------------------------------------- /notebooks/iceflow/1_widget.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "\n", 9 | "\n", 10 | "# **IceFlow**\n", 11 | "### Point Cloud Data Access\n", 12 | "
\n", 13 | "\n", 14 | "# Accessing Data with the IceFlow Widget\n", 15 | "\n", 16 | "\n", 17 | "### NASA's Earthdata Credentials\n", 18 | "\n", 19 | "To access data using the *IceFlow* library it is necessary to log into [Earthdata Login](https://urs.earthdata.nasa.gov/). To do this, enter your NASA Earthdata credentials in the next step after executing the following code cell.\n", 20 | "\n", 21 | "**Note**: If you don't have NASA Earthdata credentials you have to register first at the link above. You don't need to be a NASA employee to register with NASA Earthdata!\n" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": { 28 | "tags": [] 29 | }, 30 | "outputs": [], 31 | "source": [ 32 | "# Importing IceFlow client library\n", 33 | "from iceflow.ui import IceFlowUI\n", 34 | "from iceflow.client import IceflowClient\n", 35 | "import earthaccess\n", 36 | "# Instantiateing the client\n", 37 | "ui = IceFlowUI()\n", 38 | "ifc = IceflowClient()\n", 39 | "# You need to use your NASA Earthdata Credentials and verify that they work.\n", 40 | "# Please click on set credentials and then see if authentication is successful by executing the next cell.\n", 41 | "auth = earthaccess.login()" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": { 48 | "tags": [] 49 | }, 50 | "outputs": [], 51 | "source": [ 52 | "# This cell will verify if your credentials are valid. \n", 53 | "# This may take a little while, if it fails for some reason try again.\n", 54 | "# NOTE: Wednesday mornings are usually downtime for NSIDC services and you might experience difficulties accessing data.\n", 55 | "authorized = ifc.authenticate(auth.username, auth.password, auth.user_profile[\"email_address\"])\n", 56 | "if authorized:\n", 57 | " print(\"We are logged into NSIDC's data ordering system\")\n", 58 | " ui.iceflow = ifc" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "**Note:** To view the widget output you need to enable Jupyter's log console. You can activate the console by clicking the right-most icon at the bottom of your browser or by slecting “View” >> “Show log console” on the menu bar.\n", 66 | "\n", 67 | "Let's start loading the user interface. Next, we will explain all options and user interface components.\n", 68 | "vertical = Sidecar widget, horizontal = render the widget in this notebook. Note that depending on your screen size and resolution the 'vertical' display option may not work correctly. This is a current bug in the jupyter-widget that can not be solved within the scope of IceFlow." 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": { 75 | "tags": [] 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "ui.display_map('horizontal', extra_layers=True)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "#### IceFlow user interface (UI) components\n", 87 | "\n", 88 | "\n", 89 | "This user interface uses [*ipyleaflet*](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a) which allows us to draw\n", 90 | "polygons or bounding boxes to delimit our area of interest. We can also edit and delete these geometries using the widget controls in the map.\n", 91 | "
\n", 92 | "
**The following list describes all user interface data selection options:**\n", 93 | "\n", 94 | "- **Hemisphere**: Choose which map projection you are going to use, you can pick global, north or south.\n", 95 | "\n", 96 | "- **Data sets**: Choose one ore more data sets from the selection. For more than one data set use \"CTRL+Space\" or \"CTRL+Click\" on Windows and Linux or \"command+click\" on a Mac. Note: ATM1B includes the 3 different ATM products (BLATM L1B, ILATM L1B v1, ILATM L1B V2), see the above table for more details.\n", 97 | "\n", 98 | "- **ITRF (optional)**: Choose an International Terrestrial Reference Frame for more details see [ITRF](corrections.ipynb).\n", 99 | "\n", 100 | "- **Epoch (optional)**: Populate this field with the epoch value in which you want the coordinate reference systems to be based. This can only be applied if a ITRF is selected. (e.g. if you use 2010.1 and ITRF 2014 then all the points will be shifted to match the best ground representation as if they were in January 2010. This is compensating for plate tectonic motion.)\n", 101 | "\n", 102 | "- **ICESat-2**: If you additionally want to place a data order for ICESat-2 data (using icepyx) utilizing the current parameters you need to select the short name code of the desired data set i.e. ATL06. \n", 103 | "\n", 104 | "- **Date Range**: This slider control allows you to select a start and end date of interest.\n", 105 | "\n", 106 | "- **Inside Map options**: In the map part of the widget, you can zoom in and out, draw a polygon or bounding boxes and edit them to select an area of interest. You can also turn on and off the layers that show IceBridge flights and Ice Velocities.\n", 107 | "\n", 108 | "**The following list describes all user interface buttons:**\n", 109 | "\n", 110 | "- The **Get Raw Granule Count** button will query [NASA's CMR](https://earthdata.nasa.gov/eosdis/science-system-description/eosdis-components/cmr) to get a granule count for the current parameters, you need to have a geometry and one or more data sets selected. The result of the query gets displayed in the log window. **Important:** Check the selected raw granule count before placing an order. As a rule of thumb, you can expect a wait time of approximately 10 minutes each 1 Gigabyte of data selected. Keep in mind to run this notebook locally (not with the Binder) if you have large data orders as the Binder will time out after approximately 10 minutes.\n", 111 | "\n", 112 | "- The **Print Current Parameter** button displays the selected start and end time, bounding box and data set(s) in the log window.\n", 113 | "\n", 114 | "- The **Place Data Order** button will submit an *IceFlow* order using the current user interface parameters, this is an **asynchronous** process, you will have to wait until the order is completed before you can work with the data or place a new order but this does not block you from exploring the rest of the notebook while waiting for the order to complete.\n", 115 | "\n", 116 | "- The **Check Order Status** button will output the status of the order in the log window.\n", 117 | "\n", 118 | "- The **Download Data Order** button will download the data from an order that has been completed.\n", 119 | "\n", 120 | "**Notes**: \n", 121 | "* If you use the bounding box geometry in a polar projection, you'll notice a distortion due the nature of polar coordinates, if you prefer, you can use the global Mercator map to draw a bounding box without apparent distortion. 
The better option is to draw a polygon or enter your exact desired coordinates. [How to do that will be covered later in this tutorial.] \n", 122 | "* The calculated download size of these granules is an upper bound since *IceFlow* allows us to subset the data. \n" 123 | ] 124 | } 125 | ], 126 | "metadata": { 127 | "kernelspec": { 128 | "display_name": "Python 3 (ipykernel)", 129 | "language": "python", 130 | "name": "python3" 131 | }, 132 | "language_info": { 133 | "codemirror_mode": { 134 | "name": "ipython", 135 | "version": 3 136 | }, 137 | "file_extension": ".py", 138 | "mimetype": "text/x-python", 139 | "name": "python", 140 | "nbconvert_exporter": "python", 141 | "pygments_lexer": "ipython3", 142 | "version": "3.9.16" 143 | } 144 | }, 145 | "nbformat": 4, 146 | "nbformat_minor": 4 147 | } 148 | -------------------------------------------------------------------------------- /notebooks/iceflow/2_api.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "\n", 9 | "\n", 10 | "# **IceFlow**\n", 11 | "### Point Cloud Data Access\n", 12 | "
\n", 13 | "\n", 14 | "---\n", 15 | "\n", 16 | "# Accessing Data using the IceFlow API\n", 17 | "\n", 18 | "This notebook shows how to access data programmatically using the *IceFlow* API and describes the parameters in more detail.
\n", 19 | "\n", 20 | "\n", 21 | "## API Parameters:\n", 22 | "\n", 23 | "### Selecting Data Sets\n", 24 | "\n", 25 | "* The ***datasets*** parameter allows to list all data set of interest. See the [introduction notebook](./0_introduction.ipynb) section 4 for more information on the available data sets.
\n", 26 | "Example: `'datasets': ['GLAH06', 'ATL06']`\n", 27 | "\n", 28 | "### Choosing Optional Corrections\n", 29 | "\n", 30 | "* The optional ***ITRF*** parameter allows you to choose an ITRF reference to which the data will be transformed via the published ITRF transformation parameters. This parameter is optional but must be used if you want to specify an epoch. Available values are: **ITRF2000, ITRF2008, ITRF2014**
\n", 31 | "Example: `'ITRF': '2014'`\n", 32 | "* The ***epoch*** parameter is optional and entered in decimal years to which the data will be transformed via the ITRF Plate Motion Model corresponding to ITRF. This parameter can only be used if the ***ITRF*** parameter is specified and set to either 2008 or 20014, as only ITRF2008 and ITRF2014 have a plate motion model.
\n", 33 | "Example: `'epoch': '2014.1'` (This specifies January 2014.)\n", 34 | "\n", 35 | "### Setting Temporal Extend\n", 36 | "\n", 37 | "You can set the range of dates over which to return data using the ***start*** and ***end*** parameters:\n", 38 | "* The ***start*** parameter accepts UTF datetime or simple YYYY-mm-dd formatted values
\n", 39 | "Example: `'start': '2003-01-01'`\n", 40 | "* The ***end*** parameter accepts UTF datetime or simple YYYY-mm-dd formatted values
\n", 41 | "Example: `'end': '2019-01-01'`\n", 42 | "\n", 43 | "### Setting Spatial Extend\n", 44 | "\n", 45 | "You can use one of the two available options (***polygon***,***bbox***) to make your spatial selection:\n", 46 | "\n", 47 | "* The ***Polygon*** parameter allows you to enter a counterclockwise closed array of latitude and longitude values ending with the same coordinate as the starting pair.
\n", 48 | "Example: `'polygon'='-50.5811,69.4995,-49.3726,69.7632,-48.6035,69.8291,-47.8345,69.4995,-47.6587,68.8843,-48.6255,68.3130,-49.8077,69.0601,-50.4449,68.9941,-50.5811,69.4995'` \n", 49 | "\n", 50 | "* The ***bbox*** parameter allows you to choose a WGS84 bounding box with min_lon, min_lat, max_lon, max_lat values.
\n", 51 | "Example: `'bbox': '-107.4515,-75.3695,-105.3794,-74.4563'` \n", 52 | "\n", 53 | "The following code is an example on how to order data with the *IceFlow* API, but first you have to log into EarthData:\n", 54 | "### NASA's Earthdata Credentials\n", 55 | "\n", 56 | "To access data using the *IceFlow* library it is necessary to log into [Earthdata Login](https://urs.earthdata.nasa.gov/). We showed you how to do this with a user interface in the [introduction](0_introduction.ipynb) and the [widget](1_widget.ipynb) notebooks. Here we show you how to do that programmatically. For that you have to change the values for `'username'`,`'password'`, and `'email'` to your personal Earthdata credentials before you run the next cell. " 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "# We import our API client and authenticate with our NASA Earthdata credentials\n", 66 | "from iceflow.client import IceflowClient\n", 67 | "import earthaccess\n", 68 | "ifc = IceflowClient()\n", 69 | "\n", 70 | "auth = earthaccess.login()" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "if ifc.authenticate(auth.username, auth.password, auth.user_profile[\"email_address\"]) is not None:\n", 80 | " print('Authenticated with NASA Earthdata')\n", 81 | "else:\n", 82 | " print('Login failed')" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "# We use IceFlow to query CMR and get an idea of how many granules we'll be sub-setting.\n", 92 | "\n", 93 | "# Thwaites glacier ICESat/GLAS and ICESat-2\n", 94 | "my_params ={\n", 95 | " 'datasets': ['GLAH06', 'ATL06'],\n", 96 | " 'ITRF': '2014',\n", 97 | " 'epoch': '2014.1',\n", 98 | " 'start': '2003-01-01',\n", 99 | " 'end': '2019-01-01',\n", 100 | " 'bbox': '-107.4515,-75.3695,-105.3794,-74.4563'\n", 101 | "}\n", 102 | "granules = ifc.query_cmr(my_params)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "Now we order the data, this is an asynchronous process. Asynchronous downloads allow concurrent requests to be queued and processed without the need for a continuous connection. The maximum granule count per asynchronous request is 2000. When the order is complete.\n", 110 | "Now you can proceed to download the IceFlow data granule." 
111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "orders = []\n", 120 | "# Uncomment the following line to place the orders!\n", 121 | "# orders = ifc.place_data_orders(my_params)\n", 122 | "orders" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "# To check the order, you can run this cell:\n", 132 | "for order in orders:\n", 133 | " status = ifc.check_order_status(order)\n", 134 | " print(status)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "#Once all orders are complete you can download the data:\n", 144 | "for order in orders:\n", 145 | " status = ifc.check_order_status(order)\n", 146 | " if status['status'] == 'COMPLETE':\n", 147 | " ifc.download_order(order) " 148 | ] 149 | } 150 | ], 151 | "metadata": { 152 | "kernelspec": { 153 | "display_name": "Python 3 (ipykernel)", 154 | "language": "python", 155 | "name": "python3" 156 | }, 157 | "language_info": { 158 | "codemirror_mode": { 159 | "name": "ipython", 160 | "version": 3 161 | }, 162 | "file_extension": ".py", 163 | "mimetype": "text/x-python", 164 | "name": "python", 165 | "nbconvert_exporter": "python", 166 | "pygments_lexer": "ipython3", 167 | "version": "3.9.16" 168 | } 169 | }, 170 | "nbformat": 4, 171 | "nbformat_minor": 4 172 | } 173 | -------------------------------------------------------------------------------- /notebooks/iceflow/3_dataviz.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "\n", 9 | "\n", 10 | "# **IceFlow**\n", 11 | "### Point Cloud Data Access\n", 12 | "
\n", 13 | "\n", 14 | "---\n", 15 | "\n", 16 | "## Visualizing Large Data Sets\n", 17 | "IceFlow and ICESat-2 data sets are big data sets that require some special considerations when working with them. The main constraint is memory. The average granule size is in the 10s of Megabyte for ICESat-2 and could be Gigabytes in *IceFlow* depending on the order/subsetting. \n", 18 | "\n", 19 | "This notebook will use some basic plotting techniques using *vaex*, *matplotlib* and *geopandas* to work effectively with point cloud data from *IceFlow* and ICESat-2 data.\n" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "tags": [] 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "import warnings\n", 31 | "warnings.filterwarnings(\"ignore\")\n", 32 | "import glob\n", 33 | "import geopandas\n", 34 | "import pandas as pd\n", 35 | "import h5py\n", 36 | "import vaex\n", 37 | "import numpy as np\n", 38 | "from iceflow.processing import IceFlowProcessing as ifp\n", 39 | "\n", 40 | "# filepath = 'data/atm1b_data_2020-07-10T15-32.hdf5'\n", 41 | "# df_k = ifp.get_common_dictionary('ATM')\n", 42 | "\n", 43 | "filepath = 'data/twaties-test-GLAH06-2000-2010.h5'\n", 44 | "df_key = ifp.get_common_dictionary('GLAS')" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "## Loading Data with H5PY" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": { 58 | "tags": [] 59 | }, 60 | "outputs": [], 61 | "source": [ 62 | "%%time\n", 63 | "\n", 64 | "f = h5py.File(filepath, 'r')\n", 65 | "print(list(f.keys()))" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "## Vaex Decimating Big Datafames \n", 73 | "\n", 74 | "[*Vaex*](https://vaex.io/docs/index.html) is a python library for \"lazy\" processing (computing on the fly, no wasted ram), to visualize and explore big tabular data sets. Using *IceFlow*'s common data dictionary, we are going to create a \"decimated\" dataframe with only 1/100 of the size of the original dataframe to help us plot the data faster. \n" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": { 81 | "tags": [] 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "%%time\n", 86 | "\n", 87 | "df = vaex.open(filepath)\n", 88 | "# We're parsing the utc_datetime from IceFlow into a data type that vaex understands.\n", 89 | "df['date'] = df.utc_datetime.values.astype('datetime64[ns]')\n", 90 | "# my_df = df['longitude', 'latitude', 'elevation', 'date']\n", 91 | "# Note that we need a common dictionary because in GLAH06 elevation is d_elev and in ICESat-2 is called elevation! \n", 92 | "my_df = df[df_key['latitude'], df_key['longitude'], df_key['elevation'], 'date']\n", 93 | "# vaex.vrange() is like numpy.arange but uses 0-memory no matter the length.\n", 94 | "# This is to down-sample the data for dataviz see: https://github.com/vaexio/vaex/issues/911\n", 95 | "df.add_column('index', vaex.vrange(0, len(df)))\n", 96 | "# We are going to create a \"decimated\" dataframe with only 1/100 of the size of the original to plot the big picture faster.\n", 97 | "df_decimated = df[(df.index % 100 == 0)]\n", 98 | "my_df.describe()\n", 99 | "display(my_df)" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "## Visualizing the Big Picture\n", 107 | "\n", 108 | "Here we will quickly plot the decimated dataframe to view coverage and cross-over locations." 
109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": { 115 | "tags": [] 116 | }, 117 | "outputs": [], 118 | "source": [ 119 | "# There is a bug in Vaex that needs to be fixed: https://github.com/vaexio/vaex/pull/2353\n", 120 | "\n", 121 | "# my_df.widget.heatmap(my_df[df_key['longitude']], \n", 122 | "# my_df[df_key['latitude']],\n", 123 | "# what=vaex.stat.mean(my_df[df_key['elevation']]),\n", 124 | "# shape=512, \n", 125 | "# figsize=(10,6),\n", 126 | "# limits='minmax',\n", 127 | "# colormap='inferno')" 128 | ] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "metadata": {}, 133 | "source": [ 134 | "Next we can use *matplotlib* and *cartopy* to plot the decimated data in an interactive map widget." 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": { 141 | "tags": [] 142 | }, 143 | "outputs": [], 144 | "source": [ 145 | "%matplotlib widget\n", 146 | "import vaex\n", 147 | "from ipywidgets import widgets\n", 148 | "import matplotlib.pyplot as plt\n", 149 | "import cartopy.crs as ccrs\n", 150 | "\n", 151 | "plt.figure(figsize=(10,8), dpi= 90)\n", 152 | "ax = plt.axes(projection=ccrs.SouthPolarStereo(central_longitude=0)) \n", 153 | "ax.coastlines(resolution='50m', color='black', linewidth=1)\n", 154 | "ax.set_extent([-180, 180, -65, -90], ccrs.PlateCarree())\n", 155 | "plt.scatter(df_decimated[df_key['longitude']].values,\n", 156 | " df_decimated[df_key['latitude']].values,\n", 157 | " c=df_decimated[df_key['elevation']].values,\n", 158 | " cmap='viridis',\n", 159 | " vmin=100,vmax=200,\n", 160 | " transform=ccrs.PlateCarree())\n", 161 | "plt.colorbar(label='elevation', shrink=0.5, extend='both')" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "## \"Flying\" with the Sensor\n", 169 | "\n", 170 | "This plotting example allows us to \"fly\" along the decimated point cloud data track in a 3D graph environment." 
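For orientation, the slider in the cell below simply selects a fixed-size window of consecutive points along the track; a quick worked example of the indexing it performs (the `step` value of 5000 matches the code that follows, and the slider position is illustrative):

```python
step = 5000                # number of points shown at once, as in the cell below
alontrack = 2.0            # an example slider position
m = int(alontrack * step)  # -> 10000; the window covers points 10000 through 14999
```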
171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": { 177 | "tags": [] 178 | }, 179 | "outputs": [], 180 | "source": [ 181 | "%matplotlib widget\n", 182 | "from ipywidgets import widgets\n", 183 | "from ipywidgets import interact, interactive, fixed\n", 184 | "from mpl_toolkits.mplot3d import Axes3D\n", 185 | "import matplotlib.pyplot as plt\n", 186 | "\n", 187 | "\n", 188 | "fig = plt.figure(figsize=(10,6))\n", 189 | "ax = fig.add_subplot(111, projection='3d')\n", 190 | "ax.view_init(70, 70)\n", 191 | "\n", 192 | "#If the data granule(s) is big enough (1+GB), use the decimated dataframe.\n", 193 | "# df = df_decimated\n", 194 | "\n", 195 | "def plot_func(alontrack):\n", 196 | " step = 5000 # same as density\n", 197 | " m = int(alontrack * step)\n", 198 | " ax.clear()\n", 199 | " ax.scatter(df[df_key['longitude']].values[m:m+step],\n", 200 | " df[df_key['latitude']].values[m:m+step],\n", 201 | " df[df_key['elevation']].values[m:m+step],\n", 202 | " c=df[df_key['elevation']].values[m:m+step],\n", 203 | " cmap='viridis', s=1)\n", 204 | " ax.axis('tight')\n", 205 | "\n", 206 | "\n", 207 | "interact(plot_func, alontrack = widgets.FloatSlider(value=0,\n", 208 | " description='Along Track Steps',\n", 209 | " min=0,\n", 210 | " max=90,\n", 211 | " step=0.3,\n", 212 | " layout={'width': '100%'}))" 213 | ] 214 | } 215 | ], 216 | "metadata": { 217 | "kernelspec": { 218 | "display_name": "Python 3 (ipykernel)", 219 | "language": "python", 220 | "name": "python3" 221 | }, 222 | "language_info": { 223 | "codemirror_mode": { 224 | "name": "ipython", 225 | "version": 3 226 | }, 227 | "file_extension": ".py", 228 | "mimetype": "text/x-python", 229 | "name": "python", 230 | "nbconvert_exporter": "python", 231 | "pygments_lexer": "ipython3", 232 | "version": "3.9.16" 233 | } 234 | }, 235 | "nbformat": 4, 236 | "nbformat_minor": 4 237 | } 238 | -------------------------------------------------------------------------------- /notebooks/iceflow/README.md: -------------------------------------------------------------------------------- 1 | # IceFlow Point Cloud Data Access 2 | 3 | > [!CAUTION] 4 | > The IceFlow notebooks and supporting code have some known problems and users 5 | > should exercise caution. It is likely that users will run into errors while 6 | > interacting with the notebooks. Requests for ITRF transformations are not 7 | > currently working as expected. We recommend users look at the `corrections` 8 | > notebook for information about how to apply ITRF transformations to data 9 | > themselves. IceFlow is currently under maintenence, and we hope to resolve 10 | > some of these issues soon. 11 | 12 | ## Summary 13 | 14 | The IceFlow python library simplifies accessing and combining data from several of NASA's cryospheric altimetry missions, including ICESat/GLAS, Operation IceBridge, and ICESat-2. In particular, IceFlow harmonizes the various file formats and georeferencing parameters across several of the missions' data sets, allowing you to analyze data across the multi-decadal time series. 15 | 16 | The contents of the IceFlow folder include the IceFlow library itself, along with several Jupyter Notebooks that provide data access and harmonization using IceFlow. 
If you are new to IceFlow, we recommend starting at [0_introduction.ipynb](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/notebooks/iceflow/0_introduction.ipynb), which provides a descriptive background on the data, as well as both map widget-based and programmatic-based options for accessing data from IceFlow. 17 | 18 | ## Setup 19 | 20 | To run the notebooks provided in this folder, please see the [NSIDC-Data-Tutorials repository readme](https://github.com/nsidc/NSIDC-Data-Tutorials#readme) for instructions on several ways (using Binder, Docker, or Conda) to do this. 21 | 22 | ## Key Learning Objectives 23 | 24 | 1. Learn the basics about the data sets (pre-IceBridge, IceBridge, ICESat/GLAS and ICESat-2) served by the IceFlow library. 25 | 26 | 2. Learn to access these data sets using the IceFlow user interface widget. 27 | 28 | 3. Learn to access these data sets using the IceFlow API. 29 | 30 | 4. Learn to read and analyze the data using IceFlow. 31 | -------------------------------------------------------------------------------- /notebooks/iceflow/data/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/data/.placeholder -------------------------------------------------------------------------------- /notebooks/iceflow/data/atm1b_data_2020-11-15T20-05.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/data/atm1b_data_2020-11-15T20-05.hdf5 -------------------------------------------------------------------------------- /notebooks/iceflow/data/processed_ATL06_20181015100401_02560110_003_01.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/data/processed_ATL06_20181015100401_02560110_003_01.h5 -------------------------------------------------------------------------------- /notebooks/iceflow/data/twaties-test-GLAH06-2000-2010.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/data/twaties-test-GLAH06-2000-2010.h5 -------------------------------------------------------------------------------- /notebooks/iceflow/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: nsidc-iceflow 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - pangeo-notebook 7 | - matplotlib-base 8 | - ipyleaflet 9 | - geopandas 10 | - h5py 11 | - ipympl 12 | - plotly 13 | - joblib 14 | - cartopy 15 | - vaex 16 | - tqdm 17 | - sidecar 18 | - icepyx 19 | - pip 20 | - pip: 21 | - python-cmr 22 | platforms: 23 | - linux-64 24 | - osx-64 25 | - win-64 26 | -------------------------------------------------------------------------------- /notebooks/iceflow/iceflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/iceflow/__init__.py -------------------------------------------------------------------------------- /notebooks/iceflow/iceflow/client.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | from uuid import uuid4 3 | import requests 4 | from joblib import Parallel, delayed 5 | from datetime import datetime 6 | from cmr import GranuleQuery 7 | from requests.auth import HTTPBasicAuth 8 | from tqdm import tqdm 9 | from .is2 import is2 10 | 11 | 12 | class IceflowClient: 13 | def __init__(self): 14 | """ 15 | Client class for IceFlow. 16 | """ 17 | self.properties = { 18 | 'start_date': datetime(1993, 1, 1), 19 | 'end_date': datetime.now(), 20 | 'polygon': '', 21 | 'bbox': '' 22 | } 23 | self.session = None 24 | self.icesat2 = None 25 | # IceFlow uses Hermes, the NSIDC data ordering API as a proxy 26 | self.hermes_api_url = 'https://nsidc.org/apps/orders/api' 27 | self.granules = [] 28 | 29 | def valid_session(self): 30 | if self.session is None: 31 | print('You need to login into NASA EarthData before placing an IceFLow Order') 32 | return None 33 | return True 34 | 35 | def authenticate(self, user, password, email): 36 | if user is not None and password is not None: 37 | self.credentials = { 38 | 'username': user, 39 | 'password': password, 40 | 'email': email 41 | } 42 | else: 43 | print('user and password must have valid values') 44 | return None 45 | return self._create_earthdata_authenticated_session() 46 | 47 | def _get_dataset_latest_version(self, dataset): 48 | """ 49 | Returns the latest version of a NSIDC-DAAC dataset 50 | """ 51 | try: 52 | metadata = requests.get(f'http://nsidc.org/api/dataset/metadata/v2/{dataset}.json').json() 53 | latest_version = metadata['newestPublishedMajorVersion'] 54 | except Exception: 55 | print('dataset not found') 56 | return None 57 | if dataset.startswith('ATL'): 58 | version = f"{str(latest_version).zfill(3)}" 59 | else: 60 | version = latest_version 61 | return version 62 | 63 | def bounding_box(self, points): 64 | """ 65 | returns a bbox array for a given polygon 66 | """ 67 | x_coordinates, y_coordinates = zip(*points) 68 | return [(min(x_coordinates), min(y_coordinates)), (max(x_coordinates), max(y_coordinates))] 69 | 70 | def _expand_datasets(self, params): 71 | """ 72 | IceFlow consolidates ATM1B and CMR does not. We need to expand the dataset 73 | names and versions. 74 | """ 75 | cmr_datasets = [] 76 | bbox = [float(coord) for coord in params['bbox'].split(',')] 77 | temporal = (datetime.strptime(params['start'], '%Y-%m-%d'), 78 | datetime.strptime(params['end'], '%Y-%m-%d')) 79 | bbox = (bbox[0], bbox[1], bbox[2], bbox[3]) 80 | for d in params['datasets']: 81 | if d == 'ATM1B': 82 | cmr_datasets.extend([{'name': 'ILATM1B', 83 | 'version': None, 84 | 'temporal': temporal, 85 | 'bounding_box': bbox}, 86 | {'name': 'BLATM1B', 87 | 'version': None, 88 | 'temporal': temporal, 89 | 'bounding_box': bbox}]) 90 | elif d == 'GLAH06' or d == 'ILVIS2': 91 | cmr_datasets.append({'name': d, 92 | 'version': None, 93 | 'temporal': temporal, 94 | 'bounding_box': bbox}) 95 | 96 | else: 97 | latest_version = self._get_dataset_latest_version(d) 98 | cmr_datasets.append({'name': d, 99 | 'version': latest_version, 100 | 'temporal': temporal, 101 | 'bounding_box': bbox}) 102 | return cmr_datasets 103 | 104 | def query_cmr(self, params=None): 105 | """ 106 | Queries CMR for one or more data sets short-names using the spatio-temporal 107 | constraints defined in params. Returns a json list of CMR records. 
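        Example (a sketch added for clarity; ``client`` stands for an
        ``IceflowClient`` instance and the values are illustrative only)::

            params = {
                'datasets': ['GLAH06', 'ATL06'],
                'start': '2003-01-01',
                'end': '2019-01-01',
                'bbox': '-107.4515,-75.3695,-105.3794,-74.4563'
            }
            granules = client.query_cmr(params)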
108 | """ 109 | if params is None: 110 | return None 111 | self.granules = {} 112 | datasets = self._expand_datasets(params) 113 | for d in datasets: 114 | cmr_api = GranuleQuery() 115 | g = cmr_api.parameters( 116 | short_name=d['name'], 117 | version=d['version'], 118 | temporal=d['temporal'], 119 | bounding_box=d['bounding_box']).get_all() 120 | self.granules[d['name']] = g 121 | 122 | self.cmr_download_size(self.granules) 123 | return self.granules 124 | 125 | def cmr_download_size(self, granules): 126 | sizes = {} 127 | for dataset in granules: 128 | size = round(sum(float(g['granule_size']) for g in self.granules[dataset]) / 1024, 2) 129 | sizes[dataset] = size 130 | print(f'{dataset}: {len(self.granules[dataset])} granules found. Approx download size: {size} GB') 131 | return sizes 132 | 133 | def download_cmr_granules(self, dataset, start, end): 134 | """ 135 | Downloads granules (data files) directly form their CMR source. 136 | Note that this can bring a lot of data and is only adviced to be used when you have a small 137 | selection and plenty of space in the hard drive. 138 | """ 139 | if self.granules is None: 140 | return None 141 | file_paths = [] 142 | Parallel(n_jobs=8, backend="threading")( 143 | delayed(self._download_cmr_granule)(g['url'], file_paths) for g in self.granules[dataset][start:end] 144 | ) 145 | return file_paths 146 | 147 | def _download_cmr_granule(self, url, file_paths): 148 | local_filename = url.split('/')[-1] 149 | with requests.get(url, stream=True) as r: 150 | r.raise_for_status() 151 | if not os.path.exists(f'data/{local_filename}'): 152 | with open('data/' + local_filename, 'wb') as f: 153 | for chunk in r.iter_content(chunk_size=8192): 154 | f.write(chunk) 155 | file_paths.append(local_filename) 156 | return local_filename 157 | 158 | def _parse_order_parameters(self, dataset, params): 159 | if dataset in ['ATL03', 'ATL06', 'ATL07', 'ATL08']: 160 | provider = 'icepyx' 161 | else: 162 | if dataset in ['ATM1B', 'ILATM1B', 'BLATM1B']: 163 | dataset = 'ATM1B' # IceFlow consolidates ATM data 164 | provider = 'valkyrie' 165 | 166 | return { 167 | 'dataset': dataset, 168 | 'start': params['start'], 169 | 'end': params['end'], 170 | 'bbox': params['bbox'], 171 | 'provider': provider 172 | } 173 | 174 | def place_data_orders(self, params): 175 | """ 176 | Post a data order to either Iceflow or EGI (Icepyx). 
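        Accepts the same ``params`` dictionary used by ``query_cmr`` and returns a
        list with one order dictionary per requested data set; each order carries
        'id', 'provider', 'dataset', 'request', 'response' and 'status' keys
        (see ``_post_iceflow_order`` and ``_post_icepyx_order`` below).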
177 | """ 178 | if self.valid_session() is None: 179 | return None 180 | if params is None: 181 | print('You need to pass spatio temporal parameters') 182 | return None 183 | orders = [] 184 | for dataset in params['datasets']: 185 | order_parameters = self._parse_order_parameters(dataset, params) 186 | if order_parameters['provider'] == 'icepyx': 187 | order = self._post_icepyx_order(order_parameters) 188 | else: 189 | order = self._post_iceflow_order(order_parameters) 190 | orders.append(order) 191 | return orders 192 | 193 | def check_order_status(self, order): 194 | if self.valid_session() is None: 195 | return None 196 | if order['provider'] == 'icepyx': 197 | return { 198 | 'status': 'COMPLETE', 199 | 'url': None 200 | } 201 | else: 202 | order_id = order['response']['order']['order_id'] 203 | status_url = f'{self.hermes_api_url}/orders/{order_id}' 204 | response = self.session.get(status_url).json() 205 | status = response['status'].upper() 206 | if status == 'COMPLETE': 207 | granule_url = response['file_urls']['data'][0] 208 | granule_url = granule_url.replace('int.nsidc', 'nsidc') 209 | else: 210 | granule_url = None 211 | response = { 212 | 'status': status, 213 | 'url': granule_url 214 | } 215 | return response 216 | 217 | def _post_icepyx_order(self, params): 218 | """ 219 | Icepyx uses a sync method to download granules, so there is no place order per se, 220 | instead we just query for the granules and then download them. 221 | """ 222 | self.is2_query = self.icesat2.query(params) 223 | return { 224 | 'id': uuid4(), 225 | 'provider': 'icepyx', 226 | 'dataset': params['dataset'], 227 | 'request': params, 228 | 'response': self.is2_query, 229 | 'status': 'PENDING' 230 | } 231 | 232 | def _post_iceflow_order(self, params): 233 | order = {} 234 | username = self.credentials['username'] 235 | if 'provider' in params: 236 | provider = params['provider'] 237 | else: 238 | provider = 'valkyrie' 239 | hermes_params = { 240 | "selection_criteria": { 241 | "filters": { 242 | "dataset_short_name": params['dataset'], 243 | "dataset_version": "1", 244 | "bounding_box": params['bbox'], 245 | "time_start": params['start'], 246 | "time_end": params['end'] 247 | } 248 | }, 249 | "fulfillment": provider, 250 | "delivery": provider, 251 | "uid": f"{username}" 252 | } 253 | if 'itrf' in params: 254 | hermes_params['selection_criteria']['filters']['valkyrie_itrf'] = params['itrf'] 255 | 256 | if 'epoch' in params: 257 | hermes_params['selection_criteria']['filters']['valkyrie_epoch'] = params['epoch'] 258 | 259 | base_url = f'{self.hermes_api_url}/orders/' 260 | self.session.headers['referer'] = 'https://valkyrie.request' 261 | response = self.session.post(base_url, 262 | json=hermes_params, 263 | verify=False).json() 264 | order = { 265 | 'id': response['order']['order_id'], 266 | 'provider': 'iceflow', 267 | 'dataset': params['dataset'], 268 | 'request': hermes_params, 269 | 'response': response, 270 | 'status': 'PENDING' 271 | } 272 | # now we are going to return the response from Hermes 273 | return order 274 | 275 | def _create_earthdata_authenticated_session(self): 276 | s = requests.session() 277 | auth_url = f'{self.hermes_api_url}/earthdata/auth/' 278 | nsidc_resp = s.get(auth_url, timeout=10, allow_redirects=False) 279 | auth_cred = HTTPBasicAuth(self.credentials['username'], self.credentials['password']) 280 | auth_resp = s.get(nsidc_resp.headers['Location'], 281 | auth=auth_cred, 282 | allow_redirects=True, 283 | timeout=10) 284 | 285 | if not (auth_resp.ok): # type: ignore 286 | if 
auth_resp.status_code == 404 or auth_resp.status_code == 500: 287 | # HERMES bug 288 | self.session = s 289 | # Now we create a icesat2 instance so we can query for ATL data using icepyx 290 | self.icesat2 = is2(self.credentials) 291 | return self.session 292 | 293 | print(nsidc_resp.url) 294 | print(f'Authentication with Earthdata Login failed first with:\n{auth_resp.text}') 295 | return None 296 | 297 | else: # type: ignore 298 | self.session = s 299 | # Now we create a icesat2 instance so we can query for ATL data using icepyx 300 | self.icesat2 = is2(self.credentials) 301 | return self.session 302 | 303 | def h5_filename(self, order): 304 | dataset = order['dataset'] 305 | order_id = order['id'] 306 | today = datetime.today().strftime('%Y%m%d') 307 | return f'{dataset}-{today}-{order_id}' 308 | 309 | def download_orders(self, orders): 310 | for order in orders: 311 | dataset = order['dataset'] 312 | status = self.check_order_status(order) 313 | print(f"dataset: {dataset}, order {order['id']} status is {status['status']}") 314 | if status['status'] == 'COMPLETE' and order['status'] != 'DOWNLOADED': 315 | print(' >> Downloading order...') 316 | data_granule = self.download_order(order) 317 | print(f' >> Order Downloaded: {data_granule}') 318 | elif status['status'] == 'COMPLETE' and order['status'] == 'DOWNLOADED': 319 | print(f"order {order['id']} for {dataset} has been downloaded already") 320 | 321 | def download_order(self, order): 322 | if order['provider'] == 'icepyx' and order['status'] != 'DOWNLOADED': 323 | granules = order['response'].download_granules('./data') 324 | order['status'] = 'DOWNLOADED' 325 | return granules 326 | else: 327 | status = self.check_order_status(order) 328 | filename = self.h5_filename(order) 329 | if status['status'] == 'COMPLETE' and order['status'] != 'DOWNLOADED': 330 | granule = self.download_hdf5(status['url'], filename) 331 | order['status'] = 'DOWNLOADED' 332 | return granule 333 | 334 | def download_hdf5(self, url, file_name=None): 335 | url = url.replace('int.nsidc', 'nsidc') 336 | order_data = requests.get(url, stream=True) 337 | total_size_in_bytes = int(order_data.headers.get('content-length', 0)) 338 | if file_name == '' or file_name is None: 339 | file_name = url.split('/')[-1].replace('.hdf5', '') 340 | # check if file exist. 
341 | if os.path.isfile(f'data/{file_name}.h5'): 342 | print('File already downloaded, skipping...') 343 | return None 344 | progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) 345 | # download in 1MB chunks 346 | with open(f'data/{file_name}.h5', 'wb') as f: 347 | for chunk in order_data.iter_content(chunk_size=1048576): 348 | progress_bar.update(len(chunk)) 349 | if chunk: 350 | f.write(chunk) 351 | progress_bar.close() 352 | print(f'Granule downloaded: data/{file_name}.h5') 353 | return f'data/{file_name}.h5' 354 | -------------------------------------------------------------------------------- /notebooks/iceflow/iceflow/is2.py: -------------------------------------------------------------------------------- 1 | import icepyx as ipx 2 | 3 | 4 | class is2: 5 | """ 6 | Icepyx wrapper to place ATL data orders 7 | """ 8 | def __init__(self, credentials): 9 | self.credentials = credentials 10 | 11 | def query(self, parameters): 12 | dataset = parameters['dataset'] 13 | date_range = [parameters['start'], parameters['end']] 14 | bounding_box = [round(float(coord), 4) for coord in parameters['bbox'].split(',')] 15 | query = ipx.Query(dataset, bounding_box, date_range) 16 | query.earthdata_login() 17 | return query 18 | 19 | def simplify_atl03(self, files, variables, filters): 20 | return None 21 | 22 | def simplify_atl06(self, files, variables, filters): 23 | return None 24 | 25 | def simplify_atl07(self, files, variables, filters): 26 | return None 27 | 28 | def simplify_atl08(self, files, variables, filters): 29 | return None 30 | -------------------------------------------------------------------------------- /notebooks/iceflow/iceflow/layers.py: -------------------------------------------------------------------------------- 1 | from ipyleaflet import TileLayer, GeoJSON, projections, basemaps 2 | import pandas as pd 3 | import geopandas 4 | import json 5 | 6 | greenland_velocities = 'https://gibs.earthdata.nasa.gov/wmts/epsg3413/best/' + \ 7 | 'MEaSUREs_Ice_Velocity_Greenland/default/2008-12-01/500m/{z}/{y}/{x}.png' 8 | 9 | itslive_layer = TileLayer(opacity=1.0, 10 | name='Ice Velocities', 11 | url=greenland_velocities, 12 | zoom=1, 13 | min_zoom=1, 14 | max_zoom=4) 15 | 16 | df_north = geopandas.read_file('./iceflow/files/ib_north.json') 17 | df_north['timestamp'] = df_north['timestamp'].dt.tz_convert(None).astype(str) 18 | 19 | df_north['date'] = pd.to_datetime(df_north['timestamp']) 20 | 21 | df_south = geopandas.read_file('./iceflow/files/ib_south.json') 22 | df_south['timestamp'] = df_south['timestamp'].dt.tz_convert(None).astype(str) 23 | 24 | df_south['date'] = pd.to_datetime(df_south['timestamp']) 25 | 26 | 27 | 28 | def filter_layer(df, name, start, end): 29 | if start is None and end is None: 30 | return df[['timestamp', 'geometry']].to_json() 31 | filtered_frame = df[df.date.between(start, end)][['timestamp', 'geometry']].to_json() 32 | geo_layer = GeoJSON( 33 | name=name, 34 | data=json.loads(filtered_frame), 35 | style={ 36 | 'opacity': 0.7, 'dashArray': '8', 'fillOpacity': 0.8, 'weight': 1, 'color': '#8bc34a' 37 | } 38 | ) 39 | return geo_layer 40 | 41 | 42 | def flights_south(start, end): 43 | layer = filter_layer(df_south, 'Flightlines Southern Hemisphere', start, end) 44 | return layer 45 | 46 | 47 | def flights_north(start, end): 48 | layer = filter_layer(df_north, 'Flightlines Northern Hemisphere', start, end) 49 | return layer 50 | 51 | 52 | flight_layers = { 53 | 'north': [flights_north], 54 | 'south': [flights_south], 55 | 'global': 
[flights_south, flights_north] 56 | } 57 | 58 | custom_layers = { 59 | 'north': [itslive_layer], 60 | 'south': [], 61 | 'global': [] 62 | } 63 | 64 | north_3413 = { 65 | 'name': 'EPSG:3413', 66 | 'custom': True, 67 | 'proj4def': '+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs', 68 | 'origin': [-4194304, 4194304], 69 | 'bounds': [ 70 | [-4194304, -4194304], 71 | [4194304, 4194304] 72 | ], 73 | 'resolutions': [ 74 | 16384.0, 75 | 8192.0, 76 | 4096.0, 77 | 2048.0, 78 | 1024.0, 79 | 512.0, 80 | 256.0 81 | ] 82 | } 83 | 84 | south_3031 = { 85 | 'name': 'EPSG:3031', 86 | 'custom': True, 87 | 'proj4def': '+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs', 88 | 'origin': [-4194304, 4194304], 89 | 'bounds': [ 90 | [-4194304, -4194304], 91 | [4194304, 4194304] 92 | ], 93 | 'resolutions': [ 94 | 16384.0, 95 | 8192.0, 96 | 4096.0, 97 | 2048.0, 98 | 1024.0, 99 | 512.0, 100 | 256.0 101 | ] 102 | } 103 | 104 | widget_projections = { 105 | 'global': { 106 | 'base_map': basemaps.NASAGIBS.BlueMarble, 107 | 'projection': projections.EPSG3857, 108 | 'center': (30, -30), 109 | 'zoom': 2, 110 | 'max_zoom': 8 111 | }, 112 | 'north': { 113 | 'base_map': basemaps.NASAGIBS.BlueMarble3413, 114 | 'projection': north_3413, 115 | 'center': (80, -50), 116 | 'zoom': 1, 117 | 'max_zoom': 4 118 | }, 119 | 'south': { 120 | 'base_map': basemaps.NASAGIBS.BlueMarble3031, 121 | 'projection': south_3031, 122 | 'center': (-90, 0), 123 | 'zoom': 1, 124 | 'max_zoom': 4 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /notebooks/iceflow/iceflow/processing.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from pathlib import PurePosixPath 3 | 4 | import geopandas as gpd 5 | import h5py 6 | import numpy as np 7 | import pandas as pd 8 | 9 | 10 | class IceFlowProcessing: 11 | @staticmethod 12 | def to_geopandas(filename): 13 | with h5py.File(filename, "r") as h5: 14 | if "d_elev" in list(h5.keys()): 15 | # GLAS dataframe 16 | df_data = { 17 | "latitude": h5["d_lat"], 18 | "longitude": h5["d_lon"], 19 | "elevation": h5["d_elev"], 20 | "time": pd.to_datetime(h5["utc_datetime"][:].astype(str)), 21 | } 22 | if "elevation" in list(h5.keys()): 23 | # ATM data 24 | df_data = { 25 | "latitude": h5["latitude"], 26 | "longitude": h5["longitude"], 27 | "elevation": h5["elevation"], 28 | "time": pd.to_datetime(h5["utc_datetime"][:].astype(str)), 29 | } 30 | if "gt1l" in list(h5.keys()): # ICESat-2 31 | # Get dataproduct name 32 | dataproduct = h5.attrs["identifier_product_type"].decode() 33 | # Set variables for each ATL* product 34 | VARIABLES = { 35 | "ATL06": [ 36 | "/gt1l/land_ice_segments/delta_time", 37 | "/gt1l/land_ice_segments/h_li", 38 | "/gt1l/land_ice_segments/latitude", 39 | "/gt1l/land_ice_segments/longitude", 40 | "/gt1r/land_ice_segments/delta_time", 41 | "/gt1r/land_ice_segments/h_li", 42 | "/gt1r/land_ice_segments/latitude", 43 | "/gt1r/land_ice_segments/longitude", 44 | "/gt2l/land_ice_segments/delta_time", 45 | "/gt2l/land_ice_segments/h_li", 46 | "/gt2l/land_ice_segments/latitude", 47 | "/gt2l/land_ice_segments/longitude", 48 | "/gt2r/land_ice_segments/delta_time", 49 | "/gt2r/land_ice_segments/h_li", 50 | "/gt2r/land_ice_segments/latitude", 51 | "/gt2r/land_ice_segments/longitude", 52 | "/gt3l/land_ice_segments/delta_time", 53 | "/gt3l/land_ice_segments/h_li", 54 | "/gt3l/land_ice_segments/latitude", 
55 | "/gt3l/land_ice_segments/longitude", 56 | "/gt3r/land_ice_segments/delta_time", 57 | "/gt3r/land_ice_segments/h_li", 58 | "/gt3r/land_ice_segments/latitude", 59 | "/gt3r/land_ice_segments/longitude", 60 | ], 61 | } 62 | # Convert variable paths to 'Path' objects for easy manipulation 63 | variables = [PurePosixPath(v) for v in VARIABLES[dataproduct]] 64 | # Get set of beams to extract individially as dataframes combining in the end 65 | beams = {list(v.parents)[-2].name for v in variables} 66 | dfs = [] 67 | for beam in beams: 68 | data_dict = {} 69 | beam_variables = [v for v in variables if beam in str(v)] 70 | for variable in beam_variables: 71 | # Use variable 'name' as column name. Beam will be specified in 'beam' column 72 | column = variable.name 73 | variable = str(variable) 74 | try: 75 | values = h5[variable][:] 76 | # Convert invalid data to np.nan (only for float columns) 77 | if "float" in str(values.dtype): 78 | if "valid_min" in h5[variable].attrs: 79 | values[ 80 | values < h5[variable].attrs["valid_min"] 81 | ] = np.nan 82 | if "valid_max" in h5[variable].attrs: 83 | values[ 84 | values > h5[variable].attrs["valid_max"] 85 | ] = np.nan 86 | if "_FillValue" in h5[variable].attrs: 87 | values[ 88 | values == h5[variable].attrs["_FillValue"] 89 | ] = np.nan 90 | 91 | data_dict[column] = values 92 | except KeyError: 93 | print(f"Variable {variable} not found in {filename}.") 94 | 95 | df_data = pd.DataFrame.from_dict(data_dict) 96 | dfs.append(df_data) 97 | 98 | df_data = pd.concat(dfs, sort=True) 99 | # Add filename column for book-keeping and reset index 100 | df_data = df_data.reset_index(drop=True) 101 | EPOCH = datetime(2018, 1, 1, 0, 0, 0) 102 | df_data["delta_time"] = df_data["delta_time"].map( 103 | lambda x: EPOCH + timedelta(seconds=x) 104 | ) 105 | df_data.rename( 106 | columns={"delta_time": "time", "h_li": "elevation"}, inplace=True 107 | ) 108 | df_data = df_data[["time", "latitude", "longitude", "elevation"]] 109 | 110 | df = pd.DataFrame(data=df_data) 111 | 112 | geopandas_df = gpd.GeoDataFrame( 113 | df, 114 | geometry=gpd.points_from_xy( 115 | df["longitude"], df["latitude"], crs="epsg:4326" 116 | ), 117 | ) 118 | geopandas_df = geopandas_df.set_index("time") 119 | return geopandas_df 120 | 121 | @staticmethod 122 | def get_common_df(filename): 123 | """ 124 | Returns a minimal pandas dataframe for the different IceFlow datasets with the following keys 125 | latitude,longitude,elevation,time. 
126 | Params: hdf_f, an h5py file object 127 | """ 128 | with h5py.File(filename, "r") as h5: 129 | if "d_elev" in list(h5.keys()): 130 | # GLAS dataframe 131 | df_data = { 132 | "latitude": h5["d_lat"], 133 | "longitude": h5["d_lon"], 134 | "elevation": h5["d_elev"], 135 | "time": pd.to_datetime(h5["utc_datetime"]), 136 | } 137 | if "elevation" in list(h5.keys()): 138 | # ATM data 139 | df_data = { 140 | "latitude": h5["latitude"], 141 | "longitude": h5["longitude"], 142 | "elevation": h5["elevation"], 143 | "time": pd.to_datetime(h5["utc_datetime"]), 144 | } 145 | df = pd.DataFrame(data=df_data) 146 | return df 147 | 148 | @staticmethod 149 | def get_common_dictionary(dataset): 150 | """ 151 | Returns a simple dictionary with key values for different datasets 152 | """ 153 | if dataset == "GLAS": 154 | data_dict = { 155 | "latitude": "d_lat", 156 | "longitude": "d_lon", 157 | "elevation": "d_elev", 158 | "time": "utc_datetime", 159 | } 160 | return data_dict 161 | if dataset == "ATM": 162 | # ATM data 163 | data_dict = { 164 | "latitude": "latitude", 165 | "longitude": "longitude", 166 | "elevation": "elevation", 167 | "time": "utc_datetime", 168 | } 169 | return data_dict 170 | -------------------------------------------------------------------------------- /notebooks/iceflow/img/iceflow-coverage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/iceflow-coverage.jpg -------------------------------------------------------------------------------- /notebooks/iceflow/img/log-icons.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/log-icons.png -------------------------------------------------------------------------------- /notebooks/iceflow/img/nsidc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/nsidc_logo.png -------------------------------------------------------------------------------- /notebooks/iceflow/img/unfinished_horse.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/unfinished_horse.jpg -------------------------------------------------------------------------------- /notebooks/iceflow/img/vaex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/vaex.png -------------------------------------------------------------------------------- /notebooks/iceflow/img/vaex_ib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/iceflow/img/vaex_ib.png -------------------------------------------------------------------------------- /notebooks/itslive/data/.placeholder: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/itslive/data/.placeholder -------------------------------------------------------------------------------- /notebooks/itslive/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: nsidc-itslive 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - jupyterlab=3 7 | - fiona 8 | - ipyleaflet 9 | - ipympl 10 | - matplotlib-base 11 | - pandas 12 | - pyproj 13 | - s3fs 14 | - sidecar 15 | - shapely 16 | - xarray 17 | - markdown 18 | - voila 19 | - zarr 20 | platforms: 21 | - linux-64 22 | - osx-64 23 | - win-64 24 | -------------------------------------------------------------------------------- /notebooks/itslive/its_live_antarctic_vel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/itslive/its_live_antarctic_vel.jpg -------------------------------------------------------------------------------- /notebooks/itslive/itslive-notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e54115a3-ecaa-4131-bcb3-77f84831f91f", 6 | "metadata": { 7 | "tags": [] 8 | }, 9 | "source": [ 10 | "
\n", 11 | "

\n", 12 | "

Global Glacier Velocity Point Data Access

\n", 13 | "
" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "id": "330a2f00", 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "%%capture\n", 24 | "%matplotlib widget\n", 25 | "\n", 26 | "import matplotlib\n", 27 | "import markdown \n", 28 | "from ipywidgets import widgets, HTML, Output\n", 29 | "\n", 30 | "from velocity_widget import ITSLIVE\n", 31 | "\n", 32 | "matplotlib.rcParams['figure.figsize'] = [9, 5]\n", 33 | "matplotlib.rcParams[\"figure.autolayout\"] = True\n", 34 | "\n", 35 | "velocity_widget = ITSLIVE()" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "c9ad345d-087a-43e8-bf24-1e0be4ba6565", 41 | "metadata": {}, 42 | "source": [ 43 | "**Instructions**: Click and drag on the map to pan the field of view. Select locations by double-clicking on the map then press Plot. Once plotted you can change the Variable that is being shown and how the markers are colored using Plot By. You can drag individual points after they are placed to relocate them, and then Plot again or Clear markers to start over.\n", 44 | "You can also single-click on the map to populate the Lat and Lon boxes then add a point using the Add Point. Lat and Lon can also be edited manually.\n", 45 | "Hovering your cursor over the plot reveals tools to zoom, pan, and save the figure.\n", 46 | "\n", 47 | "Press Export Data to generate comma separated value (.csv) files of the data. Press Download Data to retrieve locally. Export Data must be pressed each time new data is requested.\n", 48 | "Check out the video tutorial if you're a visual learner:\n", 49 | "\n", 50 | "
\n", 51 | "\n", 52 | "\n", 53 | "Data are Version 2 of the ITS_LIVE global glacier velocity dataset that provides up-to-date velocities from Sentinel-1, Sentinel-2, Landsat-8 and Landsat-9 data. Version 2 annual mosaics are coming soon, and will be followed by Landsat 7 and improved Landsat 9 velocities.\n", 54 | "Please refer to the [project website](https://its-live.jpl.nasa.gov/) for known issues, citation and other product information.\n" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": null, 60 | "id": "cdc8e3f9", 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "dates_range = widgets.SelectionRangeSlider(\n", 65 | " options=[i for i in range(546)],\n", 66 | " index=(1, 120),\n", 67 | " continuous_update=False,\n", 68 | " description='Interval (days): ',\n", 69 | " orientation='horizontal',\n", 70 | " layout={'width': '90%',\n", 71 | " 'display': 'flex'},\n", 72 | " style={'description_width': 'initial'})\n", 73 | "\n", 74 | "variables = widgets.Dropdown(\n", 75 | " options=['v', 'v_error', 'vx', 'vy'],\n", 76 | " description='Variable: ',\n", 77 | " disabled=False,\n", 78 | " value='v',\n", 79 | " layout={'width': '20%',\n", 80 | " 'display': 'flex'},\n", 81 | " style={'description_width': 'initial'})\n", 82 | "\n", 83 | "plot_type = widgets.Dropdown(\n", 84 | " options=['location', 'satellite'],\n", 85 | " description='Plot By: ',\n", 86 | " disabled=False,\n", 87 | " value='location',\n", 88 | " layout={'width': '20%',\n", 89 | " 'display': 'flex'},\n", 90 | " style={'description_width': 'initial'})\n", 91 | "\n", 92 | "plot_button = widgets.Button(\n", 93 | " description='Plot',\n", 94 | " button_style='primary',\n", 95 | " icon='line-chart',\n", 96 | " style={'description_width': 'initial'})\n", 97 | "\n", 98 | "clear_button = widgets.Button(\n", 99 | " description='Clear Points',\n", 100 | " # button_style='warning',\n", 101 | " icon=\"trash\",\n", 102 | " style={'description_width': 'initial'})\n", 103 | "\n", 104 | "latitude = widgets.BoundedFloatText(\n", 105 | " value=0.0,\n", 106 | " min=-90.0,\n", 107 | " max=90.0,\n", 108 | " step=0.1,\n", 109 | " description='Lat: ',\n", 110 | " disabled=False,\n", 111 | " style={'description_width': 'initial'},\n", 112 | " layout={'width': '20%',\n", 113 | " 'display': 'flex'},\n", 114 | ")\n", 115 | "\n", 116 | "longitude = widgets.BoundedFloatText(\n", 117 | " value=0.0,\n", 118 | " min=-180.0,\n", 119 | " max=180.0,\n", 120 | " step=0.1,\n", 121 | " description='Lon: ',\n", 122 | " disabled=False,\n", 123 | " style={'description_width': 'initial'},\n", 124 | " layout={'width': '20%',\n", 125 | " 'display': 'flex'},\n", 126 | ")\n", 127 | "\n", 128 | "add_button = widgets.Button(\n", 129 | " description='Add Point',\n", 130 | " # button_style='info',\n", 131 | " icon=\"map-marker\",\n", 132 | " style={'description_width': 'initial'})\n", 133 | "\n", 134 | "include_running_mean = widgets.Checkbox(\n", 135 | " value=False,\n", 136 | " description=\"Include Running Mean\",\n", 137 | " style={'description_width': 'initial'},\n", 138 | " disabled=False,\n", 139 | " indent=False,\n", 140 | " tooltip=\"Plot running mean through each time series\",\n", 141 | " layout=widgets.Layout(width=\"25%\"),\n", 142 | " )\n", 143 | "\n", 144 | "export_button = widgets.Button(\n", 145 | " description='Export Data',\n", 146 | " # button_style='info',\n", 147 | " icon=\"file-export\",\n", 148 | " style={'description_width': 'initial'})\n", 149 | "\n", 150 | "data_link = widgets.HTML(\n", 151 | " value=\"
\"\n", 152 | ")\n", 153 | "\n", 154 | "# If this congiguration changes we need to rerun the cell.\n", 155 | "config = { \n", 156 | " \"plot\": \"v\", # or other ITS_LIVE variables: vx, vy ...\n", 157 | " \"min_separation_days\": 1,\n", 158 | " \"max_separation_days\": 90,\n", 159 | " \"color_by\": \"location\", # valid values: satellite, points\n", 160 | " \"verbose\": True, # print operations\n", 161 | " \"runnig_mean\": True,\n", 162 | " \"coords\": {\n", 163 | " \"latitude\": latitude,\n", 164 | " \"longitude\": longitude\n", 165 | " },\n", 166 | " \"data_link\": data_link\n", 167 | "}\n", 168 | "\n", 169 | "\n", 170 | "plot_button.on_click(velocity_widget.plot_time_series)\n", 171 | "clear_button.on_click(velocity_widget.clear_points)\n", 172 | "\n", 173 | "def update_variable(change):\n", 174 | " if change['type'] == 'change' and change['name'] == 'value':\n", 175 | " config[\"plot\"] = variables.value\n", 176 | " velocity_widget.set_config(config)\n", 177 | " velocity_widget.plot_time_series()\n", 178 | " \n", 179 | "def update_range(change):\n", 180 | " if change['type'] == 'change' and change['name'] == 'value':\n", 181 | " start, end = change['new']\n", 182 | " config[\"min_separation_days\"] = start\n", 183 | " config[\"max_separation_days\"] = end\n", 184 | " velocity_widget.set_config(config)\n", 185 | " velocity_widget.plot_time_series()\n", 186 | " \n", 187 | "def update_plottype(change):\n", 188 | " if change['type'] == 'change' and change['name'] == 'value':\n", 189 | " config[\"color_by\"] = plot_type.value\n", 190 | " velocity_widget.set_config(config)\n", 191 | " velocity_widget.plot_time_series()\n", 192 | " \n", 193 | "def update_mean(change):\n", 194 | " if change['type'] == 'change' and change['name'] == 'value':\n", 195 | " config[\"running_mean\"] = include_running_mean.value\n", 196 | " velocity_widget.set_config(config)\n", 197 | " velocity_widget.plot_time_series()\n", 198 | " \n", 199 | "def add_point(event):\n", 200 | " coords = (latitude.value, longitude.value)\n", 201 | " velocity_widget.add_point(coords)\n", 202 | " \n", 203 | "def export_ts(event):\n", 204 | " velocity_widget.export_data()\n", 205 | "\n", 206 | "\n", 207 | "export_button.on_click(export_ts)\n", 208 | "\n", 209 | "\n", 210 | "add_button.on_click(add_point)\n", 211 | "dates_range.observe(update_range, 'value')\n", 212 | "plot_type.observe(update_plottype, 'value')\n", 213 | "variables.observe(update_variable, 'value')\n", 214 | "include_running_mean.observe(update_mean, 'value')\n", 215 | "\n", 216 | "layout = widgets.Layout(align_items='stretch',\n", 217 | " display='flex',\n", 218 | " flex_flow='row wrap',\n", 219 | " border='none',\n", 220 | " grid_template_columns=\"repeat(auto-fit, minmax(720px, 1fr))\",\n", 221 | " # grid_template_columns='48% 48%',\n", 222 | " width='99%',\n", 223 | " height='100%')\n", 224 | "\n", 225 | "velocity_widget.set_config(config)\n", 226 | "\n", 227 | "velocity_widget.fig.canvas.capture_scroll = True\n", 228 | "\n", 229 | "# We render the widget\n", 230 | "widgets.GridBox([\n", 231 | " widgets.VBox([velocity_widget.map,\n", 232 | " widgets.HBox([latitude, longitude, add_button, clear_button], layout=widgets.Layout(align_items=\"flex-start\",\n", 233 | " flex_flow='row wrap')),\n", 234 | " dates_range,\n", 235 | " widgets.HBox([plot_button, variables, plot_type, include_running_mean], layout=widgets.Layout(justify_content=\"flex-start\",\n", 236 | " flex_flow='row wrap'))],\n", 237 | " layout=widgets.Layout(min_width=\"100%\",\n", 238 | " 
display=\"flex\",\n", 239 | " # height=\"100%\",\n", 240 | " # max_height=\"100%\",\n", 241 | " max_width=\"100%\")),\n", 242 | " widgets.VBox([\n", 243 | " velocity_widget.fig.canvas,\n", 244 | " widgets.HBox([export_button, data_link])\n", 245 | " ], layout=widgets.Layout(min_width=\"720px\",\n", 246 | " overflow='scroll',\n", 247 | " max_width=\"100%\",\n", 248 | " display='flex'))],\n", 249 | " layout=layout)" 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": null, 255 | "id": "9d134742-c10f-47ce-9930-cf049e37b2f0", 256 | "metadata": {}, 257 | "outputs": [], 258 | "source": [ 259 | "# we can access the zarr cubes via \n", 260 | "cubes = velocity_widget.dct.open_cubes\n", 261 | "# (-179190.32201768592, -2109388.6283777533)\n", 262 | "# then load them with xarray e.g.\n", 263 | "# ds = cubes[\"s3://its-live-data/datacubes/v02/N70W040/ITS_LIVE_vel_EPSG3413_G0120_X-150000_Y-2150000.zarr\"]" 264 | ] 265 | } 266 | ], 267 | "metadata": { 268 | "kernelspec": { 269 | "display_name": "Python 3 (ipykernel)", 270 | "language": "python", 271 | "name": "python3" 272 | }, 273 | "language_info": { 274 | "codemirror_mode": { 275 | "name": "ipython", 276 | "version": 3 277 | }, 278 | "file_extension": ".py", 279 | "mimetype": "text/x-python", 280 | "name": "python", 281 | "nbconvert_exporter": "python", 282 | "pygments_lexer": "ipython3", 283 | "version": "3.9.12" 284 | } 285 | }, 286 | "nbformat": 4, 287 | "nbformat_minor": 5 288 | } 289 | -------------------------------------------------------------------------------- /notebooks/itslive/itslive-widget.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "330a2f00", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "%%capture\n", 11 | "%matplotlib widget\n", 12 | "\n", 13 | "import matplotlib\n", 14 | "import markdown \n", 15 | "from ipywidgets import widgets, HTML, Output\n", 16 | "\n", 17 | "from velocity_widget import ITSLIVE\n", 18 | "\n", 19 | "matplotlib.rcParams['figure.figsize'] = [6, 4]\n", 20 | "matplotlib.rcParams[\"figure.autolayout\"] = True\n", 21 | "\n", 22 | "velocity_widget = ITSLIVE()" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "1905eb53-c87a-425a-beb3-115720d58af8", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "html_title = markdown.markdown(\"\"\"\n", 33 | "\n", 34 | "
\n", 35 | "

\n", 36 | "

Global Glacier Velocity Point Data Access

\n", 37 | "
\n", 38 | "\n", 39 | "***\n", 40 | "\"\"\")\n", 41 | "\n", 42 | "\n", 43 | "html_instructions = markdown.markdown(\"\"\" Click and drag on the map to pan the field of view. Select locations by double-clicking on the map then press Plot. Once plotted you can change the Variable that is being shown and how the markers are colored using Plot By. You can drag individual points after they are placed to relocate them, and then Plot again or Clear markers to start over.\n", 44 | "You can also single-click on the map to populate the Lat and Lon boxes then add a point using the Add Point. Lat and Lon can also be edited manually.\n", 45 | "Hovering your cursor over the plot reveals tools to zoom, pan, and save the figure.\n", 46 | "\n", 47 | "Press Export Data to generate comma separated value (.csv) files of the data. Press Download Data to retrieve locally. Export Data must be pressed each time new data is requested.\n", 48 | "Check out the video tutorial if you're a visual learner:\n", 49 | "\n", 50 | "
\n", 51 | "\n", 52 | "Data are Version 2 of the ITS_LIVE global glacier velocity dataset that provides up-to-date velocities from Sentinel-1, Sentinel-2, Landsat-8 and Landsat-9 data. Version 2 annual mosaics are coming soon, and will be followed by Landsat 7 and improved Landsat 9 velocities.\n", 53 | "Please refer to the [project website](https://its-live.jpl.nasa.gov/) for known issues, citation and other product information.\n", 54 | "\"\"\")\n", 55 | "\n", 56 | "\n", 57 | "title = HTML(html_title, layout=widgets.Layout(width=\"100%\",\n", 58 | " display='flex',\n", 59 | " align_items='stretch'))\n", 60 | "instructions = widgets.Accordion(children=[HTML(html_instructions)], selected_index=None)\n", 61 | "instructions.set_title(0,title=\"Instructions\")" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "id": "cdc8e3f9", 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "\n", 72 | "dates_range = widgets.SelectionRangeSlider(\n", 73 | " options=[i for i in range(546)],\n", 74 | " index=(1, 120),\n", 75 | " continuous_update=False,\n", 76 | " description='Interval (days): ',\n", 77 | " orientation='horizontal',\n", 78 | " layout={'width': '90%',\n", 79 | " 'display': 'flex'},\n", 80 | " style={'description_width': 'initial'})\n", 81 | "\n", 82 | "variables = widgets.Dropdown(\n", 83 | " options=['v', 'v_error', 'vx', 'vy'],\n", 84 | " description='Variable: ',\n", 85 | " disabled=False,\n", 86 | " value='v',\n", 87 | " layout={'width': '20%',\n", 88 | " 'display': 'flex'},\n", 89 | " style={'description_width': 'initial'})\n", 90 | "\n", 91 | "plot_type = widgets.Dropdown(\n", 92 | " options=['location', 'satellite'],\n", 93 | " description='Plot By: ',\n", 94 | " disabled=False,\n", 95 | " value='location',\n", 96 | " layout={'width': '20%',\n", 97 | " 'display': 'flex'},\n", 98 | " style={'description_width': 'initial'})\n", 99 | "\n", 100 | "plot_button = widgets.Button(\n", 101 | " description='Plot',\n", 102 | " button_style='primary',\n", 103 | " icon='line-chart',\n", 104 | " style={'description_width': 'initial'})\n", 105 | "\n", 106 | "clear_button = widgets.Button(\n", 107 | " description='Clear Points',\n", 108 | " # button_style='warning',\n", 109 | " icon=\"trash\",\n", 110 | " style={'description_width': 'initial'})\n", 111 | "\n", 112 | "latitude = widgets.BoundedFloatText(\n", 113 | " value=0.0,\n", 114 | " min=-90.0,\n", 115 | " max=90.0,\n", 116 | " step=0.1,\n", 117 | " description='Lat: ',\n", 118 | " disabled=False,\n", 119 | " style={'description_width': 'initial'},\n", 120 | " layout={'width': '20%',\n", 121 | " 'display': 'flex'},\n", 122 | ")\n", 123 | "\n", 124 | "longitude = widgets.BoundedFloatText(\n", 125 | " value=0.0,\n", 126 | " min=-180.0,\n", 127 | " max=180.0,\n", 128 | " step=0.1,\n", 129 | " description='Lon: ',\n", 130 | " disabled=False,\n", 131 | " style={'description_width': 'initial'},\n", 132 | " layout={'width': '20%',\n", 133 | " 'display': 'flex'},\n", 134 | ")\n", 135 | "\n", 136 | "add_button = widgets.Button(\n", 137 | " description='Add Point',\n", 138 | " # button_style='info',\n", 139 | " icon=\"map-marker\",\n", 140 | " style={'description_width': 'initial'})\n", 141 | "\n", 142 | "include_running_mean = widgets.Checkbox(\n", 143 | " value=False,\n", 144 | " description=\"Include Running Mean\",\n", 145 | " style={'description_width': 'initial'},\n", 146 | " disabled=False,\n", 147 | " indent=False,\n", 148 | " tooltip=\"Plot running mean through each time series\",\n", 149 | " 
layout=widgets.Layout(width=\"25%\"),\n", 150 | " )\n", 151 | "\n", 152 | "export_button = widgets.Button(\n", 153 | " description='Export Data',\n", 154 | " # button_style='info',\n", 155 | " icon=\"file-export\",\n", 156 | " style={'description_width': 'initial'})\n", 157 | "\n", 158 | "data_link = widgets.HTML(\n", 159 | " value=\"
\"\n", 160 | ")\n", 161 | "\n", 162 | "# If this congiguration changes we need to rerun the cell.\n", 163 | "config = { \n", 164 | " \"plot\": \"v\", # or other ITS_LIVE variables: vx, vy ...\n", 165 | " \"min_separation_days\": 1,\n", 166 | " \"max_separation_days\": 90,\n", 167 | " \"color_by\": \"location\", # valid values: satellite, points\n", 168 | " \"verbose\": True, # print operations\n", 169 | " \"runnig_mean\": True,\n", 170 | " \"coords\": {\n", 171 | " \"latitude\": latitude,\n", 172 | " \"longitude\": longitude\n", 173 | " },\n", 174 | " \"data_link\": data_link\n", 175 | "}\n", 176 | "\n", 177 | "\n", 178 | "plot_button.on_click(velocity_widget.plot_time_series)\n", 179 | "clear_button.on_click(velocity_widget.clear_points)\n", 180 | "\n", 181 | "def update_variable(change):\n", 182 | " if change['type'] == 'change' and change['name'] == 'value':\n", 183 | " config[\"plot\"] = variables.value\n", 184 | " velocity_widget.set_config(config)\n", 185 | " velocity_widget.plot_time_series()\n", 186 | " \n", 187 | "def update_range(change):\n", 188 | " if change['type'] == 'change' and change['name'] == 'value':\n", 189 | " start, end = change['new']\n", 190 | " config[\"min_separation_days\"] = start\n", 191 | " config[\"max_separation_days\"] = end\n", 192 | " velocity_widget.set_config(config)\n", 193 | " velocity_widget.plot_time_series()\n", 194 | " \n", 195 | "def update_plottype(change):\n", 196 | " if change['type'] == 'change' and change['name'] == 'value':\n", 197 | " config[\"color_by\"] = plot_type.value\n", 198 | " velocity_widget.set_config(config)\n", 199 | " velocity_widget.plot_time_series()\n", 200 | " \n", 201 | "def update_mean(change):\n", 202 | " if change['type'] == 'change' and change['name'] == 'value':\n", 203 | " config[\"running_mean\"] = include_running_mean.value\n", 204 | " velocity_widget.set_config(config)\n", 205 | " velocity_widget.plot_time_series()\n", 206 | " \n", 207 | "def add_point(event):\n", 208 | " coords = (latitude.value, longitude.value)\n", 209 | " velocity_widget.add_point(coords)\n", 210 | " \n", 211 | "def export_ts(event):\n", 212 | " velocity_widget.export_data()\n", 213 | "\n", 214 | "\n", 215 | "export_button.on_click(export_ts)\n", 216 | "\n", 217 | "\n", 218 | "add_button.on_click(add_point)\n", 219 | "dates_range.observe(update_range, 'value')\n", 220 | "plot_type.observe(update_plottype, 'value')\n", 221 | "variables.observe(update_variable, 'value')\n", 222 | "include_running_mean.observe(update_mean, 'value')\n", 223 | "\n", 224 | "layout = widgets.Layout(align_items='stretch',\n", 225 | " display='flex',\n", 226 | " flex_flow='row wrap',\n", 227 | " border='none',\n", 228 | " grid_template_columns=\"repeat(auto-fit, minmax(420px, 1fr))\",\n", 229 | " # grid_template_columns='48% 48%',\n", 230 | " width='99%',\n", 231 | " height='100%')\n", 232 | "\n", 233 | "velocity_widget.set_config(config)\n", 234 | "\n", 235 | "velocity_widget.fig.canvas.capture_scroll = True\n", 236 | "\n", 237 | "\n", 238 | "widgets.GridBox([widgets.VBox([title,\n", 239 | " instructions], layout=widgets.Layout(width=\"100%\")),\n", 240 | " widgets.VBox([velocity_widget.map,\n", 241 | " widgets.HBox([latitude, longitude, add_button, clear_button], layout=widgets.Layout(align_items=\"flex-start\",\n", 242 | " flex_flow='row wrap')),\n", 243 | " dates_range,\n", 244 | " widgets.HBox([plot_button, variables, plot_type, include_running_mean], layout=widgets.Layout(justify_content=\"flex-start\",\n", 245 | " flex_flow='row wrap'))],\n", 246 | " 
layout=widgets.Layout(min_width=\"420px\",\n", 247 | " # display=\"flex\",\n", 248 | " # height=\"100%\",\n", 249 | " # max_height=\"100%\",\n", 250 | " max_width=\"100%\")),\n", 251 | " widgets.VBox([\n", 252 | " velocity_widget.fig.canvas,\n", 253 | " widgets.HBox([export_button, data_link])\n", 254 | " ], layout=widgets.Layout(min_width=\"420px\",\n", 255 | " overflow='scroll',\n", 256 | " max_width=\"100%\",\n", 257 | " display='flex'))],\n", 258 | " layout=layout)" 259 | ] 260 | } 261 | ], 262 | "metadata": { 263 | "kernelspec": { 264 | "display_name": "Python 3 (ipykernel)", 265 | "language": "python", 266 | "name": "python3" 267 | }, 268 | "language_info": { 269 | "codemirror_mode": { 270 | "name": "ipython", 271 | "version": 3 272 | }, 273 | "file_extension": ".py", 274 | "mimetype": "text/x-python", 275 | "name": "python", 276 | "nbconvert_exporter": "python", 277 | "pygments_lexer": "ipython3", 278 | "version": "3.9.13" 279 | } 280 | }, 281 | "nbformat": 4, 282 | "nbformat_minor": 5 283 | } 284 | -------------------------------------------------------------------------------- /notebooks/measures/README.md: -------------------------------------------------------------------------------- 1 | ## Search, Download and Plot multiple GeoTIFFs 2 | 3 | ### Summary 4 | In this tutorial we demonstrate how to programmatically access and download GeoTIFF files from the NSIDC DAAC data to your local computer. We then walk through the steps for cropping and resampling one GeoTIFF based on the extent and pixel size of another GeoTIFF, with the end goal of plotting one on top of the other. 5 | 6 | We use two data sets from the NASA [MEaSUREs](https://nsidc.org/data/measures) (Making Earth System data records for Use in Research Environments) program as an example: 7 | 8 | * [MEaSUREs Greenland Ice Mapping Project (GrIMP) Digital Elevation Model from GeoEye and WorldView Imagery, Version 2 (NSIDC-0715)](https://nsidc.org/data/nsidc-0715/versions/2) 9 | * [MEaSUREs Greenland Ice Velocity: Selected Glacier Site Velocity Maps from InSAR, Version 4 (NSIDC-0481)](https://nsidc.org/data/nsidc-0481/versions/4) 10 | 11 | 12 | ### Set up 13 | 14 | To run the notebook provided in this folder, please see the [NSIDC-Data-Tutorials repository readme](https://github.com/nsidc/NSIDC-Data-Tutorials#readme) for instructions on several ways (using Binder, Docker, or Conda) to do this. 15 | 16 | ### Key Learning Objectives 17 | 18 | 1. Use the `earthaccess` library for authentication and to programmatically search for and download NSIDC DAAC data that meet specified spatial and temporal requirements. 19 | 2. Use the `gdal` and `osr` modules from the `osgeo` package to crop and resample one GeoTIFF based on the extent and pixel size of another GeoTIFF. 20 | 3. Use `rasterio`, `numpy` and `matplotlib` libraries to overlay one GeoTIFF on top of another. 
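As a rough orientation before opening the notebook, the sketch below shows the shape of that workflow. It is a minimal illustration rather than a copy of the notebook code: the short names are the two product IDs listed above, while the bounding box, temporal range, granule counts and local file names are placeholder values that you would replace with your own.

```python
# Minimal sketch of the workflow described above (not taken from the notebook).
# The bounding box, temporal range, and file names below are placeholders.
import earthaccess
from osgeo import gdal

earthaccess.login()  # authenticate with Earthdata Login (e.g. via ~/.netrc)

# 1. Search for and download a small number of granules from each data set
bbox = (-51.0, 68.5, -48.0, 70.0)  # lon/lat bounding box over west Greenland (example only)
dem_granules = earthaccess.search_data(short_name="NSIDC-0715", bounding_box=bbox)
vel_granules = earthaccess.search_data(short_name="NSIDC-0481", bounding_box=bbox,
                                        temporal=("2019-01-01", "2019-12-31"))
earthaccess.download(dem_granules[:1], "data")
earthaccess.download(vel_granules[:1], "data")

# 2. Crop and resample the DEM onto the grid of the velocity GeoTIFF with gdal.Warp
vel = gdal.Open("data/velocity_map.tif")  # placeholder file name
gt = vel.GetGeoTransform()                # (x_min, x_res, 0, y_max, 0, -y_res)
bounds = (gt[0],                               # x_min
          gt[3] + gt[5] * vel.RasterYSize,     # y_min
          gt[0] + gt[1] * vel.RasterXSize,     # x_max
          gt[3])                               # y_max
gdal.Warp("data/dem_matched.tif", "data/dem.tif",
          outputBounds=bounds, xRes=gt[1], yRes=abs(gt[5]),
          dstSRS=vel.GetProjection(), resampleAlg="bilinear")
# data/dem_matched.tif can now be read alongside the velocity GeoTIFF for plotting
```

The notebook covers the same steps in more detail, including how to choose granules and how to overlay the two rasters with `rasterio` and `matplotlib`.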
21 | -------------------------------------------------------------------------------- /notebooks/measures/environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: measures 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.9 6 | - gdal=3.6 7 | - matplotlib-base>=3.5 8 | - numpy=1.23.5 9 | - earthaccess>=0.5.1 10 | - rasterio~=1.3.6 11 | - affine 12 | -------------------------------------------------------------------------------- /notebooks/measures/img/example_geotiff_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/measures/img/example_geotiff_plot.png -------------------------------------------------------------------------------- /notebooks/measures/img/nsidc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/notebooks/measures/img/nsidc_logo.png -------------------------------------------------------------------------------- /pedagogy_guide.md: -------------------------------------------------------------------------------- 1 | # Pedagogy Best Practices 2 | Pedagogy is commonly understood as an approach to teaching. The "Best Practices" listed below are meant as a guide 3 | to developing Jupyter Notebooks for tutorials. These tutorials may be self-guided tutorials intended for users to work through 4 | on their own, or tutorials taught by an instructor in a workshop or hackathon environment. 5 | 6 | The "Best Practices" draw on ideas from the [Carpentries](https://software-carpentry.org/) and also include some ideas on 7 | content from [divio.com](https://documentation.divio.com/) and [EarthCube notebooks](https://www.earthcube.org/notebooks). 8 | 9 | ### Have a well-defined purpose for your notebook 10 | What is the purpose of the tutorial? Each notebook should have a clear description of what it covers. Focusing on one task 11 | is better than trying to cover multiple tasks. See also [Avoid Cognitive Overload](#avoid-cognitive-overload). 12 | 13 | Describe the purpose or aim of the notebook as an introduction. If the aim is to produce a plot, include a PNG of the plot to show 14 | the end result. 15 | 16 | ### Enumerate learning objectives 17 | List the learning objectives after the notebook description. 18 | 19 | ### Consider your audience 20 | Are they beginners with no experience of writing code, or are they experienced coders who just want to find the best way to read some data? 21 | 22 | ### Consider the duration of the tutorial 23 | Try to keep the time required to complete a notebook short. This speaks to focusing on one task. Users may give up or lose interest if notebooks require too much time. 24 | Thirty minutes is a good duration to aim for. 25 | 26 | ### Consider the aim/objective of the notebook 27 | Is the notebook a Tutorial to teach a concept or tool, or a How-To Guide to show how to perform a task? Is the notebook intended to be self-guided or taught by an instructor in a workshop? 
28 | 29 | [divio.com](https://documentation.divio.com/structure/) provides a good overview of the differences between Tutorials and How-To Guides; both describe practical steps, but Tutorials are designed to help users understand concepts, whereas How-To Guides are more problem oriented and designed to solve problems that come up during work. 30 | 31 | ### Avoid Cognitive Overload 32 | Avoid cognitive overload by focusing notebook content on one main task. Break complex notebooks into several notebooks. 33 | This is not the only or the best guide, but it is useful: https://blog.innerdrive.co.uk/4-ways-to-overcome-cognitive-overload?hs_amp=true 34 | Notebooks should cover a single, short task; this avoids cognitive overload. 35 | 36 | ### Follow best practices for coding but avoid overly complicated code 37 | Aim to follow best practices for coding but also avoid overly complicated code that obscures the teaching aim. For example, there is no need to 38 | include code to manage errors and exceptions. 39 | 40 | Code needs to be understandable rather than efficient. Avoid trying to write code as a 'developer'. 41 | 42 | ### Avoid long code blocks 43 | If a code block is so long that users cut-and-paste the code or press shift-enter, then they are not learning, just copying. The Carpentries advocate live-coding, 44 | where an instructor types code in real-time, correcting mistakes as they go and describing what they are doing. This approach helps with pacing and length of content. 45 | 46 | While most useful in workshops, thinking of writing code live, even for self-guided tutorials, will help keep both the code and the cognitive load manageable. 47 | 48 | Short code blocks also help break tasks into understandable steps. Think in terms of the steps we need to understand to complete a task. 49 | 50 | ### Use standard or well-established packages (unless introducing a new package) 51 | Aim to use packages either from the standard library or well-established packages. Obscure packages or packages that are no longer 52 | maintained may introduce too many dependencies or end up breaking code. The only exception here is if we are introducing a new package. 53 | 54 | ### Use Live Coding for In-Person Tutorials 55 | Live coding is where an instructor types code in real-time, talking through what they are doing, instead of cutting-and-pasting or just running code blocks. Participants follow along, also typing code. This approach has advantages for pacing. An instructor can only go as quickly as they can type. Students get practice typing code. Students also get to see mistakes, error messages, and solutions. 56 | 57 | See this [article](https://carpentries.github.io/instructor-training/17-live/) from the Carpentries for more on live coding. 58 | 59 | ### Use small data sets to reduce download time and make it quicker for a user to work through the tutorial 60 | Users want to focus on learning how to perform tasks and not spend a lot of time downloading data. Select sample datasets and files that take a __maximum of 3 minutes__ to download. In some cases, you may have to create files and datasets that are subsets of NSIDC-hosted data. 61 | 62 | Download times may vary depending on the environment and platform where tutorials are being executed. Tutorials designed for use in a cloud instance may be able to use larger datasets. 
63 | 64 | ### Avoid writing helper functions in a separate module 65 | Relying on helper functions stored in a separate module will hide what we are trying to teach and could also decrease the ability of users to recreate code. 66 | 67 | The only exception here is if we are teaching how to write code. 68 | 69 | ## Resources 70 | - [Carpentries instructor training](https://carpentries.github.io/instructor-training/) 71 | - [divio.com documentation guide](https://documentation.divio.com/) 72 | - [EarthCube peer-reviewed notebook examples](https://www.earthcube.org/notebooks) 73 | -------------------------------------------------------------------------------- /tutorial_templates/img/example_end_product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/tutorial_templates/img/example_end_product.png -------------------------------------------------------------------------------- /tutorial_templates/img/nsidc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nsidc/NSIDC-Data-Tutorials/f14334c5d1acf75616fbe4480c7500a07a7d2a65/tutorial_templates/img/nsidc_logo.png -------------------------------------------------------------------------------- /tutorial_templates/tutorial_template.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e86eaecf-a612-4dbb-8bdc-5b5dfddf65b9", 6 | "metadata": {}, 7 | "source": [ 8 | "
\n", 9 | "\n", 10 | "\n", 11 | "# **Tutorial Title**\n", 12 | "\n", 13 | "
\n", 14 | "\n", 15 | "---\n", 16 | "\n", 17 | "*This template is designed for you to make a copy and replace the text and sample code in each section with your own, while following the general format outlined here. You will likely have a different number of cells in each section, so feel free to add and remove. The template is meant as a guide for which content is important to include.*\n", 18 | "\n", 19 | "**Reminders:** \n", 20 | " \n", 21 | "**Please read our [Contribution Guide](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/contributor_guide.md) and our [Pedagogy Guide](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/pedagogy_guide.md) before creating your tutorial. The pedagogy guide outlines best practices for creating and delivering code-based tutorials in a notebook format.**\n", 22 | "\n", 23 | "**We recommend that before starting to develop the tutorial, it is a good idea to review the libraries that are already in the [tutorials environment](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/binder/environment.yml) and to use these within your new tutorial if possible. If the tutorial includes the use of a new package or library, consult with the NSIDC Data Use and Education team via GitHub issue on whether an existing library can be used instead. If not, work with NSIDC to modify the environment.yml file.**" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "id": "4101ae06-3984-435c-abcc-f6346d15069b", 29 | "metadata": {}, 30 | "source": [ 31 | "## **1. Tutorial Introduction/Overview**\n", 32 | "\n", 33 | "Provide an overview of the tutorial here, including any relevant background material and the motivation for creating the tutorial. Indicate whether the tutorial was designed to be self-led or presented at a hackweek or conference workshop. \n" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "dd6c0128-efe4-4fab-8721-55fc366e3c7e", 39 | "metadata": {}, 40 | "source": [ 41 | "### **Credits**\n", 42 | "\n", 43 | "List the contributors to the tutorial.\n", 44 | "\n", 45 | "Include the following statement:\n", 46 | "\n", 47 | "For questions regarding the notebook, or to report problems, please create a new issue in the [NSIDC-Data-Tutorials repo](https://github.com/nsidc/NSIDC-Data-Tutorials/issues)." 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "id": "a57c664e-76f9-416e-ae03-75dce51b3cb7", 53 | "metadata": {}, 54 | "source": [ 55 | "### **Objectives/Learning Goals/Outcomes**\n", 56 | "\n", 57 | "List the objectives and/or skills gained upon completion of the tutorial.\n", 58 | "\n", 59 | "*After completing this notebook you will be able to...* " 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "id": "015703a9-f02a-42f4-8ff0-3b002bf4f2f5", 65 | "metadata": {}, 66 | "source": [ 67 | "### **Prerequisites/Knowledge Requirements**\n", 68 | "\n", 69 | "Please describe necessary set-up steps required before running the notebook. \n", 70 | "\n", 71 | "Include guidance for using the tutorials environment and point to the main repo [README](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/README.md) for setup instructions.\n", 72 | "\n", 73 | "Indicate whether an Earthdata Login is required for data access, and include the [link](https://urs.earthdata.nasa.gov/) for registration. 
Current recommended practice for authentication is to create a .netrc file in your home directory following [these instructions](https://nsidc.org/support/how/how-do-i-programmatically-request-data-services) (Step 1) and to use the .netrc file for authentication when required for data access during the tutorial.\n", 74 | "\n", 75 | "\n", 76 | "Include a list of knowledge requirements or skills that would be necessary or useful for successful completion or comprehension of the tutorial. Some examples of useful information for tutorial users include the following:\n", 77 | "\n", 78 | "*To get the most out of this tutorial notebook, you should be familiar with the following concepts/data sets/programming languages...*\n", 79 | "\n", 80 | "*The main packages/libraries that will be used in this notebook are...*\n", 81 | "\n", 82 | "*The GIS concepts applied in this tutorial are...*" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "id": "b130c74f-85e6-41dd-a7c9-3249b3dee113", 88 | "metadata": {}, 89 | "source": [ 90 | "### **Example of end product (recommended, not required)** \n", 91 | "\n", 92 | "Include a figure that illustrates the end product of the notebook. This could be a data plot, map or some other type of visualization.\n", 93 | "\n", 94 | "Please include figures in an \"img\" folder located at the same level as the notebook within your tutorial folder.\n", 95 | "\n", 96 | "
\n", 97 | "\n", 98 | "
" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "id": "c45f3276-1172-4bfb-8389-e9d3cbbe88f4", 104 | "metadata": {}, 105 | "source": [ 106 | "### **Time requirement**\n", 107 | "\n", 108 | "Provide an estimate of the amount of time required to complete the tutorial." 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "id": "53b77eb5-d5ed-4ddd-8fb1-6c69618d7852", 114 | "metadata": {}, 115 | "source": [ 116 | "## **2. Tutorial steps**" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "id": "7820a737-33f0-4470-b9a4-03c5c4f0354c", 122 | "metadata": {}, 123 | "source": [ 124 | "### **Import libraries and classes**\n", 125 | "\n", 126 | "Import the libraries/packages and tools that will be needed to run the code in the tutorial. It can be really helpful to include comments next to each import indicating usage." 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "id": "59e79729-1b02-4ef5-aee1-8923690243da", 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "# Importing libraries\n", 137 | "\n", 138 | "from import # comment describing what the library/tool is used for in the code" 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "id": "1966ffa6-a5f2-4520-a8dc-f37678a2cf7a", 144 | "metadata": {}, 145 | "source": [ 146 | "### **Authentication/Earthdata Login (optional - for NASA Earthdata only)**\n", 147 | "\n", 148 | "Different libraries will have different methods for authenticating via Earthdata Login. The following cell contains an example of the authentication process used by the [earthaccess python client for NASA CMR](https://github.com/nsidc/earthdata) (if you are using a different library, yours will be different). The process requires having created a .netrc file in your home directory following [these instructions](https://nsidc.org/support/how/how-do-i-programmatically-request-data-services) (Step 1). The earthaccess library's \"login\" method is called while passing the argument \"strategy=netrc\". If no .netrc file is found, the method will provide interactive cells for entering Earthdata Login credentials within the notebook." 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "id": "d47aa955-3d91-4418-85f9-5772f400f712", 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "#example authentication\n", 159 | "\n", 160 | "auth = Auth().login(strategy='netrc')\n", 161 | "if auth.authenticated is False:\n", 162 | " auth = Auth().login(strategy='interactive')" 163 | ] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "id": "95e2532d-219b-4b9d-b5b9-b43c95b1aa7d", 168 | "metadata": { 169 | "tags": [] 170 | }, 171 | "source": [ 172 | "### **Main body of the tutorial**\n", 173 | "\n", 174 | "Create the body of the tutorial here using a series of code cells. Intersperse the code cells with markdown cells containing descriptions of the steps and any relevant commentary. The number of sub-steps here will vary depending on tutorial content - common steps/actions include data discovery, data access, and file opening/examination.\n", 175 | "\n", 176 | "Some important best practices to consider when designing and creating the tutorial steps include: \n", 177 | " \n", 178 | "*Avoid cognitive overload by restricting your notebook to a single, short task.*\n", 179 | "\n", 180 | "*Limit the amount of code in each cell. 
Many small code snippets in individual cells are better than a single long block of code.*\n", 181 | "\n", 182 | "*Write code to be understandable (rather than efficient, for example).*\n", 183 | "\n", 184 | "*Confine comments to markdown cells as much as possible.*\n", 185 | "\n", 186 | "For more details please see [Pedagogy Best Practices](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/pedagogy_guide.md)." 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "id": "d66e54ff-71dc-422c-9e8a-5b154fa0dbf7", 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "#code cells - add as many as you need, following advice above." 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "910b2ef6-3e14-475e-b689-77bda4c1814e", 202 | "metadata": {}, 203 | "source": [ 204 | "## **3. Learning outcomes recap (optional)**\n", 205 | "\n", 206 | "Provide a brief summary of the learning outcomes of the tutorial\n" 207 | ] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "id": "e87da360-fa68-43e6-993e-5d5098b70aed", 212 | "metadata": {}, 213 | "source": [ 214 | "## **4. Additional resources (optional)**\n", 215 | "\n", 216 | "List some additional resources for users to consult, if applicable/desired." 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "id": "178b3e1e-71e4-4aab-bdbe-4d33dbce01e9", 222 | "metadata": {}, 223 | "source": [ 224 | "________\n", 225 | "\n", 226 | "### **When your tutorial is ready for review, please read our [Contributor Guide](https://github.com/nsidc/NSIDC-Data-Tutorials/blob/main/contributor_guide.md) for next steps.**" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": null, 232 | "id": "66b7019c-b0c8-4c96-a36c-9cb1d17b6303", 233 | "metadata": {}, 234 | "outputs": [], 235 | "source": [] 236 | } 237 | ], 238 | "metadata": { 239 | "kernelspec": { 240 | "display_name": "Python 3 (ipykernel)", 241 | "language": "python", 242 | "name": "python3" 243 | }, 244 | "language_info": { 245 | "codemirror_mode": { 246 | "name": "ipython", 247 | "version": 3 248 | }, 249 | "file_extension": ".py", 250 | "mimetype": "text/x-python", 251 | "name": "python", 252 | "nbconvert_exporter": "python", 253 | "pygments_lexer": "ipython3", 254 | "version": "3.9.13" 255 | } 256 | }, 257 | "nbformat": 4, 258 | "nbformat_minor": 5 259 | } 260 | --------------------------------------------------------------------------------