├── .dockerignore ├── .github └── workflows │ ├── github_actions_CD.yml │ └── github_actions_CI.yml ├── .gitignore ├── CONTRIBUTING ├── Dockerfile ├── LICENSE ├── README.md ├── docker.test.yml ├── drv └── rrr_drv_MERIT_Hydro_v07_Basins_v01_GLDAS_v20.py ├── requirements.apt ├── requirements.pip ├── src ├── rrr_anl_anm_riv.py ├── rrr_anl_hyd_avg.py ├── rrr_anl_hyd_cdf.py ├── rrr_anl_hyd_cmb.py ├── rrr_anl_hyd_fmt.py ├── rrr_anl_hyd_mod.py ├── rrr_anl_hyd_obs.py ├── rrr_anl_hyd_plt.py ├── rrr_anl_hyd_spa.py ├── rrr_anl_hyd_sts.py ├── rrr_anl_hyd_sts_dig.py ├── rrr_anl_hyd_sts_tbl.py ├── rrr_anl_hyd_sum.py ├── rrr_anl_hyd_uqs.py ├── rrr_anl_hyd_xyp.py ├── rrr_anl_map_cdf.py ├── rrr_anl_map_evt_mod.py ├── rrr_anl_map_mag_mod.py ├── rrr_anl_shp_avg_obs.py ├── rrr_anl_shp_avg_riv.py ├── rrr_anl_spl_csv.py ├── rrr_anl_spl_mod.py ├── rrr_anl_spl_mul.py ├── rrr_anl_spl_shp.py ├── rrr_anl_trg_gls.py ├── rrr_arc_cat_hydrosheds.py ├── rrr_cat_bas.py ├── rrr_cat_tot_gen_one_hydrosheds.py ├── rrr_cat_tot_gen_one_meritbasins.py ├── rrr_cat_tot_gen_one_nhdplus.py ├── rrr_cat_tot_shp_dis_per.py ├── rrr_cpl_riv_lsm_att.py ├── rrr_cpl_riv_lsm_avg.py ├── rrr_cpl_riv_lsm_bia.py ├── rrr_cpl_riv_lsm_bvc.py ├── rrr_cpl_riv_lsm_ens.py ├── rrr_cpl_riv_lsm_err.py ├── rrr_cpl_riv_lsm_lnk.py ├── rrr_cpl_riv_lsm_lum.py ├── rrr_cpl_riv_lsm_rte.py ├── rrr_cpl_riv_lsm_vol.py ├── rrr_lsm_tot_add_cfc.py ├── rrr_lsm_tot_cmb_acc.sh ├── rrr_lsm_tot_ens.py ├── rrr_lsm_tot_ldas.py ├── rrr_lsm_tot_lum.py ├── rrr_lsm_tot_utc_shf.py ├── rrr_mat_snd_box.m ├── rrr_obs_bas_cal.py ├── rrr_obs_bas_sub.py ├── rrr_obs_tot_nwisdv.py ├── rrr_obs_tot_snp.py ├── rrr_obs_tot_tbl.py ├── rrr_riv_bas_gen_one_hydrosheds.py ├── rrr_riv_bas_gen_one_meritbasins.py ├── rrr_riv_bas_gen_one_nhdplus.py ├── rrr_riv_tot_chk_nod_nhdplus.py ├── rrr_riv_tot_cst.py ├── rrr_riv_tot_ext_bas_hydrosheds.py ├── rrr_riv_tot_gen_all_hydrosheds.py ├── rrr_riv_tot_gen_all_meritbasins.py ├── rrr_riv_tot_gen_all_nhdplus.py ├── rrr_riv_tot_net_nav.py ├── rrr_riv_tot_scl_prm.py ├── rrr_riv_tot_trm_shp.py └── rrr_tpl.py ├── tst ├── tst_chk_ncf_fil.py ├── tst_chk_ncf_neg.py ├── tst_chk_srt.py ├── tst_cmp_csv.py ├── tst_cmp_n1d.py ├── tst_cmp_n3d.py ├── tst_cmp_ncf.py ├── tst_cmp_shp.py ├── tst_prf_ncf.py ├── tst_pub_dwnl_Collins_etal_202x_TBD.sh ├── tst_pub_dwnl_David_etal_2019_GRL.sh ├── tst_pub_dwnl_Emery_etal_2020_JHM2.sh ├── tst_pub_dwnl_Sikder_etal_2021_WRR.sh ├── tst_pub_repr_Collins_etal_202x_TBD.sh ├── tst_pub_repr_David_etal_2019_GRL.sh ├── tst_pub_repr_Emery_etal_2020_JHM2.sh └── tst_pub_repr_Sikder_etal_2021_WRR.sh └── version.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #.dockerignore 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #The Docker program is informed here to ignore the following files while sending 7 | #files to the Docker daemon as the build context. Ignoring files helps decrease 8 | #time for build, pull and push of images; and runtime size for containers. 9 | #Author: 10 | #Cedric H. 
David, 2018-2023 11 | 12 | 13 | #******************************************************************************* 14 | #List of files that Docker will ignore 15 | #******************************************************************************* 16 | 17 | #------------------------------------------------------------------------------- 18 | #The input and output files stored in the sub-directories of input/ and output/ 19 | #------------------------------------------------------------------------------- 20 | input/ 21 | output/ 22 | 23 | #------------------------------------------------------------------------------- 24 | #The files potentially added to the tst/ directory, except for some key files 25 | #------------------------------------------------------------------------------- 26 | tst/* 27 | !tst/tst_cmp_csv.py 28 | !tst/tst_cmp_n1d.py 29 | !tst/tst_cmp_ncf.py 30 | !tst/tst_cmp_shp.py 31 | !tst/tst_chk_srt.py 32 | !tst/tst_chk_ncf.py 33 | !tst/tst_prf_ncf.py 34 | !tst/tst_pub* 35 | 36 | 37 | #******************************************************************************* 38 | #End 39 | #******************************************************************************* 40 | -------------------------------------------------------------------------------- /.github/workflows/github_actions_CD.yml: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #github_actions_CD.yml 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #GitHub Actions is a continuous integration and continuous delivery (CI/CD) 7 | #platform that allows to automate the build, test, and deployment pipeline. The 8 | #The purpose of this file is to give instructions to GitHub on how to do the 9 | #image deployment to Docker Hub. 10 | #Author: 11 | #Cedric H. David, 2022-2023. 12 | 13 | 14 | #******************************************************************************* 15 | #Name and specification of triggers 16 | #******************************************************************************* 17 | name: GitHub Actions CD 18 | on: 19 | push: 20 | branches: 21 | - 'main' 22 | tags: 23 | - 'v*' 24 | - '20*' 25 | 26 | 27 | #******************************************************************************* 28 | #Jobs 29 | #******************************************************************************* 30 | jobs: 31 | 32 | #---------------------------------------------------------------------------- 33 | #Ubuntu 34 | #---------------------------------------------------------------------------- 35 | ubuntu: 36 | runs-on: ubuntu-20.04 37 | 38 | steps: 39 | 40 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 41 | #Display GitHub Action-related information 42 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 43 | - name: Display GitHub Action-related information 44 | run: | 45 | echo "This job was triggered by a ${{ github.event_name }} event." 46 | echo "This job is running on a ${{ runner.os }} server at GitHub!" 47 | echo "The name of the repository is ${{ github.repository }}." 48 | echo "The full git reference of this event is ${{ github.ref }}." 49 | echo "The short git reference of this event is ${{ github.ref_name }}." 
50 | echo "The current working directory is $PWD" 51 | echo "The GitHub workspace is ${{ github.workspace }}" 52 | echo "The shell used is $SHELL" 53 | 54 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 55 | #Checkout RRR code 56 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 57 | - name: Checkout RRR code 58 | uses: actions/checkout@v4 59 | 60 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 61 | #Set up QEMU 62 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 63 | - name: Set up QEMU 64 | uses: docker/setup-qemu-action@v3 65 | 66 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 67 | #Set up Docker Buildx 68 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 69 | - name: Set up Docker Buildx 70 | uses: docker/setup-buildx-action@v3 71 | 72 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 73 | #Login to Docker Hub 74 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 75 | - name: Login to Docker Hub 76 | uses: docker/login-action@v3 77 | with: 78 | username: ${{ secrets.DOCKER_HUB_NAME }} 79 | password: ${{ secrets.DOCKER_HUB_TOKN }} 80 | 81 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 82 | #Build and push to Docker Hub 83 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 84 | - name: Build and push 85 | uses: docker/build-push-action@v5 86 | with: 87 | push: true 88 | platforms: linux/amd64,linux/arm64 89 | tags: | 90 | ${{ secrets.DOCKER_HUB_NAME }}/rrr:latest 91 | ${{ secrets.DOCKER_HUB_NAME }}/rrr:${{ github.ref_name }} 92 | 93 | 94 | #******************************************************************************* 95 | #End 96 | #******************************************************************************* 97 | -------------------------------------------------------------------------------- /.github/workflows/github_actions_CI.yml: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #github_actions_CI.yml 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #GitHub Actions is a continuous integration and continuous delivery (CI/CD) 7 | #platform that allows to automate the build, test, and deployment pipeline. The 8 | #The purpose of this file is to give instructions to GitHub on how to do the 9 | #integration work. 10 | #Authors: 11 | #Jay Siri, Cedric H. David, 2022-2023. 
12 | 13 | 14 | #******************************************************************************* 15 | #Name and specification of triggers 16 | #******************************************************************************* 17 | name: GitHub Actions CI 18 | on: 19 | push: 20 | pull_request: 21 | 22 | 23 | #******************************************************************************* 24 | #Jobs 25 | #******************************************************************************* 26 | jobs: 27 | 28 | #---------------------------------------------------------------------------- 29 | #Ubuntu 30 | #---------------------------------------------------------------------------- 31 | build: 32 | runs-on: ubuntu-20.04 33 | strategy: 34 | matrix: 35 | include: 36 | - dwnl: ./tst_pub_dwnl_Emery_etal_2020_JHM2.sh 37 | repr: ./tst_pub_repr_Emery_etal_2020_JHM2.sh 38 | 39 | steps: 40 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 41 | #Checkout RAPID code 42 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 43 | - name: Checkout RRR code 44 | uses: actions/checkout@v3 45 | 46 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 47 | #Display GitHub Action-related information 48 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 49 | - name: Display GitHub Action-related information 50 | run: | 51 | echo "This job was triggered by a ${{ github.event_name }} event." 52 | echo "This job is running on a ${{ runner.os }} server at GitHub!" 53 | echo "The name of the repository is ${{ github.repository }}." 54 | echo "The name of the branch is ${{ github.ref }}." 55 | echo "The current working directory is $PWD" 56 | echo "The GitHub workspace is ${{ github.workspace }}" 57 | echo "The shell used is $SHELL" 58 | 59 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 60 | #Install OS requirements 61 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 62 | - name: Install OS requirements 63 | run: | 64 | sudo apt-get update 65 | sudo apt-get install -y --no-install-recommends $(grep -v -E '(^#|^$)' requirements.apt) 66 | sudo rm -f /usr/bin/python3 67 | sudo ln -s /usr/bin/python3.9 /usr/bin/python3 68 | 69 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 70 | #Install Python requirements 71 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 72 | - name: Install Python requirements 73 | run: | 74 | wget https://bootstrap.pypa.io/pip/get-pip.py 75 | sudo python3 get-pip.py --no-cache-dir `grep 'pip==' requirements.pip` `grep 'setuptools==' requirements.pip` `grep 'wheel==' requirements.pip` 76 | rm get-pip.py 77 | which pip3 78 | pip3 --version 79 | sudo pip3 install --no-cache-dir -r requirements.pip 80 | 81 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 82 | #Run tests 83 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 84 | - name: Run tests 85 | env: 86 | TST_PUB_DWNL: ${{matrix.dwnl}} 87 | TST_PUB_REPR: ${{matrix.repr}} 88 | run: | 89 | cd $GITHUB_WORKSPACE 90 | cd ./tst 91 | echo $TST_PUB_DWNL && $TST_PUB_DWNL 92 | echo $TST_PUB_REPR && $TST_PUB_REPR 93 | 94 | 95 | #******************************************************************************* 96 | #End 97 | #******************************************************************************* 98 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #.gitignore 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #The git program is informed here to ignore the following files while performing 7 | #its distributed revision control and source code management. 8 | #Author: 9 | #Cedric H. David, 2015-2023 10 | 11 | 12 | #******************************************************************************* 13 | #List of files that git will ignore 14 | #******************************************************************************* 15 | 16 | #------------------------------------------------------------------------------- 17 | #The input and output files stored in the sub-directories of input/ and output/ 18 | #------------------------------------------------------------------------------- 19 | input/ 20 | output/ 21 | 22 | #------------------------------------------------------------------------------- 23 | #The files potentially added to the tst/ directory, except for some key files 24 | #------------------------------------------------------------------------------- 25 | tst/* 26 | !tst/tst_cmp_csv.py 27 | !tst/tst_cmp_n1d.py 28 | !tst/tst_cmp_n3d.py 29 | !tst/tst_cmp_ncf.py 30 | !tst/tst_cmp_shp.py 31 | !tst/tst_chk_srt.py 32 | !tst/tst_chk_ncf_fil.py 33 | !tst/tst_chk_ncf_neg.py 34 | !tst/tst_prf_ncf.py 35 | !tst/tst_pub* 36 | 37 | 38 | #******************************************************************************* 39 | #End 40 | #******************************************************************************* 41 | -------------------------------------------------------------------------------- /CONTRIBUTING: -------------------------------------------------------------------------------- 1 | There are many ways in which RRR can be improved, and many new features that 2 | should be added to it. We therefore enthusiastically welcome contributions 3 | from others! If you are interested in helping with the development of RRR, 4 | please contact the lead developer to discuss your ideas before embarking on the 5 | coding journey, as such will help ensure the least overlap with ongoing efforts 6 | and the most efficient use of your time. 7 | 8 | The following people have so far contributed to the development of RRR: 9 | 10 | Lead developer: 11 | - Cedric H. David 12 | 13 | Contributing developers (thank you!!!): 14 | - George H. Allen 15 | - Jeffrey S. Wade 16 | - Konstantinos M. Andreadis 17 | - Klemen Cotar 18 | - Etienne Fluet Chouinard 19 | - Ashish Mahabal 20 | - M. Safat Sikder 21 | - Alex Christopher Lim 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #Dockerfile 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #This file describes the operating system prerequisites for RRR, and is used by 7 | #the Docker software. 8 | #Author: 9 | #Cedric H. David, 2018-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Usage 14 | #******************************************************************************* 15 | #docker build -t rrr:myimage -f Dockerfile . 
#Create image 16 | #docker run --rm --name rrr_mycontainer \ 17 | # -it rrr:myimage #Run image in container 18 | #docker run --rm --name rrr_mycontainer \ 19 | # -v $PWD/input:/home/rrr/input \ 20 | # -v $PWD/output:/home/rrr/output \ 21 | # -it rrr:myimage #Run and map volumes 22 | #docker save -o rrr_myimage.tar rrr:myimage #Save a copy of image 23 | #docker load -i rrr_myimage.tar #Load a saved image 24 | 25 | 26 | #******************************************************************************* 27 | #Operating System 28 | #******************************************************************************* 29 | FROM debian:11.7-slim 30 | 31 | 32 | #******************************************************************************* 33 | #Copy files into Docker image (this ignores the files listed in .dockerignore) 34 | #******************************************************************************* 35 | WORKDIR /home/rrr/ 36 | COPY . . 37 | 38 | 39 | #******************************************************************************* 40 | #Operating System Requirements 41 | #******************************************************************************* 42 | RUN apt-get update && \ 43 | apt-get install -y --no-install-recommends $(grep -v -E '(^#|^$)' requirements.apt) && \ 44 | rm -rf /var/lib/apt/lists/* 45 | 46 | 47 | #******************************************************************************* 48 | #Python requirements 49 | #******************************************************************************* 50 | ADD https://bootstrap.pypa.io/pip/get-pip.py . 51 | RUN python3 get-pip.py --no-cache-dir \ 52 | `grep 'pip==' requirements.pip` \ 53 | `grep 'setuptools==' requirements.pip` \ 54 | `grep 'wheel==' requirements.pip` && \ 55 | rm get-pip.py 56 | 57 | RUN pip3 install --no-cache-dir -r requirements.pip 58 | 59 | 60 | #******************************************************************************* 61 | #Intended (default) command at execution of image (not used during build) 62 | #******************************************************************************* 63 | CMD /bin/bash 64 | 65 | 66 | #******************************************************************************* 67 | #End 68 | #******************************************************************************* 69 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2007-2023, Cedric H. David 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, 6 | are permitted provided that the following conditions are met: 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, this 10 | list of conditions and the following disclaimer in the documentation and/or 11 | other materials provided with the distribution. 12 | * The name Cedric H. David may not be used to endorse or promote products 13 | derived from this software without specific prior written permission. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL CEDRIC H. 
DAVID BE LIABLE FOR ANY DIRECT, 19 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 20 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 23 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 24 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RRR 2 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3236649.svg)](https://doi.org/10.5281/zenodo.3236649) 3 | 4 | [![License (3-Clause BSD)](https://img.shields.io/badge/license-BSD%203--Clause-yellow.svg)](https://github.com/c-h-david/rrr/blob/main/LICENSE) 5 | 6 | [![Docker Images](https://img.shields.io/badge/docker-images-blue?logo=docker)](https://hub.docker.com/r/chdavid/rrr/tags) 7 | 8 | [![GitHub CI Status](https://github.com/c-h-david/rrr/actions/workflows/github_actions_CI.yml/badge.svg)](https://github.com/c-h-david/rrr/actions/workflows/github_actions_CI.yml) 9 | 10 | [![GitHub CD Status](https://github.com/c-h-david/rrr/actions/workflows/github_actions_CD.yml/badge.svg)](https://github.com/c-h-david/rrr/actions/workflows/github_actions_CD.yml) 11 | 12 | The Reproducible Routing Rituals (RRR) is a Python and bash shell toolbox that 13 | combines many repetitive pre- and post-processing tasks that are common to 14 | studying the movements of water on and underneath the land surface. Such tasks 15 | include the preparation of files corresponding to: 16 | 17 | - River network details (connectivity, parameters, sort, coordinates, subset) 18 | - Contributing catchments information (area, coordinates) 19 | - Reformatted land surface model outputs 20 | - Coupling of LSM outputs and catchments to estimate water inflow into rivers 21 | - Observed gauge data 22 | - Analysis of these and associated data from a hydrological perspective 23 | 24 | Vector-based ("blue line") river networks and associated contributing catchments 25 | can be used from the following datasets: 26 | 27 | - The enhanced National Hydrography Dataset (NHDPlus, versions 1 and 2) 28 | - The Hydrological data and maps based on SHuttle Elevation Derivatives at 29 | multiple Scales (HydroSHEDS) 30 | - The Multi-Error-Removed Improved-Terrain (MERIT) Basins 31 | 32 | Surface and subsurface runoff are obtained using model outputs from: 33 | 34 | - The Global Land Data Assimilation System (GLDAS) 35 | - The North American Land Data Assimilation System (NLDAS) 36 | 37 | Water inflow from the land surface models and into the hydrographic networks is 38 | formatted for use within: 39 | 40 | - The Routing Application for Parallel computatIon of Discharge (RAPID) 41 | 42 | Observed gauges are gathered from: 43 | 44 | - The National Water Information System (NWIS) 45 | 46 | Hydrological data analysis is done for the above datasets, as well as model 47 | outputs from: 48 | 49 | - The Routing Application for Parallel computatIon of Discharge (RAPID) 50 | 51 | RRR is specifically designed to work hand-in-hand with RAPID. Further 52 | information on both RAPID and RRR can be found on the RAPID website at: 53 | [http://rapid-hub.org/](http://rapid-hub.org/). 54 | 55 | ## Installation with Docker 56 | Installing RRR is **by far the easiest with Docker**.
This document was 57 | written and tested using 58 | [Docker Community Edition](https://www.docker.com/community-edition#/download) 59 | which is available for free and can be installed on a wide variety of operating 60 | systems. To install it, follow the instructions in the link provided above. 61 | 62 | Note that the experienced users may find more up-to-date installation 63 | instructions in 64 | [Dockerfile](https://github.com/c-h-david/rrr/blob/main/Dockerfile). 65 | 66 | ### Download RRR 67 | Downloading RRR with Docker can be done using: 68 | 69 | ``` 70 | $ docker pull chdavid/rrr 71 | ``` 72 | 73 | ### Install packages 74 | The beauty of Docker is that there is **no need to install anymore packages**. 75 | RRR is ready to go! To run it, just use: 76 | 77 | ``` 78 | $ docker run --rm -it chdavid/rrr 79 | ``` 80 | 81 | ## Testing with Docker 82 | Testing scripts are currently under development. 83 | 84 | Note that the experienced users may find more up-to-date testing instructions 85 | in 86 | [docker.test.yml](https://github.com/c-h-david/rrr/blob/main/docker.test.yml). 87 | 88 | ## Installation on Debian 89 | This document was written and tested on a machine with a **clean** image of 90 | [Debian 11.7.0 ARM64](https://cdimage.debian.org/cdimage/archive/11.7.0/arm64/iso-cd/debian-11.7.0-arm64-netinst.iso) 91 | installed, *i.e.* **no update** was performed, and **no upgrade** either. 92 | Similar steps **may** be applicable for Ubuntu. 93 | 94 | Note that the experienced users may find more up-to-date installation 95 | instructions in 96 | [github\_actions\_CI.yml](https://github.com/c-h-david/rrr/blob/main/.github/workflows/github_actions_CI.yml). 97 | 98 | ### Download RRR 99 | First, make sure that `git` is installed: 100 | 101 | ``` 102 | $ sudo apt-get install -y --no-install-recommends git 103 | ``` 104 | 105 | Then download RRR: 106 | 107 | ``` 108 | $ git clone https://github.com/c-h-david/rrr 109 | ``` 110 | 111 | Finally, enter the RRR directory: 112 | 113 | ``` 114 | $ cd rrr/ 115 | ``` 116 | 117 | ### Install APT packages 118 | Software packages for the Advanced Packaging Tool (APT) are summarized in 119 | [requirements.apt](https://github.com/c-h-david/rrr/blob/main/requirements.apt) 120 | and can be installed with `apt-get`. All packages can be installed at once using: 121 | 122 | ``` 123 | $ sudo apt-get install -y --no-install-recommends $(grep -v -E '(^#|^$)' requirements.apt) 124 | ``` 125 | 126 | > Alternatively, one may install the APT packages listed in 127 | > [requirements.apt](https://github.com/c-h-david/rrr/blob/main/requirements.apt) 128 | > one by one, for example: 129 | > 130 | > ``` 131 | > $ sudo apt-get install -y --no-install-recommends python3.9 132 | >``` 133 | 134 | Also make sure that `python3` points to `python3.9`: 135 | 136 | ``` 137 | $ sudo rm -f /usr/bin/python3 138 | $ sudo ln -s /usr/bin/python3.9 /usr/bin/python3 139 | ``` 140 | 141 | ### Install Python packages 142 | Python packages from the Python Package Index (PyPI) are summarized in 143 | [requirements.pip](https://github.com/c-h-david/rrr/blob/main/requirements.pip) 144 | and can be installed with `pip`. 
But first, let's make sure that the latest 145 | version of `pip` is installed 146 | 147 | ``` 148 | $ wget https://bootstrap.pypa.io/pip/get-pip.py 149 | $ sudo python3 get-pip.py --no-cache-dir `grep 'pip==' requirements.pip` `grep 'setuptools==' requirements.pip` `grep 'wheel==' requirements.pip` 150 | $ rm get-pip.py 151 | ``` 152 | 153 | All packages can be installed at once using: 154 | 155 | ``` 156 | $ sudo pip3 install --no-cache-dir -r requirements.pip 157 | ``` 158 | 159 | > Alternatively, one may install the PyPI packages listed in 160 | > [requirements.pip](https://github.com/c-h-david/rrr/blob/main/requirements.pip) 161 | > one by one, for example: 162 | > 163 | > ``` 164 | > $ sudo pip3 install dbf==0.99.2 165 | > ``` 166 | 167 | ## Testing on Debian 168 | Testing scripts are currently under development. 169 | 170 | Note that the experienced users may find more up-to-date testing instructions 171 | in 172 | [github\_actions\_CI.yml](https://github.com/c-h-david/rrr/blob/main/.github/workflows/github_actions_CI.yml). 173 | -------------------------------------------------------------------------------- /docker.test.yml: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #docker.test.yml 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #Docker-compose can be used to build a Docker image from a Dockerfile and run 7 | #the image in a Docker container using a specific command that performs a series 8 | #of tests on the software included in the image. The purpose of this 9 | #docker.test.yml file is to give instructions to docker-compose on how to do the 10 | #work. 11 | #Author: 12 | #Cedric H. David, 2018-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Usage 17 | #******************************************************************************* 18 | #docker-compose -f docker.test.yml up #Create image if it does not exist, and 19 | #run it in a container w/ the command. 20 | #Note: command has no impact on image. 21 | #docker-compose -f docker.test.yml down #Remove the container, not the image. 22 | 23 | 24 | #******************************************************************************* 25 | #Test 26 | #******************************************************************************* 27 | sut: 28 | build: . 29 | #The name of the Docker image that is created will be rrr_sut, this name 30 | #is generated automatically based on the folder where docker-compose is run 31 | #(rrr) and the service name (sut, i.e. 
'system under test') 32 | command: bash -xc "echo machine urs.earthdata.nasa.gov login $NETRC_LOGIN_EDATA password $NETRC_PSWRD_EDATA >> ~/.netrc;"\ 33 | "cd ./tst/;"\ 34 | "./tst_pub_dwnl_Emery_etal_2020_JHM2.sh;"\ 35 | "./tst_pub_repr_Emery_etal_2020_JHM2.sh" 36 | #bash -c (string) allows to make the code more readable here 37 | #bash -x (verbose) allows to make the commands more readable at runtime 38 | volumes: 39 | - ./input:/home/rrr/input 40 | - ./output:/home/rrr/output 41 | #Volumes map the input & output folders in the local HD to the container 42 | 43 | 44 | #******************************************************************************* 45 | #End 46 | #******************************************************************************* 47 | -------------------------------------------------------------------------------- /requirements.apt: -------------------------------------------------------------------------------- 1 | #******************************************************************************* 2 | #requirements.apt 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #This file lists all the packages from the Advanced Packaging Tool that are 7 | #required by RRR, and is used by the "apt-get" software. 8 | #Author: 9 | #Cedric H. David, 2017-2023 10 | 11 | #******************************************************************************* 12 | #Usage 13 | #******************************************************************************* 14 | #sudo apt-get install -y $(grep -v -E '(^#|^$)' requirements.apt) 15 | 16 | 17 | #******************************************************************************* 18 | #Requirements for apt-get 19 | #******************************************************************************* 20 | 21 | #------------------------------------------------------------------------------- 22 | #Code management 23 | #------------------------------------------------------------------------------- 24 | git 25 | #version control system 26 | vim 27 | #text editor 28 | zip 29 | #file compression 30 | 31 | #------------------------------------------------------------------------------- 32 | #Code building 33 | #------------------------------------------------------------------------------- 34 | python3.9-dev 35 | #Includes binary libraries (python3.9) and header files (python3.9-dev) 36 | python3-distutils 37 | #python3 support for building and installing additional modules 38 | g++ 39 | #includes C (gcc) and C++ (g++) compilers 40 | libgdal-dev 41 | #files needed to develop a software that use GDAL/OGR 42 | libspatialindex-dev 43 | #spatial indexing methods, needed for rtree 44 | ffmpeg 45 | #multimedia file transcoding 46 | nco 47 | #netCDF operators 48 | 49 | #------------------------------------------------------------------------------- 50 | #Code testing 51 | #------------------------------------------------------------------------------- 52 | wget 53 | #download utility 54 | ca-certificates 55 | #certificate authority 56 | unzip 57 | #file extraction 58 | netcdf-bin 59 | #netCDF binaries 60 | 61 | 62 | #******************************************************************************* 63 | #End 64 | #******************************************************************************* 65 | -------------------------------------------------------------------------------- /requirements.pip: -------------------------------------------------------------------------------- 1 | 
#******************************************************************************* 2 | #requirements.pip 3 | #******************************************************************************* 4 | 5 | #Purpose: 6 | #This file lists all the packages from the Python Packaging Index that are 7 | #required by RRR, and is used by the "pip" software. 8 | #Author: 9 | #Cedric H. David, 2015-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Usage 14 | #******************************************************************************* 15 | #sudo pip install -r requirements.pip 16 | 17 | 18 | #******************************************************************************* 19 | #Requirements for pip 20 | #******************************************************************************* 21 | 22 | #------------------------------------------------------------------------------- 23 | #Package management 24 | #------------------------------------------------------------------------------- 25 | pip==23.1.2 26 | setuptools==62.6.0 27 | wheel==0.37.1 28 | 29 | #------------------------------------------------------------------------------- 30 | #Packages 31 | #------------------------------------------------------------------------------- 32 | dbf==0.99.2 33 | Fiona==1.8.21 34 | flake8==7.0.0 35 | lxml==4.9.0 36 | matplotlib==3.5.2 37 | netCDF4==1.6.0 38 | numpy==1.21.6 39 | pandas==1.3.5 40 | progressbar==2.5 41 | pyproj==3.2.1 42 | pyshp==2.3.0 43 | rasterio==1.2.10 44 | requests==2.28.1 45 | Rtree==1.0.0 46 | scipy==1.7.3 47 | Shapely==1.8.2 48 | 49 | 50 | #******************************************************************************* 51 | #End 52 | #******************************************************************************* 53 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_avg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_avg.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a a csv file in which the time series of observed or modeled quantities 8 | #are stored, this program produces a csv file in which the monthly averages 9 | #of the same quantities are stored. 10 | #Author: 11 | #Cedric H. 
David, 2018-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import pandas 19 | 20 | 21 | #******************************************************************************* 22 | #Declaration of variables (given as command line arguments) 23 | #******************************************************************************* 24 | # 1 - rrr_hyd_csv 25 | # 2 - rrr_avg_csv 26 | 27 | 28 | #******************************************************************************* 29 | #Get command line arguments 30 | #******************************************************************************* 31 | IS_arg=len(sys.argv) 32 | if IS_arg != 3: 33 | print('ERROR - 2 and only 2 arguments can be used') 34 | raise SystemExit(22) 35 | 36 | rrr_hyd_csv=sys.argv[1] 37 | rrr_avg_csv=sys.argv[2] 38 | 39 | 40 | #******************************************************************************* 41 | #Print input information 42 | #******************************************************************************* 43 | print('Command line inputs') 44 | print('- '+rrr_hyd_csv) 45 | print('- '+rrr_avg_csv) 46 | 47 | 48 | #******************************************************************************* 49 | #Check if files exist 50 | #******************************************************************************* 51 | try: 52 | with open(rrr_hyd_csv) as file: 53 | pass 54 | except IOError as e: 55 | print('ERROR - Unable to open '+rrr_hyd_csv) 56 | raise SystemExit(22) 57 | 58 | 59 | #******************************************************************************* 60 | #Read rrr_hyd_csv 61 | #******************************************************************************* 62 | print('Read rrr_hyd_csv') 63 | 64 | df1=pandas.read_csv(rrr_hyd_csv) 65 | #Read the csv file using Pandas 66 | 67 | YS_name=df1.columns.values[0] 68 | #The header of the first column (e.g. USGS, RAPID, etc.) 
which contains dat$es 69 | 70 | df1[YS_name]=pandas.to_datetime(df1[YS_name]) 71 | #Convert the first column to DateTime 72 | 73 | df1.set_index(YS_name,inplace=True) 74 | #Sets the index of the dataframe as the first column 75 | 76 | IS_row1=df1.shape[0] 77 | IS_col1=df1.shape[1] 78 | 79 | IS_max_NaN1=0 80 | for col in df1: 81 | IS_max_NaN1=max(IS_max_NaN1, len(df1[col])-df1[col].count()) 82 | 83 | print('- Number of time steps in rrr_hyd_csv: '+str(IS_row1)) 84 | print('- Number of river reaches in rrr_hyd_csv: '+str(IS_col1)) 85 | print('- Max number of NaNs per river reach in rrr_hyd_csv: '+str(IS_max_NaN1)) 86 | if IS_max_NaN1 !=0: print('WARNING: There are NaNs') 87 | 88 | 89 | #******************************************************************************* 90 | #Compute monthly averages 91 | #******************************************************************************* 92 | print('Compute monthly averages') 93 | 94 | df2=df1.resample('M').mean() 95 | #Compute the monthly average 96 | 97 | IS_row2=df2.shape[0] 98 | IS_col2=df2.shape[1] 99 | 100 | IS_max_NaN2=0 101 | for col in df2: 102 | IS_max_NaN2=max(IS_max_NaN2, len(df2[col])-df2[col].count()) 103 | 104 | print('- Number of time steps in rrr_avg_csv: '+str(IS_row2)) 105 | print('- Number of river reaches in rrr_avg_csv: '+str(IS_col2)) 106 | print('- Max number of NaNs per river reach in rrr_avg_csv: '+str(IS_max_NaN2)) 107 | if IS_max_NaN2 !=0: print('WARNING: There are NaNs') 108 | 109 | 110 | #******************************************************************************* 111 | #Write rrr_avg_csv 112 | #******************************************************************************* 113 | print('Write rrr_avg_csv') 114 | 115 | df2.to_csv(rrr_avg_csv,na_rep='NaN') 116 | #Writing NaNs as 'NaN' 117 | 118 | 119 | #******************************************************************************* 120 | #End 121 | #******************************************************************************* 122 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_cmb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_cmb.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given n CSV files with timeseries, and the name of a new CSV file, this program 8 | #concatenates all timeseries and saves them in the new CSV file. 9 | #Author: 10 | #Cedric H. David, 2023-2023 11 | 12 | 13 | #******************************************************************************* 14 | #Import Python modules 15 | #******************************************************************************* 16 | import sys 17 | import os.path 18 | import csv 19 | import pandas 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables (given as command line arguments) 24 | #******************************************************************************* 25 | # 1 - rrr_hyd_csv 26 | # 2 - rrr_hyd_csv 27 | # . 
- rrr_hyd_csv 28 | # n - rrr_hyd_csv 29 | #n+1- rrr_cmb_csv 30 | 31 | 32 | #******************************************************************************* 33 | #Get command line arguments 34 | #******************************************************************************* 35 | IS_arg=len(sys.argv) 36 | if IS_arg < 4 : 37 | print('ERROR - A minimum of 3 arguments must be used') 38 | raise SystemExit(22) 39 | 40 | rrr_cmb_csv=sys.argv[IS_arg-1] 41 | 42 | 43 | #******************************************************************************* 44 | #Print input information 45 | #******************************************************************************* 46 | print('Command line inputs') 47 | print('- '+str(IS_arg-2)+' timeseries file(s) provided') 48 | print('- '+rrr_cmb_csv) 49 | 50 | 51 | #******************************************************************************* 52 | #Check if files exist 53 | #******************************************************************************* 54 | for JS_arg in range(1,IS_arg-1): 55 | rrr_hyd_csv=sys.argv[JS_arg] 56 | try: 57 | with open(rrr_hyd_csv) as file: 58 | pass 59 | except IOError as e: 60 | print('ERROR - Unable to open '+rrr_hyd_csv) 61 | raise SystemExit(22) 62 | 63 | 64 | #******************************************************************************* 65 | #Read first timeseries 66 | #******************************************************************************* 67 | print('Read first timeseries') 68 | 69 | rrr_hyd_csv=sys.argv[1] 70 | #The first timeseries file 71 | 72 | df1=pandas.read_csv(rrr_hyd_csv) 73 | #Read the csv file using Pandas 74 | 75 | YS_tim1=df1.columns.values[0] 76 | #The header of the first column which contains dates 77 | 78 | df1[YS_tim1]=pandas.to_datetime(df1[YS_tim1]) 79 | #Convert the first column to DateTime 80 | 81 | df1.set_index(YS_tim1,inplace=True) 82 | #Sets the index of the dataframe as the first column 83 | 84 | IS_tim1=df1.shape[0] 85 | #Number of timesteps 86 | 87 | ZV_tim1=df1.index.values.tolist() 88 | #Array with time values 89 | 90 | dfc=df1.copy(deep=True) 91 | #New object created with a copy of df1 92 | 93 | 94 | #******************************************************************************* 95 | #Read other timeseries 96 | #******************************************************************************* 97 | print('Read other timeseries') 98 | 99 | for JS_arg in range(2,IS_arg-1): 100 | 101 | rrr_hyd_csv=sys.argv[JS_arg] 102 | #The first timeseries file 103 | 104 | dfj=pandas.read_csv(rrr_hyd_csv) 105 | #Read the csv file using Pandas 106 | 107 | YS_timj=dfj.columns.values[0] 108 | #The header of the first column which contains dates 109 | 110 | dfj[YS_timj]=pandas.to_datetime(dfj[YS_timj]) 111 | #Convert the first column to DateTime 112 | 113 | dfj.set_index(YS_timj,inplace=True) 114 | #Sets the index of the dataframe as the first column 115 | 116 | IS_timj=dfj.shape[0] 117 | #Number of timesteps 118 | 119 | ZV_timj=dfj.index.values.tolist() 120 | #Array with time values 121 | 122 | if ZV_timj==ZV_tim1: 123 | dfc=pandas.merge(dfc,dfj,on=YS_tim1) 124 | else: 125 | print('ERROR - Unable inconsistent time values') 126 | raise SystemExit(22) 127 | 128 | 129 | #******************************************************************************* 130 | #Write CSV file 131 | #******************************************************************************* 132 | print('Write CSV file') 133 | 134 | dfc.to_csv(rrr_cmb_csv,na_rep='NaN') 135 | 136 | print('- Done') 137 | 138 | 139 | 
#******************************************************************************* 140 | #End 141 | #******************************************************************************* 142 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_spa.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_spa.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a netCDF file with modeled values for river discharge (Qout) or storage 8 | #(V) and a shapefile with a subset of the available river reaches; this program 9 | #produces a CSV file containing a time series of spatially aggregated values for 10 | #the subset. 11 | #Author: 12 | #Cedric H. David, 2023-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Import Python modules 17 | #******************************************************************************* 18 | import sys 19 | import fiona 20 | import netCDF4 21 | import datetime 22 | import csv 23 | import os.path 24 | 25 | 26 | #******************************************************************************* 27 | #Declaration of variables (given as command line arguments) 28 | #******************************************************************************* 29 | # 1 - rrr_mod_ncf 30 | # 2 - rrr_riv_shp 31 | # 3 - rrr_spa_csv 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg != 4: 39 | print('ERROR - 3 and only 3 arguments can be used') 40 | raise SystemExit(22) 41 | 42 | rrr_mod_ncf=sys.argv[1] 43 | rrr_riv_shp=sys.argv[2] 44 | rrr_spa_csv=sys.argv[3] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+rrr_mod_ncf) 52 | print('- '+rrr_riv_shp) 53 | print('- '+rrr_spa_csv) 54 | 55 | 56 | #******************************************************************************* 57 | #Check if files exist 58 | #******************************************************************************* 59 | try: 60 | with open(rrr_mod_ncf) as file: 61 | pass 62 | except IOError as e: 63 | print('ERROR - Unable to open '+rrr_mod_ncf) 64 | raise SystemExit(22) 65 | 66 | try: 67 | with open(rrr_riv_shp) as file: 68 | pass 69 | except IOError as e: 70 | print('ERROR - Unable to open '+rrr_riv_shp) 71 | raise SystemExit(22) 72 | 73 | 74 | #******************************************************************************* 75 | #Read netCDF file static data 76 | #******************************************************************************* 77 | print('Reading netCDF file static data') 78 | 79 | #------------------------------------------------------------------------------- 80 | #Open netCDF file 81 | #------------------------------------------------------------------------------- 82 | f = netCDF4.Dataset(rrr_mod_ncf, 'r') 83 | 84 | #------------------------------------------------------------------------------- 85 | #Get dimensions/variables names 86 | 
#------------------------------------------------------------------------------- 87 | if 'COMID' in f.dimensions: 88 | YS_mod_rid='COMID' 89 | elif 'rivid' in f.dimensions: 90 | YS_mod_rid='rivid' 91 | else: 92 | print('ERROR - neither COMID nor rivid exist in'+rrr_mod_ncf) 93 | raise SystemExit(22) 94 | 95 | if 'Time' in f.dimensions: 96 | YS_mod_tim='Time' 97 | elif 'time' in f.dimensions: 98 | YS_mod_tim='time' 99 | else: 100 | print('ERROR - Neither Time nor time exist in '+rrr_mod_ncf) 101 | raise SystemExit(22) 102 | 103 | if 'Qout' in f.variables: 104 | YS_mod_var='Qout' 105 | elif 'V' in f.variables: 106 | YS_mod_var='V' 107 | else: 108 | print('ERROR - neither Qout nor V exist in'+rrr_mod_ncf) 109 | raise SystemExit(22) 110 | 111 | #------------------------------------------------------------------------------- 112 | #Get variable sizes 113 | #------------------------------------------------------------------------------- 114 | IS_mod_rid=len(f.variables[YS_mod_rid]) 115 | print('- Number of river reaches: '+str(IS_mod_rid)) 116 | 117 | IS_mod_tim=len(f.variables[YS_mod_tim]) 118 | print('- Number of time steps: '+str(IS_mod_tim)) 119 | 120 | #------------------------------------------------------------------------------- 121 | #Get river IDs 122 | #------------------------------------------------------------------------------- 123 | print('- Get river IDs') 124 | 125 | IV_mod_rid=f.variables[YS_mod_rid] 126 | 127 | #------------------------------------------------------------------------------- 128 | #Get time variable values 129 | #------------------------------------------------------------------------------- 130 | print('- Get time variable values') 131 | 132 | ZV_time=[0]*IS_mod_tim 133 | YV_time=['']*IS_mod_tim 134 | if YS_mod_tim in f.variables and \ 135 | f.variables[YS_mod_tim][0]!=netCDF4.default_fillvals['i4']: 136 | #If the time variable exists but was not populated it holds the default 137 | #netCDF _fillValue and should be ignored here 138 | print(' . 
Values of time variable obtained from metadata') 139 | ZV_time=f.variables[YS_mod_tim][:] 140 | for JS_mod_tim in range(IS_mod_tim): 141 | YS_time=datetime.datetime.fromtimestamp(ZV_time[JS_mod_tim], \ 142 | datetime.timezone.utc) 143 | YS_time=YS_time.strftime('%Y-%m-%d') 144 | YV_time[JS_mod_tim]=YS_time 145 | 146 | 147 | #******************************************************************************* 148 | #Read rrr_riv_shp 149 | #******************************************************************************* 150 | print('Read rrr_riv_shp') 151 | 152 | rrr_riv_lay=fiona.open(rrr_riv_shp, 'r') 153 | 154 | if 'COMID' in rrr_riv_lay.schema['properties']: 155 | YS_riv_rid='COMID' 156 | elif 'rivid' in rrr_riv_lay.schema['properties']: 157 | YS_riv_rid='rivid' 158 | else: 159 | print('ERROR - COMID, rivid do not exist in '+rrr_riv_shp) 160 | raise SystemExit(22) 161 | 162 | IV_riv_rid=[] 163 | for rrr_riv_fea in rrr_riv_lay: 164 | IV_riv_rid.append(rrr_riv_fea['properties'][YS_riv_rid]) 165 | 166 | IS_riv_rid=len(IV_riv_rid) 167 | print('- Number of river reaches in rrr_riv_shp: '+str(IS_riv_rid)) 168 | 169 | 170 | #******************************************************************************* 171 | #Make hash table 172 | #******************************************************************************* 173 | print('- Make hash table') 174 | IM_hsh={} 175 | for JS_mod_rid in range(IS_mod_rid): 176 | IM_hsh[int(IV_mod_rid[JS_mod_rid])]=JS_mod_rid 177 | 178 | IV_mod_idx=[IM_hsh[rid] for rid in IV_riv_rid] 179 | 180 | 181 | #******************************************************************************* 182 | #Read netCDF file dynamic data 183 | #******************************************************************************* 184 | print('Reading netCDF file dynamic data') 185 | 186 | ZV_spa=[] 187 | for JS_mod_tim in range(IS_mod_tim): 188 | ZV_tmp=f.variables[YS_mod_var][JS_mod_tim,:] 189 | ZV_spa.append(sum(ZV_tmp[IV_mod_idx])) 190 | 191 | 192 | #******************************************************************************* 193 | #Write CSV file 194 | #******************************************************************************* 195 | print('Write CSV file') 196 | 197 | with open(rrr_spa_csv, 'w') as csvfile: 198 | csvwriter = csv.writer(csvfile, dialect='excel') 199 | csvwriter.writerow([os.path.basename(rrr_mod_ncf),YS_mod_var]) 200 | for JS_mod_tim in range(IS_mod_tim): 201 | csvwriter.writerow([YV_time[JS_mod_tim],ZV_spa[JS_mod_tim]]) 202 | #Write hydrographs 203 | 204 | 205 | #******************************************************************************* 206 | #End 207 | #******************************************************************************* 208 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_sts_dig.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_sts_dig.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a CSV statistics table, and shapefile with a subset of stations, this 8 | #program produces a summary of statistics and saves a digested version in a new 9 | #CSV file. 10 | #Author: 11 | #Cedric H. 
David, 2020-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import fiona 19 | import pandas 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables (given as command line arguments) 24 | #******************************************************************************* 25 | # 1 - rrr_sts_csv 26 | # 2 - rrr_obs_shp 27 | # 3 - rrr_dig_csv 28 | 29 | 30 | #******************************************************************************* 31 | #Get command line arguments 32 | #******************************************************************************* 33 | IS_arg=len(sys.argv) 34 | if IS_arg < 4 : 35 | print('ERROR - A minimum of 3 arguments must be used') 36 | raise SystemExit(22) 37 | 38 | rrr_sts_csv=sys.argv[1] 39 | rrr_obs_shp=sys.argv[2] 40 | rrr_dig_csv=sys.argv[3] 41 | 42 | 43 | #******************************************************************************* 44 | #Print input information 45 | #******************************************************************************* 46 | print('Command line inputs') 47 | print('- '+rrr_sts_csv) 48 | print('- '+rrr_obs_shp) 49 | print('- '+rrr_dig_csv) 50 | 51 | 52 | #******************************************************************************* 53 | #Check if files exist 54 | #******************************************************************************* 55 | try: 56 | with open(rrr_sts_csv) as file: 57 | pass 58 | except IOError as e: 59 | print('ERROR - Unable to open '+rrr_sts_csv) 60 | raise SystemExit(22) 61 | 62 | try: 63 | with open(rrr_obs_shp) as file: 64 | pass 65 | except IOError as e: 66 | print('ERROR - Unable to open '+rrr_obs_shp) 67 | raise SystemExit(22) 68 | 69 | 70 | #******************************************************************************* 71 | #Read statistics 72 | #******************************************************************************* 73 | print('Read statistics') 74 | 75 | df_sts_csv=pandas.read_csv(rrr_sts_csv) 76 | IS_sts_csv=df_sts_csv.shape[0] 77 | 78 | print('- The number of river reaches in statistics file is: '+str(IS_sts_csv)) 79 | 80 | 81 | #******************************************************************************* 82 | #Read gauge shapefile 83 | #******************************************************************************* 84 | print('Read gauge shapefile') 85 | 86 | rrr_obs_lay=fiona.open(rrr_obs_shp, 'r') 87 | 88 | if 'Sttn_Nm' in rrr_obs_lay[0]['properties']: 89 | YS_obs_nam='Sttn_Nm' 90 | else: 91 | print('ERROR - Sttn_Nm does not exist in '+rrr_obs_shp) 92 | raise SystemExit(22) 93 | 94 | if 'rivid' in rrr_obs_lay[0]['properties']: 95 | YS_obs_ids='rivid' 96 | else: 97 | print('ERROR - rivid does not exist in '+rrr_obs_shp) 98 | raise SystemExit(22) 99 | 100 | IS_obs_shp=len(rrr_obs_lay) 101 | print('- The number of gauges in shapefile is: '+str(IS_obs_shp)) 102 | 103 | IV_obs_ids=[] 104 | for rrr_obs_fea in rrr_obs_lay: 105 | IV_obs_ids.append(rrr_obs_fea['properties'][YS_obs_ids]) 106 | 107 | 108 | #******************************************************************************* 109 | #Subsample statistics and add normalized metrics 110 | #******************************************************************************* 111 | print('Subsample statistics and add normalized metrics') 112 | 113 | df_sub_csv=df_sts_csv.copy(deep=True) 114 
| 115 | df_sub_csv=df_sub_csv.loc[df_sub_csv['rivid'].isin(IV_obs_ids)] 116 | 117 | df_sub_csv['nRMSE']=df_sub_csv['RMSE']/df_sub_csv['Qobsbar'] 118 | df_sub_csv['nBias']=df_sub_csv['Bias']/df_sub_csv['Qobsbar'] 119 | df_sub_csv['nSTDE']=df_sub_csv['STDE']/df_sub_csv['Qobsbar'] 120 | 121 | print('- Done') 122 | 123 | 124 | #******************************************************************************* 125 | #Create digested statistics file 126 | #******************************************************************************* 127 | print('Create digested statistics file') 128 | 129 | df_dig_csv=pandas.DataFrame(columns=df_sub_csv.columns) 130 | 131 | df_dig_csv.loc[0]=df_sub_csv.mean() 132 | 133 | df_dig_csv.loc[1]=df_sub_csv.median() 134 | 135 | df_dig_csv.loc[2]=df_sub_csv.quantile(q=0.68) 136 | df_dig_csv.at[2,'Nash']=df_sub_csv['Nash'].quantile(q=0.32) 137 | #68% quantiles differ: normalized errors (small better) and Nash (large better) 138 | 139 | df_dig_csv.insert(1,'summary',['mean','median','68%_better_than']) 140 | 141 | df_dig_csv=df_dig_csv[['summary','nRMSE','nBias','nSTDE','Nash']] 142 | 143 | df_dig_csv.to_csv(path_or_buf=rrr_dig_csv,index=False,float_format='%.2f') 144 | 145 | print('- Done') 146 | 147 | 148 | #******************************************************************************* 149 | #End 150 | #******************************************************************************* 151 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_sts_tbl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_sts_tbl.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a subset list of river IDs where observations are available, several csv 8 | #files with comparison statistics involving various model simulations and a 9 | #common set of observations, this program produces a summary table that is saved 10 | #in a new csv file. 11 | #Author: 12 | #Cedric H. David, 2020-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Import Python modules 17 | #******************************************************************************* 18 | import sys 19 | import os.path 20 | import csv 21 | import pandas 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - rrr_obs_csv 28 | # 2 - rrr_sts_csv 29 | # . 
- rrr_sts_csv 30 | # n - rrr_sts_csv 31 | #n+1- rrr_tbl_csv 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg < 4 : 39 | print('ERROR - A minimum of 3 arguments must be used') 40 | raise SystemExit(22) 41 | 42 | rrr_obs_csv=sys.argv[1] 43 | rrr_tbl_csv=sys.argv[IS_arg-1] 44 | 45 | 46 | #******************************************************************************* 47 | #Print input information 48 | #******************************************************************************* 49 | print('Command line inputs') 50 | print('- '+rrr_obs_csv) 51 | print('- '+str(IS_arg-3)+' statistics file(s) provided') 52 | print('- '+rrr_tbl_csv) 53 | 54 | 55 | #******************************************************************************* 56 | #Check if files exist 57 | #******************************************************************************* 58 | try: 59 | with open(rrr_obs_csv) as file: 60 | pass 61 | except IOError as e: 62 | print('ERROR - Unable to open '+rrr_obs_csv) 63 | raise SystemExit(22) 64 | 65 | for JS_arg in range(2,IS_arg-1): 66 | rrr_sts_csv=sys.argv[JS_arg] 67 | try: 68 | with open(rrr_sts_csv) as file: 69 | pass 70 | except IOError as e: 71 | print('ERROR - Unable to open '+rrr_sts_csv) 72 | raise SystemExit(22) 73 | 74 | 75 | #******************************************************************************* 76 | #Read list of gauges 77 | #******************************************************************************* 78 | print('Read list of gauges') 79 | 80 | IV_obs_use_id=[] 81 | with open(rrr_obs_csv, 'r') as csvfile: 82 | csvreader = csv.reader(csvfile, dialect='excel') 83 | for row in csvreader: 84 | IV_obs_use_id.append(int(row[0])) 85 | 86 | IS_obs_use=len(IV_obs_use_id) 87 | print('- The subset used contains '+str(IS_obs_use)+' gauges') 88 | 89 | 90 | #******************************************************************************* 91 | #Create summary table 92 | #******************************************************************************* 93 | print('Create summary table') 94 | 95 | with open(rrr_tbl_csv, 'w') as csvfile: 96 | csvwriter = csv.writer(csvfile, dialect='excel') 97 | csvwriter.writerow(['Name', \ 98 | 'All avg Nash','All med Nash', \ 99 | 'Use avg Nash','Use med Nash', \ 100 | 'Oth avg Nash','Oth med Nash']) 101 | 102 | #-------------------------------------------------------------------------- 103 | #Loop over all statistics files 104 | #-------------------------------------------------------------------------- 105 | for JS_arg in range(2,IS_arg-1): 106 | rrr_sts_csv=sys.argv[JS_arg] 107 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 108 | #Make pandas dataframes for all stations, stations used, and others 109 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 110 | df_tot=pandas.read_csv(rrr_sts_csv) 111 | df_use=df_tot[df_tot['rivid'].isin(IV_obs_use_id)] 112 | df_oth=df_tot[~df_tot['rivid'].isin(IV_obs_use_id)] 113 | 114 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 115 | #Compute average and median values of Nash Sutcliffe Efficiency 116 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 117 | ZS_tot_avg=df_tot['Nash'].mean() 118 | ZS_tot_med=df_tot['Nash'].median() 119 | ZS_use_avg=df_use['Nash'].mean() 120 | ZS_use_med=df_use['Nash'].median() 121 | 
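#Illustrative note, not part of the original script: the Nash-Sutcliffe Efficiency ranges from minus infinity to 1, where 1 is a perfect match between simulated and observed discharge. 'All' refers to every reach in a statistics file, 'Use' to the reaches listed in the subset of gauges, and 'Oth' to the remaining reaches; strongly negative averages and medians are written as '-Inf' further below.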
ZS_oth_avg=df_oth['Nash'].mean() 122 | ZS_oth_med=df_oth['Nash'].median() 123 | 124 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 125 | #Reformat values for clean writing 126 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 127 | IS_threshold=-1000 128 | if ZS_tot_avg > IS_threshold: 129 | YS_tot_avg='%.2f'%ZS_tot_avg 130 | else: 131 | YS_tot_avg='-Inf' 132 | if ZS_tot_med > IS_threshold: 133 | YS_tot_med='%.2f'%ZS_tot_med 134 | else: 135 | YS_tot_med='-Inf' 136 | if ZS_use_avg > IS_threshold: 137 | YS_use_avg='%.2f'%ZS_use_avg 138 | else: 139 | YS_use_avg='-Inf' 140 | if ZS_use_med > IS_threshold: 141 | YS_use_med='%.2f'%ZS_use_med 142 | else: 143 | YS_use_med='-Inf' 144 | if ZS_oth_avg > IS_threshold: 145 | YS_oth_avg='%.2f'%ZS_oth_avg 146 | else: 147 | YS_oth_avg='-Inf' 148 | if ZS_oth_med > IS_threshold: 149 | YS_oth_med='%.2f'%ZS_oth_med 150 | else: 151 | YS_oth_med='-Inf' 152 | 153 | YS_name=os.path.basename(rrr_sts_csv) 154 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 155 | #Write in summary table 156 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 157 | YV_write=[YS_name, \ 158 | YS_tot_avg,YS_tot_med, \ 159 | YS_use_avg,YS_use_med, \ 160 | YS_oth_avg,YS_oth_med] 161 | csvwriter.writerow(YV_write) 162 | 163 | print('- Done') 164 | 165 | 166 | #******************************************************************************* 167 | #End 168 | #******************************************************************************* 169 | -------------------------------------------------------------------------------- /src/rrr_anl_hyd_sum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_hyd_sum.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given n CSV files with timeseries, and the name of a new CSV file, this program 8 | #computes the sum of all timeseries and saves it in the new CSV file. 9 | #Author: 10 | #Cedric H. David, 2023-2023 11 | 12 | 13 | #******************************************************************************* 14 | #Import Python modules 15 | #******************************************************************************* 16 | import sys 17 | import os.path 18 | import csv 19 | import pandas 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables (given as command line arguments) 24 | #******************************************************************************* 25 | # 1 - rrr_spa_csv 26 | # 2 - rrr_spa_csv 27 | # . 
- rrr_spa_csv 28 | # n - rrr_spa_csv 29 | #n+1- rrr_sum_csv 30 | 31 | 32 | #******************************************************************************* 33 | #Get command line arguments 34 | #******************************************************************************* 35 | IS_arg=len(sys.argv) 36 | if IS_arg < 4 : 37 | print('ERROR - A minimum of 3 arguments must be used') 38 | raise SystemExit(22) 39 | 40 | rrr_sum_csv=sys.argv[IS_arg-1] 41 | 42 | 43 | #******************************************************************************* 44 | #Print input information 45 | #******************************************************************************* 46 | print('Command line inputs') 47 | print('- '+str(IS_arg-2)+' timeseries file(s) provided') 48 | print('- '+rrr_sum_csv) 49 | 50 | 51 | #******************************************************************************* 52 | #Check if files exist 53 | #******************************************************************************* 54 | for JS_arg in range(1,IS_arg-1): 55 | rrr_spa_csv=sys.argv[JS_arg] 56 | try: 57 | with open(rrr_spa_csv) as file: 58 | pass 59 | except IOError as e: 60 | print('ERROR - Unable to open '+rrr_spa_csv) 61 | raise SystemExit(22) 62 | 63 | 64 | #******************************************************************************* 65 | #Read first timeseries 66 | #******************************************************************************* 67 | print('Read first timeseries') 68 | 69 | rrr_spa_csv=sys.argv[1] 70 | #The first timeseries file 71 | 72 | df1=pandas.read_csv(rrr_spa_csv) 73 | #Read the csv file using Pandas 74 | 75 | YS_tim1=df1.columns.values[0] 76 | #The header of the first column which contains dates 77 | 78 | df1[YS_tim1]=pandas.to_datetime(df1[YS_tim1]) 79 | #Convert the first column to DateTime 80 | 81 | df1.set_index(YS_tim1,inplace=True) 82 | #Sets the index of the dataframe as the first column 83 | 84 | IS_tim1=df1.shape[0] 85 | #Number of timesteps 86 | 87 | ZV_tim1=df1.index.values.tolist() 88 | #Array with time values 89 | 90 | dfs=df1.copy(deep=True) 91 | #New object created with a copy of df1 92 | 93 | dfs.index.rename('Date',inplace=True) 94 | #Renamed index to 'Date' 95 | 96 | 97 | #******************************************************************************* 98 | #Read other timeseries 99 | #******************************************************************************* 100 | print('Read other timeseries') 101 | 102 | for JS_arg in range(2,IS_arg-1): 103 | 104 | rrr_spa_csv=sys.argv[JS_arg] 105 | #The current timeseries file 106 | 107 | dfj=pandas.read_csv(rrr_spa_csv) 108 | #Read the csv file using Pandas 109 | 110 | YS_timj=dfj.columns.values[0] 111 | #The header of the first column which contains dates 112 | 113 | dfj[YS_timj]=pandas.to_datetime(dfj[YS_timj]) 114 | #Convert the first column to DateTime 115 | 116 | dfj.set_index(YS_timj,inplace=True) 117 | #Sets the index of the dataframe as the first column 118 | 119 | IS_timj=dfj.shape[0] 120 | #Number of timesteps 121 | 122 | ZV_timj=dfj.index.values.tolist() 123 | #Array with time values 124 | 125 | if ZV_timj==ZV_tim1: 126 | dfs=dfs.add(dfj) 127 | else: 128 | print('ERROR - Inconsistent time values across input files') 129 | raise SystemExit(22) 130 | 131 | 132 | #******************************************************************************* 133 | #Write CSV file 134 | #******************************************************************************* 135 | print('Write CSV file') 136 | 137 | dfs.to_csv(rrr_sum_csv) 138 | 139 | print('- Done') 
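#Added usage sketch (hypothetical file names, not part of the original script): the output file is always given last, for example: python ./rrr_anl_hyd_sum.py ./basin_A.csv ./basin_B.csv ./basin_sum.csv. All input CSV files must share exactly the same dates in their first column; otherwise the program stops with the inconsistency error above.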
140 | 141 | 142 | #******************************************************************************* 143 | #End 144 | #******************************************************************************* 145 | -------------------------------------------------------------------------------- /src/rrr_anl_shp_avg_obs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_shp_avg_obs.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a shapefile of river gauges, a CSV file with corresponding observations, 8 | #and the name of a new shapefile; this program computes the average of 9 | #observations and appends it as a new attribute to a copy of the input 10 | #shapefile. 11 | #Author: 12 | #Cedric H. David, 2023-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Import Python modules 17 | #******************************************************************************* 18 | import sys 19 | import fiona 20 | import pandas 21 | import numpy 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - rrr_obs_shp 28 | # 2 - rrr_Qob_csv 29 | # 3 - rrr_new_shp 30 | 31 | 32 | #******************************************************************************* 33 | #Get command line arguments 34 | #******************************************************************************* 35 | IS_arg=len(sys.argv) 36 | if IS_arg != 4: 37 | print('ERROR - 3 and only 3 arguments can be used') 38 | raise SystemExit(22) 39 | 40 | rrr_obs_shp=sys.argv[1] 41 | rrr_Qob_csv=sys.argv[2] 42 | rrr_new_shp=sys.argv[3] 43 | 44 | 45 | #******************************************************************************* 46 | #Print input information 47 | #******************************************************************************* 48 | print('Command line inputs') 49 | print('- '+rrr_obs_shp) 50 | print('- '+rrr_Qob_csv) 51 | print('- '+rrr_new_shp) 52 | 53 | 54 | #******************************************************************************* 55 | #Check if files exist 56 | #******************************************************************************* 57 | try: 58 | with open(rrr_obs_shp) as file: 59 | pass 60 | except IOError as e: 61 | print('ERROR - Unable to open '+rrr_obs_shp) 62 | raise SystemExit(22) 63 | 64 | try: 65 | with open(rrr_Qob_csv) as file: 66 | pass 67 | except IOError as e: 68 | print('ERROR - Unable to open '+rrr_Qob_csv) 69 | raise SystemExit(22) 70 | 71 | 72 | #******************************************************************************* 73 | #Read shapefile 74 | #******************************************************************************* 75 | print('Read shapefile') 76 | 77 | rrr_obs_lay=fiona.open(rrr_obs_shp, 'r') 78 | IS_obs_shp=len(rrr_obs_lay) 79 | print('- The number of gauges in shapefile is: '+str(IS_obs_shp)) 80 | 81 | if 'Sttn_Nm' in rrr_obs_lay[0]['properties']: 82 | YS_obs_nam='Sttn_Nm' 83 | else: 84 | print('ERROR - Sttn_Nm does not exist in '+rrr_obs_shp) 85 | raise SystemExit(22) 86 | 87 | YV_obs_nam=[] 88 | for rrr_obs_fea in rrr_obs_lay: 89 | YV_obs_nam.append(rrr_obs_fea['properties'][YS_obs_nam]) 90 | 91 | 92 | 
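#Illustrative note, not part of the original script: the CSV file read below is expected to hold a date column first and then one column per gauge, with column headers equal to the 'Sttn_Nm' values and columns ordered exactly as the features of the shapefile; the column-wise mean of each gauge is what later becomes the 'meanQ' attribute of the new shapefile.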
#******************************************************************************* 93 | #Read CSV file 94 | #******************************************************************************* 95 | print('Read CSV file') 96 | 97 | df1=pandas.read_csv(rrr_Qob_csv) 98 | #Read the csv file using Pandas 99 | 100 | YS_name=df1.columns.values[0] 101 | #The header of the first column which contains dates 102 | 103 | df1[YS_name]=pandas.to_datetime(df1[YS_name]) 104 | #Convert the first column to DateTime 105 | 106 | df1.set_index(YS_name,inplace=True) 107 | #Sets the index of the dataframe as the first column 108 | 109 | IS_time=df1.shape[0] 110 | IS_Qob_csv=df1.shape[1] 111 | YV_Qob_nam=df1.columns.tolist() 112 | ZV_Qav=df1.mean().tolist() 113 | 114 | print('- The number of gauges in CSV file is: '+str(IS_Qob_csv)) 115 | print('- The number of time steps in CSV file is: '+str(IS_time)) 116 | 117 | 118 | #******************************************************************************* 119 | #Check consistency 120 | #******************************************************************************* 121 | print('Check consistency') 122 | 123 | if IS_obs_shp!=IS_Qob_csv: 124 | print('ERROR - Inconsistent number of gauges') 125 | raise SystemExit(22) 126 | 127 | if not YV_Qob_nam==YV_obs_nam: 128 | print('ERROR - Inconsistent names of gauges') 129 | raise SystemExit(22) 130 | 131 | print('- Done') 132 | 133 | 134 | #******************************************************************************* 135 | #Copying shapefile and appending with average 136 | #******************************************************************************* 137 | print('Copying shapefile and appending with average') 138 | 139 | rrr_obs_crs=rrr_obs_lay.crs 140 | rrr_new_crs=rrr_obs_crs.copy() 141 | #print(rrr_new_crs) 142 | print('- Coordinate Reference System copied') 143 | 144 | rrr_obs_sch=rrr_obs_lay.schema 145 | rrr_new_sch=rrr_obs_sch.copy() 146 | rrr_new_sch['properties']['meanQ']='float:10.3' 147 | #print(rrr_new_sch) 148 | print('- Schema copied') 149 | 150 | rrr_new_lay=fiona.open(rrr_new_shp, 'w', \ 151 | crs=rrr_new_crs, \ 152 | driver='ESRI Shapefile', \ 153 | schema=rrr_new_sch \ 154 | ) 155 | print('- New shapefile created') 156 | 157 | for JS_obs_shp in range(IS_obs_shp): 158 | rrr_obs_fea=rrr_obs_lay[JS_obs_shp] 159 | rrr_obs_prp=rrr_obs_fea['properties'] 160 | rrr_obs_geo=rrr_obs_fea['geometry'] 161 | 162 | rrr_new_prp=rrr_obs_prp.copy() 163 | rrr_new_geo=rrr_obs_geo.copy() 164 | 165 | rrr_new_prp['meanQ']=round(ZV_Qav[JS_obs_shp],3) 166 | 167 | rrr_new_lay.write({ \ 168 | 'properties': rrr_new_prp, \ 169 | 'geometry': rrr_new_geo, \ 170 | }) 171 | print('- New shapefile populated') 172 | 173 | rrr_new_lay.close() 174 | print('- Closing shapefile so that values are saved') 175 | 176 | 177 | #******************************************************************************* 178 | #End 179 | #******************************************************************************* 180 | -------------------------------------------------------------------------------- /src/rrr_anl_spl_csv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_spl_csv.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a CSV file containing a sampling sequence (with river IDs and respective 8 | #times of observations) and a time, this 
script creates a new sampling sequence 9 | #in which all river IDs that were previously sampled at single/multiple times 10 | #are now only sampled together at a unique time. The river IDs that were never 11 | #sampled in the previous sequence are not either sampled in the new sequence. 12 | #Author: 13 | #Cedric H. David, 2021-2023 14 | 15 | 16 | #******************************************************************************* 17 | #Import Python modules 18 | #******************************************************************************* 19 | import sys 20 | import csv 21 | 22 | 23 | #******************************************************************************* 24 | #Declaration of variables (given as command line arguments) 25 | #******************************************************************************* 26 | # 1 - rrr_spl_csv 27 | # 2 - ZS_sp2_tim 28 | # 3 - rrr_sp2_csv 29 | 30 | 31 | #******************************************************************************* 32 | #Get command line arguments 33 | #******************************************************************************* 34 | IS_arg=len(sys.argv) 35 | if IS_arg != 4: 36 | print('ERROR - 3 and only 3 arguments can be used') 37 | raise SystemExit(22) 38 | 39 | rrr_spl_csv=sys.argv[1] 40 | ZS_sp2_tim=float(sys.argv[2]) 41 | rrr_sp2_csv=sys.argv[3] 42 | 43 | 44 | #******************************************************************************* 45 | #Print input information 46 | #******************************************************************************* 47 | print('Command line inputs') 48 | print('- '+rrr_spl_csv) 49 | print('- '+str(ZS_sp2_tim)) 50 | print('- '+rrr_sp2_csv) 51 | 52 | 53 | #******************************************************************************* 54 | #Check if files exist 55 | #******************************************************************************* 56 | try: 57 | with open(rrr_spl_csv) as file: 58 | pass 59 | except IOError as e: 60 | print('ERROR - Unable to open '+rrr_spl_csv) 61 | raise SystemExit(22) 62 | 63 | 64 | #******************************************************************************* 65 | #Opening rrr_spl_csv 66 | #******************************************************************************* 67 | print('Opening rrr_spl_csv') 68 | 69 | IV_riv_tot_id=[] 70 | IV_spl_cnt=[] 71 | IM_mea_tim=[] 72 | with open(rrr_spl_csv) as csv_file: 73 | reader=csv.reader(csv_file,dialect='excel',quoting=csv.QUOTE_NONNUMERIC) 74 | for row in reader: 75 | if len(row[2:])==int(row[1]): 76 | IV_riv_tot_id.append(int(row[0])) 77 | IV_spl_cnt.append(int(row[1])) 78 | IM_mea_tim.append(row[2:]) 79 | else: 80 | print('ERROR - This file is inconsistent: '+rrr_spl_csv) 81 | raise SystemExit(22) 82 | 83 | IS_riv_tot=len(IV_riv_tot_id) 84 | IS_spl_max=max(IV_spl_cnt) 85 | IS_spl_tot=sum(IV_spl_cnt) 86 | print('- The number of river reaches in subsample file is: '+str(IS_riv_tot)) 87 | print('- The maximum number of subsamples per river reach is: '+str(IS_spl_max)) 88 | print('- The total number of subsample/river reach pairs is: '+str(IS_spl_tot)) 89 | 90 | 91 | #******************************************************************************* 92 | #Creating new sequence for rrr_sp2_csv 93 | #******************************************************************************* 94 | print('Creating new sequence for rrr_sp2_csv') 95 | 96 | IV_sp2_cnt=[0]*IS_riv_tot 97 | IM_me2_tim=[[]]*IS_riv_tot 98 | for JS_riv_tot in range(IS_riv_tot): 99 | if IV_spl_cnt[JS_riv_tot]>0: 100 | IV_sp2_cnt[JS_riv_tot]=1 101 | 
IM_me2_tim[JS_riv_tot]=[ZS_sp2_tim] 102 | 103 | IS_sp2_max=max(IV_sp2_cnt) 104 | IS_sp2_tot=sum(IV_sp2_cnt) 105 | 106 | print('- The number of river reaches in subsample file is: '+str(IS_riv_tot)) 107 | print('- The maximum number of subsamples per river reach is: '+str(IS_sp2_max)) 108 | print('- The total number of subsample/river reach pairs is: '+str(IS_sp2_tot)) 109 | 110 | 111 | #******************************************************************************* 112 | #Writing rrr_sp2_csv 113 | #******************************************************************************* 114 | print('Writing rrr_sp2_csv') 115 | 116 | with open(rrr_sp2_csv, 'w') as csvfile: 117 | csvwriter = csv.writer(csvfile, dialect='excel') 118 | for JS_riv_tot in range(IS_riv_tot): 119 | IS_riv_id=IV_riv_tot_id[JS_riv_tot] 120 | IV_line=[IS_riv_id,IV_sp2_cnt[JS_riv_tot]] 121 | IV_line=IV_line+IM_me2_tim[JS_riv_tot] 122 | csvwriter.writerow(IV_line) 123 | 124 | 125 | #******************************************************************************* 126 | #End 127 | #******************************************************************************* 128 | -------------------------------------------------------------------------------- /src/rrr_anl_spl_mul.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_anl_spl_mul.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a CSV file containing a sampling sequence (with river IDs and respective 8 | #times of observations), a repeat cycle, and a number of satellites, this script 9 | #creates a new sampling sequence in which all river IDs that were previously 10 | #sampled by a single satellite are now sampled by multiple equidistant such 11 | #satellites. The river IDs that were never sampled in the previous sequence are 12 | #not either sampled in the new sequence. 13 | #Author: 14 | #Cedric H. 
David, 2021-2023 15 | 16 | 17 | #******************************************************************************* 18 | #Import Python modules 19 | #******************************************************************************* 20 | import sys 21 | import csv 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - rrr_spl_csv 28 | # 2 - ZS_rep 29 | # 3 - IS_sat 30 | # 4 - rrr_sp2_csv 31 | 32 | 33 | #******************************************************************************* 34 | #Get command line arguments 35 | #******************************************************************************* 36 | IS_arg=len(sys.argv) 37 | if IS_arg != 5: 38 | print('ERROR - 4 and only 4 arguments can be used') 39 | raise SystemExit(22) 40 | 41 | rrr_spl_csv=sys.argv[1] 42 | ZS_rep=float(sys.argv[2]) 43 | IS_sat=int(sys.argv[3]) 44 | rrr_sp2_csv=sys.argv[4] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+rrr_spl_csv) 52 | print('- '+str(ZS_rep)) 53 | print('- '+str(IS_sat)) 54 | print('- '+rrr_sp2_csv) 55 | 56 | 57 | #******************************************************************************* 58 | #Check if files exist 59 | #******************************************************************************* 60 | try: 61 | with open(rrr_spl_csv) as file: 62 | pass 63 | except IOError as e: 64 | print('ERROR - Unable to open '+rrr_spl_csv) 65 | raise SystemExit(22) 66 | 67 | 68 | #******************************************************************************* 69 | #Opening rrr_spl_csv 70 | #******************************************************************************* 71 | print('Opening rrr_spl_csv') 72 | 73 | IV_riv_tot_id=[] 74 | IV_spl_cnt=[] 75 | IM_mea_tim=[] 76 | with open(rrr_spl_csv) as csv_file: 77 | reader=csv.reader(csv_file,dialect='excel',quoting=csv.QUOTE_NONNUMERIC) 78 | for row in reader: 79 | if len(row[2:])==int(row[1]): 80 | IV_riv_tot_id.append(int(row[0])) 81 | IV_spl_cnt.append(int(row[1])) 82 | IM_mea_tim.append(row[2:]) 83 | else: 84 | print('ERROR - This file is inconsistent: '+rrr_spl_csv) 85 | raise SystemExit(22) 86 | 87 | IS_riv_tot=len(IV_riv_tot_id) 88 | IS_spl_max=max(IV_spl_cnt) 89 | IS_spl_tot=sum(IV_spl_cnt) 90 | print('- The number of river reaches in subsample file is: '+str(IS_riv_tot)) 91 | print('- The maximum number of subsamples per river reach is: '+str(IS_spl_max)) 92 | print('- The total number of subsample/river reach pairs is: '+str(IS_spl_tot)) 93 | 94 | 95 | #******************************************************************************* 96 | #Creating new sequence for rrr_sp2_csv 97 | #******************************************************************************* 98 | print('Creating new sequence for rrr_sp2_csv') 99 | 100 | IV_sp2_cnt=[0]*IS_riv_tot 101 | IM_me2_tim=[[]]*IS_riv_tot 102 | for JS_riv_tot in range(IS_riv_tot): 103 | IV_sp2_cnt[JS_riv_tot]=IS_sat*IV_spl_cnt[JS_riv_tot] 104 | IM_me2_tim[JS_riv_tot]=[-9999]*IV_sp2_cnt[JS_riv_tot] 105 | for JS_spl_cnt in range(IV_spl_cnt[JS_riv_tot]): 106 | for JS_sat in range(IS_sat): 107 | ZS_me2_tim=IM_mea_tim[JS_riv_tot][JS_spl_cnt] \ 108 | +JS_sat*ZS_rep/IS_sat 109 | if ZS_me2_tim>=ZS_rep: 
ZS_me2_tim=ZS_me2_tim-ZS_rep 110 | if ZS_me2_tim<0: 111 | print('ERROR - ZS_me2_tim is negative: ',ZS_me2_tim) 112 | raise SystemExit(22) 113 | IM_me2_tim[JS_riv_tot][JS_spl_cnt*IS_sat+JS_sat]=ZS_me2_tim 114 | IM_me2_tim[JS_riv_tot].sort() 115 | IS_sp2_max=max(IV_sp2_cnt) 116 | IS_sp2_tot=sum(IV_sp2_cnt) 117 | 118 | print('- The number of river reaches in subsample file is: '+str(IS_riv_tot)) 119 | print('- The maximum number of subsamples per river reach is: '+str(IS_sp2_max)) 120 | print('- The total number of subsample/river reach pairs is: '+str(IS_sp2_tot)) 121 | 122 | 123 | #******************************************************************************* 124 | #Writing rrr_sp2_csv 125 | #******************************************************************************* 126 | print('Writing rrr_sp2_csv') 127 | 128 | with open(rrr_sp2_csv, 'w') as csvfile: 129 | csvwriter = csv.writer(csvfile, dialect='excel') 130 | for JS_riv_tot in range(IS_riv_tot): 131 | IS_riv_id=IV_riv_tot_id[JS_riv_tot] 132 | IV_line=[IS_riv_id,IV_sp2_cnt[JS_riv_tot]] 133 | IV_line=IV_line+IM_me2_tim[JS_riv_tot] 134 | csvwriter.writerow(IV_line) 135 | 136 | 137 | #******************************************************************************* 138 | #End 139 | #******************************************************************************* 140 | -------------------------------------------------------------------------------- /src/rrr_arc_cat_hydrosheds.py: -------------------------------------------------------------------------------- 1 | #WARNING! 2 | #This open-source code uses the following proprietary software: 3 | #(ArcGIS Desktop 10.5 w/ Spatial Analyst) 4 | #******************************************************************************* 5 | #rrr_arc_cat_hydrosheds.py 6 | #******************************************************************************* 7 | 8 | #Purpose: 9 | #Given a river shapefile and a flow direction grid from HydroSHEDS, this program 10 | #uses ArcGIS to create a catchment shapefile that has one and only one catchment 11 | #per river reach. 12 | #Author: 13 | #Cedric H. 
David, 2017-2023 14 | 15 | 16 | #******************************************************************************* 17 | #Import Python modules 18 | #******************************************************************************* 19 | import arcpy 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables 24 | #******************************************************************************* 25 | arc_dir_ras = 'Z:\\Data\\Work\\Research\\GIS\\Datasets\\HydroSHEDS\\original\\usgs.gov\\unzip\\na_dir_15s\\na_dir_15s' 26 | arc_riv_shp = 'Z:\\Data\\Work\\Research\\GIS\\Datasets\\HydroSHEDS\\original\\usgs.gov\\unzip\\na_riv_15s.shp' 27 | arc_cat_shp = 'Z:\\Data\\Work\\Research\\GIS\\Projects\\HydroSHEDS\\shp\\na_cat_15s.shp' 28 | 29 | 30 | #******************************************************************************* 31 | #Temporary variables 32 | #******************************************************************************* 33 | arc_riv_ras = arc_riv_shp[:-4]+'.img' 34 | arc_cat_ras = arc_cat_shp[:-4]+'.img' 35 | arc_tmp_shp = arc_cat_shp[:-4]+'_tmp.shp' 36 | 37 | 38 | #******************************************************************************* 39 | #Allow for overwriting of outputs 40 | #******************************************************************************* 41 | arcpy.env.overwriteOutput = True 42 | 43 | 44 | #******************************************************************************* 45 | #Convert river polyline to river raster 46 | #******************************************************************************* 47 | arcpy.CheckOutExtension('Spatial') 48 | #Need to check out the spatial analyst extension before using it 49 | 50 | arcpy.env.outputCoordinateSystem = arc_dir_ras 51 | arcpy.env.extent = arc_dir_ras 52 | arcpy.env.snapRaster = arc_dir_ras 53 | arcpy.PolylineToRaster_conversion(arc_riv_shp, 'ARCID', arc_riv_ras, '', \ 54 | 'UP_CELLS', arc_dir_ras) 55 | #The last argument here, the optional 'cell_size', can point to existing raster. 
56 | #One could have used arcpy.FeatureToRaster_conversion here, but that tool does 57 | #not allow to set a priority field (here 'UP_CELLS'), without which one cannot 58 | #ensure one-to-one relationships between reaches and catchments (and vice versa) 59 | 60 | arcpy.CheckInExtension('Spatial') 61 | #Checking the spatial analyst extension back in after using it 62 | 63 | 64 | #******************************************************************************* 65 | #Create catchment raster 66 | #******************************************************************************* 67 | arcpy.CheckOutExtension('Spatial') 68 | #Need to check out the spatial analyst extension before using it 69 | 70 | arcpy.gp.Watershed_sa(arc_dir_ras, arc_riv_ras, arc_cat_ras, 'Value') 71 | 72 | arcpy.CheckInExtension('Spatial') 73 | #Checking the spatial analyst extension back in after using it 74 | 75 | 76 | #******************************************************************************* 77 | #Create catchment polygon 78 | #******************************************************************************* 79 | arcpy.RasterToPolygon_conversion(arc_cat_ras, arc_tmp_shp, 'NO_SIMPLIFY', \ 80 | 'Value') 81 | 82 | 83 | #******************************************************************************* 84 | #Clean up attributes of catchment polygon 85 | #******************************************************************************* 86 | arcpy.AddField_management(arc_tmp_shp, 'ARCID', 'Integer') 87 | #Creates a new attribute called 'ARCID' 88 | 89 | arcpy.CalculateField_management(arc_tmp_shp, 'ARCID', '!gridcode!', 'PYTHON', \ 90 | '') 91 | #Copy the values of attribute 'gridcode' into 'ARCID' 92 | 93 | arcpy.DeleteField_management(arc_tmp_shp, 'gridcode') 94 | #Delete the 'gridcode' field 95 | 96 | arcpy.DeleteField_management(arc_tmp_shp, 'Id') 97 | #Delete the 'Id' field 98 | 99 | arcpy.Dissolve_management(arc_tmp_shp, arc_cat_shp, 'ARCID', '', 'MULTI_PART') 100 | #Dissolve catchment shapefile to make sure each ARCID has a unique catchment 101 | 102 | 103 | #******************************************************************************* 104 | #Delete temporary files 105 | #******************************************************************************* 106 | arcpy.Delete_management(arc_riv_ras) 107 | arcpy.Delete_management(arc_cat_ras) 108 | arcpy.Delete_management(arc_tmp_shp) 109 | 110 | 111 | #******************************************************************************* 112 | #End 113 | #******************************************************************************* 114 | -------------------------------------------------------------------------------- /src/rrr_cat_bas.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_cat_bas.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a catchment file (CSV) and a list of river IDs (CSV), this program 8 | #creates a new catchment file (CSV) that is a subset of the original file with 9 | #the following information: 10 | # - rrr_cat_csv 11 | # . Catchment ID 12 | # . Catchment contributing area in square kilometers 13 | # . Longitude of catchment centroid 14 | # . Latitude of catchment centroid 15 | #Author: 16 | #Cedric H. 
David, 2020-2023 17 | 18 | 19 | #******************************************************************************* 20 | #Import Python modules 21 | #******************************************************************************* 22 | import sys 23 | import csv 24 | 25 | 26 | #******************************************************************************* 27 | #Declaration of variables (given as command line arguments) 28 | #******************************************************************************* 29 | # 1 - rrr_cat_csv 30 | # 2 - rrr_bas_csv 31 | # 3 - rrr_cbs_csv 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg != 4: 39 | print('ERROR - 3 and only 3 arguments can be used') 40 | raise SystemExit(22) 41 | 42 | rrr_cat_csv=sys.argv[1] 43 | rrr_bas_csv=sys.argv[2] 44 | rrr_cbs_csv=sys.argv[3] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+rrr_cat_csv) 52 | print('- '+rrr_bas_csv) 53 | print('- '+rrr_cbs_csv) 54 | 55 | 56 | #******************************************************************************* 57 | #Check if files exist 58 | #******************************************************************************* 59 | try: 60 | with open(rrr_cat_csv) as file: 61 | pass 62 | except IOError as e: 63 | print('ERROR - Unable to open '+rrr_cat_csv) 64 | raise SystemExit(22) 65 | 66 | try: 67 | with open(rrr_bas_csv) as file: 68 | pass 69 | except IOError as e: 70 | print('ERROR - Unable to open '+rrr_bas_csv) 71 | raise SystemExit(22) 72 | 73 | 74 | #******************************************************************************* 75 | #Reading catchment file 76 | #******************************************************************************* 77 | print('Reading catchment file') 78 | 79 | IV_cat_tot_id=[] 80 | ZV_cat_tot_skm=[] 81 | ZV_cat_tot_lon=[] 82 | ZV_cat_tot_lat=[] 83 | with open(rrr_cat_csv,'r') as csvfile: 84 | csvreader=csv.reader(csvfile) 85 | for row in csvreader: 86 | IV_cat_tot_id.append(int(row[0])) 87 | ZV_cat_tot_skm.append(float(row[1])) 88 | ZV_cat_tot_lon.append(float(row[2])) 89 | ZV_cat_tot_lat.append(float(row[3])) 90 | 91 | IS_cat_tot=len(IV_cat_tot_id) 92 | print('- Number of catchments in rrr_cat_csv: '+str(IS_cat_tot)) 93 | 94 | 95 | #******************************************************************************* 96 | #Reading basin file 97 | #******************************************************************************* 98 | print('Reading basin file') 99 | 100 | IV_riv_bas_id=[] 101 | with open(rrr_bas_csv,'r') as csvfile: 102 | csvreader=csv.reader(csvfile) 103 | for row in csvreader: 104 | IV_riv_bas_id.append(int(row[0])) 105 | 106 | IS_riv_bas=len(IV_riv_bas_id) 107 | print('- Number of river reaches in rrr_bas_csv: '+str(IS_riv_bas)) 108 | 109 | 110 | #******************************************************************************* 111 | #Creating hash table 112 | #******************************************************************************* 113 | print('Creating hash table') 114 | 115 | IM_hsh={} 116 | for JS_cat_tot in range(IS_cat_tot): 117 | IM_hsh[IV_cat_tot_id[JS_cat_tot]]=JS_cat_tot 118 | 119 | print("- Hash table created") 120 | 121 | 
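#Illustrative note, not part of the original script: IM_hsh maps each catchment ID to its row index in the catchment file, so that the membership checks and lookups below run in constant time instead of scanning the whole list. For a purely hypothetical IV_cat_tot_id=[17,42,8], IM_hsh[42]=1 and the area and centroid of catchment 42 are read from index 1 of the corresponding lists.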
122 | #******************************************************************************* 123 | #Checking all river reaches in basin are in catchment file 124 | #******************************************************************************* 125 | print('Checking all river reaches in basin are in catchment file') 126 | 127 | for JS_riv_bas in range(IS_riv_bas): 128 | if IV_riv_bas_id[JS_riv_bas] not in IM_hsh: 129 | print('ERROR - river ID '+str(IV_riv_bas_id[JS_riv_bas])+' not in ' \ 130 | +rrr_cat_csv) 131 | raise SystemExit(22) 132 | 133 | print("- Success") 134 | 135 | 136 | #******************************************************************************* 137 | #Creating new catchment information 138 | #******************************************************************************* 139 | print('Creating new catchment information') 140 | 141 | IV_riv_bas_id_srt=IV_riv_bas_id 142 | IV_riv_bas_id_srt.sort() 143 | 144 | IV_cat_bas_id=[-9999]*IS_riv_bas 145 | ZV_cat_bas_skm=[-9999]*IS_riv_bas 146 | ZV_cat_bas_lon=[-9999]*IS_riv_bas 147 | ZV_cat_bas_lat=[-9999]*IS_riv_bas 148 | for JS_riv_bas in range(IS_riv_bas): 149 | JS_cat_tot=IM_hsh[IV_riv_bas_id_srt[JS_riv_bas]] 150 | IV_cat_bas_id[JS_riv_bas] =IV_cat_tot_id[JS_cat_tot] 151 | ZV_cat_bas_skm[JS_riv_bas]=ZV_cat_tot_skm[JS_cat_tot] 152 | ZV_cat_bas_lon[JS_riv_bas]=ZV_cat_tot_lon[JS_cat_tot] 153 | ZV_cat_bas_lat[JS_riv_bas]=ZV_cat_tot_lat[JS_cat_tot] 154 | 155 | print("- Done") 156 | 157 | 158 | #******************************************************************************* 159 | #Writing new catchment file 160 | #******************************************************************************* 161 | print('Writing new catchment file') 162 | with open(rrr_cbs_csv, 'w') as csvfile: 163 | csvwriter = csv.writer(csvfile, dialect='excel') 164 | for JS_riv_bas in range(IS_riv_bas): 165 | IV_line=[IV_cat_bas_id[JS_riv_bas], \ 166 | ZV_cat_bas_skm[JS_riv_bas], \ 167 | ZV_cat_bas_lon[JS_riv_bas], \ 168 | ZV_cat_bas_lat[JS_riv_bas]] 169 | csvwriter.writerow(IV_line) 170 | 171 | 172 | #******************************************************************************* 173 | #End 174 | #******************************************************************************* 175 | -------------------------------------------------------------------------------- /src/rrr_cat_tot_gen_one_meritbasins.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_cat_tot_gen_one_meritbasins.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a river shapefile from MERIT Basins, this program creates a csv file with 8 | #the following information: 9 | # - rrr_cat_csv 10 | # . Catchment ID 11 | # . Catchment contributing area in square kilometers 12 | # . Longitude of catchment centroid 13 | # . Latitude of catchment centroid 14 | #Author: 15 | #Cedric H. 
David, 2022-2023 16 | 17 | 18 | #******************************************************************************* 19 | #Import Python modules 20 | #******************************************************************************* 21 | import sys 22 | import fiona 23 | import shapely.geometry 24 | import csv 25 | 26 | 27 | #******************************************************************************* 28 | #Declaration of variables (given as command line arguments) 29 | #******************************************************************************* 30 | # 1 - mer_cat_shp 31 | # 2 - rrr_cat_csv 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg != 3: 39 | print('ERROR - 2 and only 2 arguments can be used') 40 | raise SystemExit(22) 41 | 42 | mer_cat_shp=sys.argv[1] 43 | rrr_cat_csv=sys.argv[2] 44 | 45 | 46 | #******************************************************************************* 47 | #Print input information 48 | #******************************************************************************* 49 | print('Command line inputs') 50 | print('- '+mer_cat_shp) 51 | print('- '+rrr_cat_csv) 52 | 53 | 54 | #******************************************************************************* 55 | #Check if files exist 56 | #******************************************************************************* 57 | try: 58 | with open(mer_cat_shp) as file: 59 | pass 60 | except IOError as e: 61 | print('ERROR - Unable to open '+mer_cat_shp) 62 | raise SystemExit(22) 63 | 64 | 65 | #******************************************************************************* 66 | #Read shapefile 67 | #******************************************************************************* 68 | print('Read shapefile') 69 | 70 | #------------------------------------------------------------------------------- 71 | #Open file 72 | #------------------------------------------------------------------------------- 73 | print('- Open file') 74 | 75 | mer_cat_lay=fiona.open(mer_cat_shp, 'r') 76 | IS_cat_tot=len(mer_cat_lay) 77 | print('- The number of catchment features is: '+str(IS_cat_tot)) 78 | 79 | #------------------------------------------------------------------------------- 80 | #Read attributes 81 | #------------------------------------------------------------------------------- 82 | print('- Read attributes') 83 | 84 | if 'COMID' in mer_cat_lay[0]['properties']: 85 | YV_cat_id='COMID' 86 | else: 87 | print('ERROR - COMID does not exist in '+mer_cat_shp) 88 | raise SystemExit(22) 89 | 90 | if 'areasqkm' in mer_cat_lay[0]['properties']: 91 | YV_cat_sqkm='areasqkm' 92 | elif 'unitarea' in mer_cat_lay[0]['properties']: 93 | YV_cat_sqkm='unitarea' 94 | else: 95 | print('ERROR - Neither areasqkm nor unitarea exist in '+mer_cat_shp) 96 | raise SystemExit(22) 97 | 98 | IV_cat_tot_id=[] 99 | ZV_cat_sqkm=[] 100 | for JS_cat_tot in range(IS_cat_tot): 101 | mer_cat_prp=mer_cat_lay[JS_cat_tot]['properties'] 102 | IV_cat_tot_id.append(int(mer_cat_prp[YV_cat_id])) 103 | ZV_cat_sqkm.append(float(mer_cat_prp[YV_cat_sqkm])) 104 | 105 | #------------------------------------------------------------------------------- 106 | #Read geometry 107 | #------------------------------------------------------------------------------- 108 | print('- Read geometry') 109 | 110 | ZV_cat_x_cen=[] 111 | ZV_cat_y_cen=[] 112 | for JS_cat_tot in range(IS_cat_tot): 113 
| mer_cat_geo=mer_cat_lay[JS_cat_tot]['geometry'] 114 | #extracted the geometry for one feature in this shapefile. 115 | mer_cat_pnt=shapely.geometry.shape(mer_cat_geo).centroid 116 | #create the centroid for this one feature. 117 | mer_cat_cen=mer_cat_pnt.coords[:][0] 118 | #extracted the coordinates for the centroid 119 | ZV_cat_x_cen.append(mer_cat_cen[0]) 120 | ZV_cat_y_cen.append(mer_cat_cen[1]) 121 | #assigned coordinates to array 122 | 123 | 124 | #******************************************************************************* 125 | #Write outputs 126 | #******************************************************************************* 127 | print('Writing files') 128 | 129 | with open(rrr_cat_csv, 'w') as csvfile: 130 | csvwriter = csv.writer(csvfile, dialect='excel') 131 | for JS_cat_tot in range(IS_cat_tot): 132 | IV_line=[IV_cat_tot_id[JS_cat_tot], \ 133 | round(ZV_cat_sqkm[JS_cat_tot],4), \ 134 | ZV_cat_x_cen[JS_cat_tot], \ 135 | ZV_cat_y_cen[JS_cat_tot]] 136 | csvwriter.writerow(IV_line) 137 | 138 | 139 | #******************************************************************************* 140 | #End 141 | #******************************************************************************* 142 | -------------------------------------------------------------------------------- /src/rrr_cat_tot_gen_one_nhdplus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_cat_tot_gen_one_nhdplus.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a catchment shapefile from NHDPlus, this program creates a csv file with 8 | #the following information: 9 | # - rrr_cat_file 10 | # . Catchment ID 11 | # . Catchment contributing area in square kilometers 12 | # . Longitude of catchment centroid 13 | # . Latitude of catchment centroid 14 | #Author: 15 | #Cedric H. 
David, 2007-2023 16 | 17 | 18 | #******************************************************************************* 19 | #Import Python modules 20 | #******************************************************************************* 21 | import sys 22 | import csv 23 | import dbf 24 | import shapefile 25 | 26 | 27 | #******************************************************************************* 28 | #Declaration of variables (given as command line arguments) 29 | #******************************************************************************* 30 | # 1 - nhd_cat_file 31 | # 2 - rrr_cat_file 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg != 3: 39 | print('ERROR - 2 and only 2 arguments can be used') 40 | raise SystemExit(22) 41 | 42 | nhd_cat_file=sys.argv[1] 43 | rrr_cat_file=sys.argv[2] 44 | 45 | 46 | #******************************************************************************* 47 | #Print input information 48 | #******************************************************************************* 49 | print('Command line inputs') 50 | print('- '+nhd_cat_file) 51 | print('- '+rrr_cat_file) 52 | 53 | 54 | #******************************************************************************* 55 | #Check if files exist 56 | #******************************************************************************* 57 | try: 58 | with open(nhd_cat_file) as file: 59 | pass 60 | except IOError as e: 61 | print('ERROR - Unable to open '+nhd_cat_file) 62 | raise SystemExit(22) 63 | 64 | 65 | #******************************************************************************* 66 | #Read shapefile 67 | #******************************************************************************* 68 | print('Read shapefile') 69 | 70 | #------------------------------------------------------------------------------- 71 | #Read reach IDs (much faster using dbf than shapefile module) 72 | #------------------------------------------------------------------------------- 73 | print('- Read attributes') 74 | nhd_cat_dbf=dbf.Table(nhd_cat_file) 75 | nhd_cat_dbf.open() 76 | 77 | record=nhd_cat_dbf[0] 78 | if hasattr(record,'comid'): 79 | YS_id_name='comid' 80 | elif hasattr(record,'featureid'): 81 | YS_id_name='featureid' 82 | else: 83 | print('ERROR - No attribute named comid or featureid in '+nhd_cat_file) 84 | raise SystemExit(22) 85 | 86 | IV_cat_tot_id=[] 87 | ZV_cat_sqkm=[] 88 | for record in nhd_cat_dbf: 89 | IV_cat_tot_id.append(record[YS_id_name]) 90 | ZV_cat_sqkm.append(record['areasqkm']) 91 | 92 | IS_cat_tot=len(IV_cat_tot_id) 93 | 94 | #------------------------------------------------------------------------------- 95 | #Reading shapes 96 | #------------------------------------------------------------------------------- 97 | print('- Read shapes') 98 | nhd_cat_shp=shapefile.Reader(nhd_cat_file) 99 | 100 | #For formulas used here, see: 101 | #https://en.wikipedia.org/wiki/Centroid#Centroid_of_polygon 102 | ZV_cat_x_cen=[] 103 | ZV_cat_y_cen=[] 104 | for JS_cat_tot in range(IS_cat_tot): 105 | shape=nhd_cat_shp.shape(JS_cat_tot) 106 | #Current polygon feature in the shapefile 107 | shpoints=shape.points 108 | #Object with all vertices of the current polygon feature 109 | IS_point=len(shpoints) 110 | #Number of vertices in the current polygon feature 111 | IV_idx=shape.parts 112 | #Start indices for each part of the multipart polygon 
feature 113 | IS_part=len(IV_idx) 114 | #Number of parts in the multipart polygon feature 115 | IV_idx.append(IS_point) 116 | #Add the total number of vertices to allow for looping from start to end 117 | ZV_prt_area=[] 118 | ZV_prt_x_cen=[] 119 | ZV_prt_y_cen=[] 120 | #area, longitude and latitude of each part of the multipart 121 | for JS_part in range(IS_part): 122 | #Start/end indices of the part in the multipart polygon shapefiles 123 | IS_p_str=IV_idx[JS_part] 124 | IS_p_end=IV_idx[JS_part+1] 125 | #Area of each part in the multipart polygon shapefile: 126 | ZS_area=0 127 | for JS_point in range(IS_p_str,IS_p_end-1): 128 | ZS_area += ( shpoints[JS_point ][0]*shpoints[JS_point+1][1] \ 129 | -shpoints[JS_point+1][0]*shpoints[JS_point ][1]) 130 | ZS_area += ( shpoints[IS_p_end-1][0]*shpoints[IS_p_str ][1] \ 131 | -shpoints[IS_p_str ][0]*shpoints[IS_p_end-1][1]) 132 | ZS_area /= 2 133 | #Centroid of each part in the multipart polygon shapefile: 134 | ZS_x=0 135 | ZS_y=0 136 | for JS_point in range(IS_p_str,IS_p_end-1): 137 | ZS_x += ( shpoints[JS_point ][0]+shpoints[JS_point+1][0] ) \ 138 | *( shpoints[JS_point ][0]*shpoints[JS_point+1][1] \ 139 | -shpoints[JS_point+1][0]*shpoints[JS_point ][1]) 140 | ZS_y += ( shpoints[JS_point ][1]+shpoints[JS_point+1][1] ) \ 141 | *( shpoints[JS_point ][0]*shpoints[JS_point+1][1] \ 142 | -shpoints[JS_point+1][0]*shpoints[JS_point ][1]) 143 | ZS_x += ( shpoints[IS_p_end-1][0]+shpoints[IS_p_str ][0] ) \ 144 | *( shpoints[IS_p_end-1][0]*shpoints[IS_p_str ][1] \ 145 | -shpoints[IS_p_str ][0]*shpoints[IS_p_end-1][1]) 146 | ZS_y += ( shpoints[IS_p_end-1][1]+shpoints[IS_p_str ][1] ) \ 147 | *( shpoints[IS_p_end-1][0]*shpoints[IS_p_str ][1] \ 148 | -shpoints[IS_p_str ][0]*shpoints[IS_p_end-1][1]) 149 | ZS_x /= (ZS_area * 6.0) 150 | ZS_y /= (ZS_area * 6.0) 151 | #Append area and centroid coordinates of each part in the multipart: 152 | ZV_prt_area.append(ZS_area) 153 | ZV_prt_x_cen.append(ZS_x) 154 | ZV_prt_y_cen.append(ZS_y) 155 | #Calculate and append centroid coordinates of polygon in the shapefile: 156 | ZS_cat_x_cen =sum([ZV_prt_area[i]*ZV_prt_x_cen[i] for i in range(IS_part)]) 157 | ZS_cat_x_cen /=sum(ZV_prt_area) 158 | ZV_cat_x_cen.append(ZS_cat_x_cen) 159 | ZS_cat_y_cen =sum([ZV_prt_area[i]*ZV_prt_y_cen[i] for i in range(IS_part)]) 160 | ZS_cat_y_cen /=sum(ZV_prt_area) 161 | ZV_cat_y_cen.append(ZS_cat_y_cen) 162 | 163 | print('- Total number of catchments: '+str(IS_cat_tot)) 164 | 165 | 166 | #******************************************************************************* 167 | #Write outputs 168 | #******************************************************************************* 169 | print('Writing files') 170 | 171 | with open(rrr_cat_file, 'w') as csvfile: 172 | csvwriter = csv.writer(csvfile, dialect='excel') 173 | for JS_cat_tot in range(IS_cat_tot): 174 | IV_line=[IV_cat_tot_id[JS_cat_tot], \ 175 | round(ZV_cat_sqkm[JS_cat_tot],4), \ 176 | ZV_cat_x_cen[JS_cat_tot], \ 177 | ZV_cat_y_cen[JS_cat_tot]] 178 | csvwriter.writerow(IV_line) 179 | 180 | 181 | #******************************************************************************* 182 | #End 183 | #******************************************************************************* 184 | -------------------------------------------------------------------------------- /src/rrr_cpl_riv_lsm_att.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | 
#rrr_cpl_riv_lsm_att.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a netCDF file and a list of attributes (title, institution, comment, 8 | #semi major axis, and inverse flattening), this program updates the attributes 9 | #of the netCDF file. 10 | #Author: 11 | #Cedric H. David, 2019-2019 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import netCDF4 19 | 20 | 21 | #******************************************************************************* 22 | #Declaration of variables (given as command line arguments) 23 | #******************************************************************************* 24 | # 1 - rrr_dat_ncf 25 | # 2 - rrr_tit_str 26 | # 3 - rrr_ins_str 27 | # 4 - rrr_com_str 28 | # 5 - ZS_sem 29 | # 6 - ZS_inv 30 | 31 | 32 | #******************************************************************************* 33 | #Get command line arguments 34 | #******************************************************************************* 35 | IS_arg=len(sys.argv) 36 | if IS_arg != 7: 37 | print('ERROR - 6 and only 6 arguments can be used') 38 | raise SystemExit(22) 39 | 40 | rrr_dat_ncf=sys.argv[1] 41 | rrr_tit_str=sys.argv[2] 42 | rrr_ins_str=sys.argv[3] 43 | rrr_com_str=sys.argv[4] 44 | ZS_sem=sys.argv[5] 45 | ZS_inv=sys.argv[6] 46 | 47 | if ZS_sem!='': 48 | ZS_sem=float(ZS_sem) 49 | if ZS_inv!='': 50 | ZS_inv=float(ZS_inv) 51 | 52 | 53 | #******************************************************************************* 54 | #Print input information 55 | #******************************************************************************* 56 | print('Command line inputs') 57 | print('- '+rrr_dat_ncf) 58 | print('- '+rrr_tit_str) 59 | print('- '+rrr_ins_str) 60 | print('- '+rrr_com_str) 61 | print('- '+str(ZS_sem)) 62 | print('- '+str(ZS_inv)) 63 | 64 | 65 | #******************************************************************************* 66 | #Check if files exist 67 | #******************************************************************************* 68 | try: 69 | with open(rrr_dat_ncf) as file: 70 | pass 71 | except IOError as e: 72 | print('ERROR - Unable to open '+rrr_dat_ncf) 73 | raise SystemExit(22) 74 | 75 | 76 | #******************************************************************************* 77 | #Modifying attributes 78 | #******************************************************************************* 79 | print('Modifying attributes') 80 | 81 | #------------------------------------------------------------------------------- 82 | #Modify global attributes 83 | #------------------------------------------------------------------------------- 84 | #Note: Attributes of type "Character" need to be deleted before being modified 85 | #if the number of characters is different! 
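#Illustrative note, not part of the original script: the block below first checks, with the file opened read-only, whether the requested title, institution, and comment differ from the existing values, and only then reopens the file in append ('r+') mode. Each global attribute is deleted and immediately re-created, presumably so that character attributes of a different length can be rewritten while the attributes keep a consistent order; only title, institution, and comment receive new values, the others are rewritten with their previous values.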
86 | 87 | f = netCDF4.Dataset(rrr_dat_ncf, 'r') 88 | 89 | if 'title' in f.ncattrs() and \ 90 | 'comment' in f.ncattrs() and \ 91 | 'institution' in f.ncattrs(): 92 | if f.getncattr('title')!=rrr_tit_str or \ 93 | f.getncattr('comment')!=rrr_com_str or \ 94 | f.getncattr('institution')!= rrr_ins_str: 95 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 96 | #The requested attributes differ from existing: changes needed 97 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 98 | f.close() 99 | f = netCDF4.Dataset(rrr_dat_ncf, 'r+') 100 | 101 | if 'Conventions' in f.ncattrs(): 102 | rrr_con_str=f.getncattr('Conventions') 103 | f.delncattr('Conventions') 104 | f.setncattr('Conventions',rrr_con_str) 105 | 106 | if 'title' in f.ncattrs(): 107 | #rrr_tit_str 108 | f.delncattr('title') 109 | f.setncattr('title',rrr_tit_str) 110 | 111 | if 'institution' in f.ncattrs(): 112 | #rrr_ins_str 113 | f.delncattr('institution') 114 | f.setncattr('institution',rrr_ins_str) 115 | 116 | if 'source' in f.ncattrs(): 117 | rrr_sou_str=f.getncattr('source') 118 | f.delncattr('source') 119 | f.setncattr('source',rrr_sou_str) 120 | 121 | if 'history' in f.ncattrs(): 122 | rrr_his_str=f.getncattr('history') 123 | f.delncattr('history') 124 | f.setncattr('history',rrr_his_str) 125 | 126 | if 'references' in f.ncattrs(): 127 | rrr_ref_str=f.getncattr('references') 128 | f.delncattr('references') 129 | f.setncattr('references',rrr_ref_str) 130 | 131 | if 'comment' in f.ncattrs(): 132 | #rrr_com_str 133 | f.delncattr('comment') 134 | f.setncattr('comment',rrr_com_str) 135 | 136 | if 'featureType' in f.ncattrs(): 137 | rrr_fea_str=f.getncattr('featureType') 138 | f.delncattr('featureType') 139 | f.setncattr('featureType',rrr_fea_str) 140 | print('- global attributes modified') 141 | else: 142 | print('- No modification made to global attributes') 143 | 144 | f.close() 145 | 146 | #------------------------------------------------------------------------------- 147 | #Modify crs attributes 148 | #------------------------------------------------------------------------------- 149 | f = netCDF4.Dataset(rrr_dat_ncf, 'r') 150 | 151 | if 'crs' in f.variables: 152 | crs=f.variables['crs'] 153 | if 'semi_major_axis' in crs.ncattrs() and \ 154 | 'inverse_flattening' in crs.ncattrs(): 155 | if crs.getncattr('semi_major_axis')!=ZS_sem or \ 156 | crs.getncattr('inverse_flattening')!=ZS_inv: 157 | # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 158 | #The requested attributes differ from existing: changes needed 159 | # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 160 | f.close() 161 | f = netCDF4.Dataset(rrr_dat_ncf, 'r+') 162 | f.variables['crs'].delncattr('semi_major_axis') #use the crs variable of the reopened file 163 | f.variables['crs'].setncattr('semi_major_axis',ZS_sem) 164 | f.variables['crs'].delncattr('inverse_flattening') 165 | f.variables['crs'].setncattr('inverse_flattening',ZS_inv) 166 | print('- crs attributes modified') 167 | else: 168 | print('- No modification made to crs attributes') 169 | 170 | f.close() 171 | 172 | 173 | #******************************************************************************* 174 | #End 175 | #******************************************************************************* 176 | -------------------------------------------------------------------------------- /src/rrr_cpl_riv_lsm_lum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_cpl_riv_lsm_lum.py 4 
#******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a netCDF file with time-varying external inflow (in m^3) into the river 8 | #network, a list of river IDs that are upstream of a given river ID, and the 9 | #river ID itself; this program computes the lumped discharge and stores it in a 10 | #new CSV file. 11 | #Author: 12 | #Cedric H. David, 2018-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Import Python modules 17 | #******************************************************************************* 18 | import sys 19 | import netCDF4 20 | import csv 21 | import numpy 22 | import datetime 23 | 24 | 25 | #******************************************************************************* 26 | #Declaration of variables (given as command line arguments) 27 | #******************************************************************************* 28 | # 1 - rrr_vol_ncf 29 | # 2 - rrr_ups_csv 30 | # 3 - YS_riv_id 31 | # 4 - rrr_hyd_csv 32 | 33 | 34 | #******************************************************************************* 35 | #Get command line arguments 36 | #******************************************************************************* 37 | IS_arg=len(sys.argv) 38 | if IS_arg != 5: 39 | print('ERROR - 4 and only 4 arguments must be used') 40 | raise SystemExit(22) 41 | 42 | rrr_vol_ncf=sys.argv[1] 43 | rrr_ups_csv=sys.argv[2] 44 | YS_riv_id=sys.argv[3] 45 | rrr_hyd_csv=sys.argv[4] 46 | 47 | 48 | #******************************************************************************* 49 | #Print input information 50 | #******************************************************************************* 51 | print('Command line inputs') 52 | print('- '+rrr_vol_ncf) 53 | print('- '+rrr_ups_csv) 54 | print('- '+YS_riv_id) 55 | print('- '+rrr_hyd_csv) 56 | 57 | 58 | #******************************************************************************* 59 | #Check if files exist 60 | #******************************************************************************* 61 | try: 62 | with open(rrr_vol_ncf) as file: 63 | pass 64 | except IOError as e: 65 | print('ERROR - Unable to open '+rrr_vol_ncf) 66 | raise SystemExit(22) 67 | 68 | try: 69 | with open(rrr_ups_csv) as file: 70 | pass 71 | except IOError as e: 72 | print('ERROR - Unable to open '+rrr_ups_csv) 73 | raise SystemExit(22) 74 | 75 | 76 | #******************************************************************************* 77 | #Reading netCDF file 78 | #******************************************************************************* 79 | print('Reading netCDF file') 80 | 81 | #------------------------------------------------------------------------------- 82 | #Open netCDF file 83 | #------------------------------------------------------------------------------- 84 | f1 = netCDF4.Dataset(rrr_vol_ncf, 'r') 85 | 86 | #------------------------------------------------------------------------------- 87 | #Get dimension names and sizes 88 | #------------------------------------------------------------------------------- 89 | if 'COMID' in f1.dimensions: 90 | YS_rivid='COMID' 91 | elif 'rivid' in f1.dimensions: 92 | YS_rivid='rivid' 93 | else: 94 | print('ERROR - Neither COMID nor rivid exist in '+rrr_vol_ncf) 95 | raise SystemExit(22) 96 | 97 | IS_riv_tot=len(f1.dimensions[YS_rivid]) 98 | print('- The number of river reaches is: '+str(IS_riv_tot)) 99 | 100 | if 'Time' in f1.dimensions: 101 | YS_time='Time' 102 | elif 'time' in f1.dimensions: 103 | 
YS_time='time' 104 | else: 105 | print('ERROR - Neither Time nor time exist in '+rrr_vol_ncf) 106 | raise SystemExit(22) 107 | 108 | IS_time=len(f1.dimensions[YS_time]) 109 | print('- The number of time steps is: '+str(IS_time)) 110 | 111 | #------------------------------------------------------------------------------- 112 | #Get variable names 113 | #------------------------------------------------------------------------------- 114 | if 'm3_riv' in f1.variables: 115 | YS_var='m3_riv' 116 | else: 117 | print('ERROR - m3_riv does not exist in '+rrr_vol_ncf) 118 | raise SystemExit(22) 119 | 120 | 121 | #******************************************************************************* 122 | #Reading CSV file 123 | #******************************************************************************* 124 | print('Reading CSV file') 125 | 126 | IV_riv_ups_id=[] 127 | with open(rrr_ups_csv,'r') as csvfile: 128 | csvreader=csv.reader(csvfile) 129 | #IS_riv_ups=sum(1 for row in csvreader)-1 130 | YV_header=next(iter(csvreader)) 131 | for row in csvreader: 132 | IV_riv_ups_id.append(int(row[0])) 133 | 134 | IS_riv_ups=len(IV_riv_ups_id) 135 | print('- Number of river reaches in rrr_ups_csv: '+str(IS_riv_ups)) 136 | 137 | 138 | #******************************************************************************* 139 | #Creating hash table 140 | #******************************************************************************* 141 | print('Creating hash table') 142 | 143 | IV_riv_tot_id=f1.variables[YS_rivid][:] 144 | 145 | ZM_hsh={} 146 | for JS_riv_tot in range(IS_riv_tot): 147 | ZM_hsh[IV_riv_tot_id[JS_riv_tot]]=JS_riv_tot 148 | 149 | print('- Done') 150 | 151 | 152 | #******************************************************************************* 153 | #Computing lumped runoff for given list of river IDs 154 | #******************************************************************************* 155 | print('Computing lumped runoff for given list of river IDs') 156 | 157 | ZV_time=[] 158 | for JS_time in range(IS_time): 159 | ZV_time.append(f1.variables[YS_time][JS_time]) 160 | 161 | YV_time=[datetime.datetime.utcfromtimestamp(t).strftime('%Y-%m-%d') \ 162 | for t in ZV_time] 163 | 164 | IV_riv_ups_index=[] 165 | for JS_riv_ups in range(IS_riv_ups): 166 | IV_riv_ups_index.append(ZM_hsh[IV_riv_ups_id[JS_riv_ups]]) 167 | 168 | 169 | ZV_var_lum=[] 170 | for JS_time in range(IS_time): 171 | ZV_var_tmp=f1.variables[YS_var][JS_time,:] 172 | ZV_var_lum.append(sum(ZV_var_tmp[IV_riv_ups_index])) 173 | #Lumping all water volume 174 | 175 | ZV_var_lum=ZV_var_lum/(ZV_time[1]-ZV_time[0]) 176 | #Converting to discharge 177 | 178 | 179 | #******************************************************************************* 180 | #Write rrr_hyd_csv file 181 | #******************************************************************************* 182 | print('Write rrr_hyd_csv file') 183 | 184 | with open(rrr_hyd_csv, 'w') as csvfile: 185 | csvwriter = csv.writer(csvfile, dialect='excel') 186 | csvwriter.writerow(['Lumped',YS_riv_id]) 187 | for JS_time in range(IS_time): 188 | IV_line=[YV_time[JS_time],ZV_var_lum[JS_time]] 189 | csvwriter.writerow(IV_line) 190 | #Write hydrographs 191 | 192 | 193 | #******************************************************************************* 194 | #End 195 | #******************************************************************************* 196 | -------------------------------------------------------------------------------- /src/rrr_lsm_tot_cmb_acc.sh: 
-------------------------------------------------------------------------------- 1 | #/bin/bash 2 | #******************************************************************************* 3 | #rrr_lsm_tot_cmb_acc.sh 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a list of netCDF files with one time step each, produces a combined 8 | #(concatenated) netCDF file with averages computed over a given number of time 9 | #steps. 10 | #Each netCDF file contains the following information: 11 | # - nc_file(lat,lon,time) 12 | # . lon(lon) 13 | # . lat(lat) 14 | # . RUNSF(time,lat,lon) 15 | # . RUNSB(time,lat,lon) 16 | #This program uses the netCDF Common Operators (NCO, i.e. ncra). 17 | #Author: 18 | #Cedric H. David, 2016-2023 19 | 20 | 21 | #******************************************************************************* 22 | #Declaration of variables (given as command line arguments) 23 | #******************************************************************************* 24 | # 1 - 1st netCDF file 25 | # 2 - 2nd netCDF file 26 | # 3 - 3rd netCDF file 27 | # i - ith netCDF file 28 | # n-1 - Number of contiguous netCDF files to be averaged together in each step 29 | # n - output netCDF file 30 | 31 | 32 | #******************************************************************************* 33 | #Compute the number of netCDF files 34 | #******************************************************************************* 35 | let "IS_file=$#-2" 36 | 37 | 38 | #******************************************************************************* 39 | #Check command line inputs 40 | #******************************************************************************* 41 | if [ "$#" -lt "3" ]; then 42 | echo "A minimum of 3 arguments must be provided" 1>&2 43 | exit 22 44 | fi 45 | #Check that 3 or more arguments were provided 46 | 47 | for ii in "${@:1:$IS_file}"; do # `seq 1 $IS_file`; do # $#-2; do 48 | if [ ! -e "${ii}" ]; then 49 | echo "Input file ${ii} doesn't exist" 1>&2 50 | exit 22 51 | fi 52 | done 53 | #Check that the input files exist. ${@:1:$IS_file}: the first IS_file arguments 54 | 55 | 56 | #******************************************************************************* 57 | #Assign command line arguments to local variables 58 | #******************************************************************************* 59 | IS_step=${@:$IS_file+1:1} 60 | #from the list of arguments, starting at IS_file+1, 1 argument (2nd to last) 61 | nc_file=${@:$IS_file+2:1} 62 | #from the list of arguments, starting at IS_file+2, 1 argument (last) 63 | nc_temp=$nc_file'.tmp' 64 | 65 | 66 | #******************************************************************************* 67 | #If file doesn't already exist: average and concatenate, then scale 68 | #******************************************************************************* 69 | if [ ! -e "$nc_file" ]; then 70 | 71 | #------------------------------------------------------------------------------- 72 | #average and concatenate 73 | #------------------------------------------------------------------------------- 74 | if [ ! -e "$nc_temp" ]; then 75 | ncra -O --mro -d time,,,$IS_step,$IS_step \ 76 | ${@:1:$IS_file} \ 77 | -o $nc_temp 78 | fi 79 | #This uses the NCO 'subcycle' and 'multi record outputs' options. 
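#Illustrative example, assuming one-time-step hourly inputs and hypothetical
#file names: with six input files and IS_step=3, "-d time,,,3,3" makes ncra
#average the records in groups of three, and "--mro" writes one output record
#per group, so $nc_temp would hold two records (two 3-hourly averages):
# ./rrr_lsm_tot_cmb_acc.sh GLDAS_H00.nc4 GLDAS_H01.nc4 GLDAS_H02.nc4 \
#                          GLDAS_H03.nc4 GLDAS_H04.nc4 GLDAS_H05.nc4 \
#                          3 GLDAS_3H.nc4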
80 | 81 | #------------------------------------------------------------------------------- 82 | #Rename netCDF variables in case it was not done before 83 | #------------------------------------------------------------------------------- 84 | if [ -e "$nc_temp" ]; then 85 | ncrename -v .SSRUN,RUNSF \ 86 | -v .BGRUN,RUNSB \ 87 | -v .Qs_acc,RUNSF \ 88 | -v .Qsb_acc,RUNSB \ 89 | -v .Qs_tavg,RUNSF \ 90 | -v .Qsb_tavg,RUNSB \ 91 | -d .east_west,lon \ 92 | -d .north_south,lat \ 93 | $nc_temp 94 | fi 95 | #Adding "." before a variable name informs ncrename that it's optional. 96 | #SSRUN / BGRUN are used in NLDAS for surface runoff / subsurface runoff (resp). 97 | #Q_xx / Qs_xx are used in GLDAS for surface runoff / subsurface runoff (resp). 98 | # xx can be: acc, or tavg. 99 | 100 | #------------------------------------------------------------------------------- 101 | #scale so that value is accumulated, not averaged 102 | #------------------------------------------------------------------------------- 103 | ncap2 -s "RUNSF=RUNSF*$IS_step;RUNSB=RUNSB*$IS_step" \ 104 | $nc_temp \ 105 | -o $nc_file 106 | 107 | #------------------------------------------------------------------------------- 108 | #Delete temporary file 109 | #------------------------------------------------------------------------------- 110 | rm -f $nc_temp 111 | 112 | #------------------------------------------------------------------------------- 113 | #End file existence 114 | #------------------------------------------------------------------------------- 115 | fi 116 | 117 | 118 | #******************************************************************************* 119 | #End 120 | #******************************************************************************* 121 | -------------------------------------------------------------------------------- /src/rrr_obs_bas_cal.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_obs_bas_cal.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a CSV file river IDs where gauges are available and a shapefile with 8 | #gauges where calibration will take place; this program creates a subset CSV 9 | #file corresponding to calibration gauges. 10 | #Author: 11 | #Cedric H. 
David, 2023-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import csv 19 | import fiona 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables (given as command line arguments) 24 | #******************************************************************************* 25 | # 1 - rrr_tot_csv 26 | # 2 - rrr_cal_shp 27 | # 3 - rrr_bas_csv 28 | 29 | 30 | #******************************************************************************* 31 | #Get command line arguments 32 | #******************************************************************************* 33 | IS_arg=len(sys.argv) 34 | if IS_arg != 4: 35 | print('ERROR - 3 and only 3 arguments can be used') 36 | raise SystemExit(22) 37 | 38 | rrr_tot_csv=sys.argv[1] 39 | rrr_cal_shp=sys.argv[2] 40 | rrr_bas_csv=sys.argv[3] 41 | 42 | 43 | #******************************************************************************* 44 | #Print input information 45 | #******************************************************************************* 46 | print('Command line inputs') 47 | print('- '+rrr_tot_csv) 48 | print('- '+rrr_cal_shp) 49 | print('- '+rrr_bas_csv) 50 | 51 | 52 | #******************************************************************************* 53 | #Check if files exist 54 | #******************************************************************************* 55 | try: 56 | with open(rrr_tot_csv) as file: 57 | pass 58 | except IOError as e: 59 | print('ERROR - Unable to open '+rrr_tot_csv) 60 | raise SystemExit(22) 61 | 62 | try: 63 | with open(rrr_cal_shp) as file: 64 | pass 65 | except IOError as e: 66 | print('WARNING - Unable to open '+rrr_cal_shp+', skipping') 67 | raise SystemExit(-22) 68 | 69 | 70 | #******************************************************************************* 71 | #Read CSV file 72 | #******************************************************************************* 73 | print('Read CSV file') 74 | 75 | IV_tot_ids=[] 76 | with open(rrr_tot_csv) as csvfile: 77 | csvreader=csv.reader(csvfile) 78 | for row in csvreader: 79 | IV_tot_ids.append(int(row[0])) 80 | 81 | IS_tot_csv=len(IV_tot_ids) 82 | print('- The number of available gauges in CSV file is: '+str(IS_tot_csv)) 83 | 84 | 85 | #******************************************************************************* 86 | #Read gauge shapefile 87 | #******************************************************************************* 88 | print('Read gauge shapefile') 89 | 90 | rrr_cal_lay=fiona.open(rrr_cal_shp, 'r') 91 | 92 | if 'Sttn_Nm' in rrr_cal_lay[0]['properties']: 93 | YS_cal_nam='Sttn_Nm' 94 | else: 95 | print('ERROR - Sttn_Nm does not exist in '+rrr_cal_shp) 96 | raise SystemExit(22) 97 | 98 | if 'rivid' in rrr_cal_lay[0]['properties']: 99 | YS_cal_ids='rivid' 100 | else: 101 | print('ERROR - rivid does not exist in '+rrr_cal_shp) 102 | raise SystemExit(22) 103 | 104 | IS_cal_shp=len(rrr_cal_lay) 105 | print('- The number of gauge features is: '+str(IS_cal_shp)) 106 | 107 | YV_cal_nam=[] 108 | IV_cal_ids=[] 109 | for rrr_cal_fea in rrr_cal_lay: 110 | YV_cal_nam.append(rrr_cal_fea['properties'][YS_cal_nam]) 111 | IV_cal_ids.append(rrr_cal_fea['properties'][YS_cal_ids]) 112 | 113 | 114 | #******************************************************************************* 115 | #Extract calibration IDs 116 | 
#******************************************************************************* 117 | print('Extract calibration IDs') 118 | 119 | IV_bas_ids=[] 120 | for JS_tot_csv in range(IS_tot_csv): 121 | if IV_tot_ids[JS_tot_csv] in IV_cal_ids: 122 | IV_bas_ids.append(IV_tot_ids[JS_tot_csv]) 123 | 124 | IS_bas_csv=len(IV_bas_ids) 125 | print('- The number of calibration gauges in CSV file is: '+str(IS_bas_csv)) 126 | 127 | 128 | #******************************************************************************* 129 | #Write calibration CSV file 130 | #******************************************************************************* 131 | print('Write calibration CSV file') 132 | 133 | with open(rrr_bas_csv, 'w') as csvfile: 134 | csvwriter = csv.writer(csvfile, dialect='excel') 135 | for JS_bas_csv in range(IS_bas_csv): 136 | csvwriter.writerow([IV_bas_ids[JS_bas_csv]]) 137 | 138 | print('- Done') 139 | 140 | 141 | #******************************************************************************* 142 | #End 143 | #******************************************************************************* 144 | -------------------------------------------------------------------------------- /src/rrr_obs_bas_sub.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_obs_bas_sub.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a CSV file with timeseries observations at many gauges with the first 8 | #column being time, the first row being gauge code (or name) and each subsequent 9 | #row is flow observations, and given a shapefile with a subset of these gauges 10 | #that includes both gauge code and river reach ID; this program creates a subset 11 | #of observations in a format that is suitable for RAPID: i.e. a CSV file with 12 | #sorted river IDS, and a CSV file with corresponding flow observations. 13 | #Author: 14 | #Cedric H. 
David, 2023-2023 15 | 16 | 17 | #******************************************************************************* 18 | #Import Python modules 19 | #******************************************************************************* 20 | import sys 21 | import csv 22 | import numpy 23 | import fiona 24 | 25 | 26 | #******************************************************************************* 27 | #Declaration of variables (given as command line arguments) 28 | #******************************************************************************* 29 | # 1 - rrr_tQo_csv 30 | # 2 - rrr_obs_shp 31 | # 3 - rrr_rid_csv 32 | # 4 - rrr_Qob_csv 33 | 34 | 35 | #******************************************************************************* 36 | #Get command line arguments 37 | #******************************************************************************* 38 | IS_arg=len(sys.argv) 39 | if IS_arg != 5: 40 | print('ERROR - 4 and only 4 arguments can be used') 41 | raise SystemExit(22) 42 | 43 | rrr_tQo_csv=sys.argv[1] 44 | rrr_obs_shp=sys.argv[2] 45 | rrr_rid_csv=sys.argv[3] 46 | rrr_Qob_csv=sys.argv[4] 47 | 48 | 49 | #******************************************************************************* 50 | #Print input information 51 | #******************************************************************************* 52 | print('Command line inputs') 53 | print(' - '+rrr_tQo_csv) 54 | print(' - '+rrr_obs_shp) 55 | print(' - '+rrr_rid_csv) 56 | print(' - '+rrr_Qob_csv) 57 | 58 | 59 | #******************************************************************************* 60 | #Check if files exist 61 | #******************************************************************************* 62 | try: 63 | with open(rrr_tQo_csv) as file: 64 | pass 65 | except IOError as e: 66 | print('ERROR - Unable to open '+rrr_tQo_csv) 67 | raise SystemExit(22) 68 | 69 | try: 70 | with open(rrr_obs_shp) as file: 71 | pass 72 | except IOError as e: 73 | print('WARNING - Unable to open '+rrr_obs_shp+', skipping') 74 | raise SystemExit(-22) 75 | 76 | 77 | #******************************************************************************* 78 | #Read CSV file 79 | #******************************************************************************* 80 | print('Read CSV file') 81 | 82 | with open(rrr_tQo_csv) as csvfile: 83 | csvreader=csv.reader(csvfile) 84 | row=next(iter(csvreader)) 85 | YV_tQo_nam=row[1:] 86 | IS_tQo_csv=len(YV_tQo_nam) 87 | 88 | IS_time=0 89 | for row in csvreader: 90 | IS_time=IS_time+1 91 | 92 | print('- The number of gauges in CSV file is: '+str(IS_tQo_csv)) 93 | print('- The number of time steps in CSV file is: '+str(IS_time)) 94 | 95 | 96 | #******************************************************************************* 97 | #Read gauge shapefile 98 | #******************************************************************************* 99 | print('Read gauge shapefile') 100 | 101 | rrr_obs_lay=fiona.open(rrr_obs_shp, 'r') 102 | 103 | if 'Sttn_Nm' in rrr_obs_lay[0]['properties']: 104 | YS_obs_nam='Sttn_Nm' 105 | else: 106 | print('ERROR - Sttn_Nm does not exist in '+rrr_obs_shp) 107 | raise SystemExit(22) 108 | 109 | if 'rivid' in rrr_obs_lay[0]['properties']: 110 | YS_obs_ids='rivid' 111 | else: 112 | print('ERROR - rivid does not exist in '+rrr_obs_shp) 113 | raise SystemExit(22) 114 | 115 | IS_obs_shp=len(rrr_obs_lay) 116 | print(' - The number of gauge features is: '+str(IS_obs_shp)) 117 | 118 | YV_obs_nam=[] 119 | IV_obs_ids=[] 120 | for rrr_obs_fea in rrr_obs_lay: 121 | YV_obs_nam.append(rrr_obs_fea['properties'][YS_obs_nam]) 122 | 
IV_obs_ids.append(rrr_obs_fea['properties'][YS_obs_ids]) 123 | 124 | 125 | #******************************************************************************* 126 | #Create hash table 127 | #******************************************************************************* 128 | print('Create hash table') 129 | 130 | YM_hsh={} 131 | for JS_tQo_csv in range(IS_tQo_csv): 132 | tQo_nam=YV_tQo_nam[JS_tQo_csv] 133 | YM_hsh[tQo_nam]=JS_tQo_csv 134 | 135 | print(' - Done') 136 | 137 | 138 | #******************************************************************************* 139 | #Check all requested basin stations are in input file 140 | #******************************************************************************* 141 | print('Check all requested basin stations are in input file') 142 | 143 | for JS_obs_shp in range(IS_obs_shp): 144 | obs_nam=YV_obs_nam[JS_obs_shp] 145 | if obs_nam not in YM_hsh: 146 | print('ERROR - '+obs_nam+' does not exist in '+rrr_tQo_csv) 147 | raise SystemExit(22) 148 | 149 | print(' - Done') 150 | 151 | 152 | #******************************************************************************* 153 | #Sort river IDs of subsample 154 | #******************************************************************************* 155 | print('Sort river IDs of subsample') 156 | 157 | z=sorted(zip(IV_obs_ids,YV_obs_nam)) 158 | IV_obs_ids_srt,YV_obs_nam_srt=zip(*z) 159 | #Sorting the lists together based on increasing value of the river ID. 160 | IV_obs_ids_srt=list(IV_obs_ids_srt) 161 | YV_obs_nam_srt=list(YV_obs_nam_srt) 162 | #Because zip creates tuples and not lists 163 | 164 | IV_obs_ids=IV_obs_ids_srt.copy() 165 | YV_obs_nam=YV_obs_nam_srt.copy() 166 | #Reassigning the original names after sorting 167 | 168 | print(' - Done') 169 | 170 | 171 | #******************************************************************************* 172 | #Subsample observations and write CSV files 173 | #******************************************************************************* 174 | print('Subsample observations and write CSV files') 175 | 176 | IV_obs_idx=[YM_hsh[tQo_nam] for tQo_nam in YV_obs_nam] 177 | 178 | with open(rrr_rid_csv, 'w') as csvfil2: 179 | csvwriter = csv.writer(csvfil2, dialect='excel') 180 | for JS_obs_ids in range(len(IV_obs_ids)): 181 | csvwriter.writerow([IV_obs_ids[JS_obs_ids]]) 182 | 183 | with open(rrr_Qob_csv, 'w') as csvfil2: 184 | csvwriter = csv.writer(csvfil2, dialect='excel') 185 | 186 | with open(rrr_tQo_csv) as csvfile: 187 | csvreader=csv.reader(csvfile) 188 | row=next(iter(csvreader)) 189 | 190 | for row in csvreader: 191 | ZV_Qob=row[1:] 192 | ZV_line=[ZV_Qob[idx] for idx in IV_obs_idx] 193 | csvwriter.writerow(ZV_line) 194 | 195 | print(' - Done') 196 | 197 | 198 | #******************************************************************************* 199 | #End 200 | #******************************************************************************* 201 | -------------------------------------------------------------------------------- /src/rrr_obs_tot_tbl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_obs_tot_tbl.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given an observing stations shapefile with unique integer identifiers, unique 8 | #station codes, names, longitude, and latitude, this program creates one csv 9 | #file that contains a summary table indexed by 
station code. 10 | #Author: 11 | #Cedric H. David, 2016-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import os 19 | import fiona 20 | import csv 21 | 22 | 23 | #******************************************************************************* 24 | #Declaration of variables (given as command line arguments) 25 | #******************************************************************************* 26 | # 1 - rrr_obs_shp 27 | # 2 - rrr_tbl_csv 28 | 29 | 30 | #******************************************************************************* 31 | #Get command line arguments 32 | #******************************************************************************* 33 | IS_arg=len(sys.argv) 34 | if IS_arg != 3: 35 | print('ERROR - 2 and only 2 arguments can be used') 36 | raise SystemExit(22) 37 | 38 | rrr_obs_shp=sys.argv[1] 39 | rrr_tbl_csv=sys.argv[2] 40 | 41 | 42 | #******************************************************************************* 43 | #Print input information 44 | #******************************************************************************* 45 | print('Command line inputs') 46 | print('- '+rrr_obs_shp) 47 | print('- '+rrr_tbl_csv) 48 | 49 | 50 | #******************************************************************************* 51 | #Check if files and directories exist 52 | #******************************************************************************* 53 | try: 54 | with open(rrr_obs_shp) as file: 55 | pass 56 | except IOError as e: 57 | print('ERROR - Unable to open '+rrr_obs_shp) 58 | raise SystemExit(22) 59 | 60 | 61 | #******************************************************************************* 62 | #Read rrr_obs_shp 63 | #******************************************************************************* 64 | print('Read rrr_obs_shp') 65 | 66 | rrr_obs_lay=fiona.open(rrr_obs_shp, 'r') 67 | IS_obs_tot=len(rrr_obs_lay) 68 | print('- The number of gauge features is: '+str(IS_obs_tot)) 69 | 70 | if 'COMID_1' in rrr_obs_lay[0]['properties']: 71 | YV_obs_id='COMID_1' 72 | elif 'FLComID' in rrr_obs_lay[0]['properties']: 73 | YV_obs_id='FLComID' 74 | elif 'ARCID' in rrr_obs_lay[0]['properties']: 75 | YV_obs_id='ARCID' 76 | else: 77 | print('ERROR - COMID_1, FLComID, or ARCID do not exist in '+rrr_obs_shp) 78 | raise SystemExit(22) 79 | 80 | if 'STATION_NM' in rrr_obs_lay[0]['properties']: 81 | YV_obs_nm='STATION_NM' 82 | elif 'Name' in rrr_obs_lay[0]['properties']: 83 | YV_obs_nm='Name' 84 | else: 85 | print('ERROR - STATION_NM or Name do not exist in '+rrr_obs_shp) 86 | raise SystemExit(22) 87 | 88 | if 'SOURCE_FEA' in rrr_obs_lay[0]['properties']: 89 | YV_obs_cd='SOURCE_FEA' 90 | elif 'Code' in rrr_obs_lay[0]['properties']: 91 | YV_obs_cd='Code' 92 | else: 93 | print('ERROR - Neither SOURCE_FEA nor Code exist in '+rrr_obs_shp) 94 | raise SystemExit(22) 95 | 96 | if 'Lon' in rrr_obs_lay[0]['properties']: 97 | YV_obs_ln='Lon' 98 | else: 99 | print('ERROR - Lon does not exist in '+rrr_obs_shp) 100 | raise SystemExit(22) 101 | 102 | if 'Lat' in rrr_obs_lay[0]['properties']: 103 | YV_obs_lt='Lat' 104 | else: 105 | print('ERROR - Lat does not exist in '+rrr_obs_shp) 106 | raise SystemExit(22) 107 | 108 | IV_obs_tot_id=[] 109 | YV_obs_tot_nm=[] 110 | YV_obs_tot_cd=[] 111 | ZV_obs_tot_ln=[] 112 | ZV_obs_tot_lt=[] 113 | for JS_obs_tot in range(IS_obs_tot): 114 | 
IV_obs_tot_id.append(int(rrr_obs_lay[JS_obs_tot]['properties'][YV_obs_id])) 115 | YV_obs_tot_cd.append(str(rrr_obs_lay[JS_obs_tot]['properties'][YV_obs_cd])) 116 | YV_obs_tot_nm.append(str(rrr_obs_lay[JS_obs_tot]['properties'][YV_obs_nm])) 117 | ZV_obs_tot_ln.append(float(rrr_obs_lay[JS_obs_tot]['properties'][YV_obs_ln])) 118 | ZV_obs_tot_lt.append(float(rrr_obs_lay[JS_obs_tot]['properties'][YV_obs_lt])) 119 | 120 | z = sorted(zip(IV_obs_tot_id,YV_obs_tot_nm,YV_obs_tot_cd,ZV_obs_tot_ln, \ 121 | ZV_obs_tot_lt)) 122 | IV_obs_tot_id_srt,YV_obs_tot_nm_srt,YV_obs_tot_cd_srt, \ 123 | ZV_obs_tot_ln_srt,ZV_obs_tot_lt_srt =zip(*z) 124 | #Sorting the lists together based on increasing value of the river ID. 125 | 126 | 127 | #******************************************************************************* 128 | #Write formatted table 129 | #******************************************************************************* 130 | print('Write formatted table') 131 | 132 | with open(rrr_tbl_csv, 'w') as csvfile: 133 | csvwriter = csv.writer(csvfile, dialect='excel', quotechar="'", \ 134 | quoting=csv.QUOTE_NONNUMERIC) 135 | csvwriter.writerow(['RIVID','CODE','NAME','LON','LAT']) 136 | for JS_obs_tot in range(IS_obs_tot): 137 | csvwriter.writerow([IV_obs_tot_id_srt[JS_obs_tot], \ 138 | YV_obs_tot_cd_srt[JS_obs_tot], \ 139 | YV_obs_tot_nm_srt[JS_obs_tot], \ 140 | ZV_obs_tot_ln_srt[JS_obs_tot], \ 141 | ZV_obs_tot_lt_srt[JS_obs_tot] ]) 142 | 143 | 144 | #******************************************************************************* 145 | #End 146 | #******************************************************************************* 147 | -------------------------------------------------------------------------------- /src/rrr_riv_bas_gen_one_hydrosheds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_riv_bas_gen_one_hydrosheds.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a river shapefile from HydroSHEDS, a connectivity table and associated 8 | #sorting integer, this program creates a csv file with the following 9 | #information: 10 | # - rrr_riv_csv 11 | # . River ID (sorted from upstream to downstream) 12 | #Author: 13 | #Cedric H. 
David, 2014-2023 14 | 15 | 16 | #******************************************************************************* 17 | #Import Python modules 18 | #******************************************************************************* 19 | import sys 20 | import fiona 21 | import csv 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - hsd_riv_shp 28 | # 2 - rrr_con_csv 29 | # 3 - rrr_srt_csv 30 | # 4 - rrr_riv_csv 31 | 32 | 33 | #******************************************************************************* 34 | #Get command line arguments 35 | #******************************************************************************* 36 | IS_arg=len(sys.argv) 37 | if IS_arg != 5: 38 | print('ERROR - 4 and only 4 arguments can be used') 39 | raise SystemExit(22) 40 | 41 | hsd_riv_shp=sys.argv[1] 42 | rrr_con_csv=sys.argv[2] 43 | rrr_srt_csv=sys.argv[3] 44 | rrr_riv_csv=sys.argv[4] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+hsd_riv_shp) 52 | print('- '+rrr_srt_csv) 53 | print('- '+rrr_riv_csv) 54 | 55 | 56 | #******************************************************************************* 57 | #Check if files exist 58 | #******************************************************************************* 59 | try: 60 | with open(hsd_riv_shp) as file: 61 | pass 62 | except IOError as e: 63 | print('ERROR - Unable to open '+hsd_riv_shp) 64 | raise SystemExit(22) 65 | 66 | try: 67 | with open(rrr_con_csv) as file: 68 | pass 69 | except IOError as e: 70 | print('ERROR - Unable to open '+rrr_con_csv) 71 | raise SystemExit(22) 72 | 73 | try: 74 | with open(rrr_srt_csv) as file: 75 | pass 76 | except IOError as e: 77 | print('ERROR - Unable to open '+rrr_srt_csv) 78 | raise SystemExit(22) 79 | 80 | 81 | #******************************************************************************* 82 | #Reading input files 83 | #******************************************************************************* 84 | print('Reading input files') 85 | 86 | #------------------------------------------------------------------------------- 87 | #Reading river shapefile 88 | #------------------------------------------------------------------------------- 89 | print('- Reading river shapefile') 90 | 91 | hsd_riv_lay=fiona.open(hsd_riv_shp, 'r') 92 | IS_riv_bas=len(hsd_riv_lay) 93 | print(' . 
Number of river reaches in rrr_riv_shp: '+str(IS_riv_bas)) 94 | 95 | if 'ARCID' in hsd_riv_lay[0]['properties']: 96 | YV_riv_id='ARCID' 97 | else: 98 | print('ERROR - ARCID does not exist in '+hsd_riv_shp) 99 | raise SystemExit(22) 100 | 101 | IV_riv_bas_id=[] 102 | for JS_riv_bas in range(IS_riv_bas): 103 | hsd_riv_prp=hsd_riv_lay[JS_riv_bas]['properties'] 104 | IV_riv_bas_id.append(int(hsd_riv_prp[YV_riv_id])) 105 | 106 | #------------------------------------------------------------------------------- 107 | #Reading connectivity file 108 | #------------------------------------------------------------------------------- 109 | print('- Reading connectivity file') 110 | IV_riv_tot_id=[] 111 | with open(rrr_con_csv,'r') as csvfile: 112 | csvreader=csv.reader(csvfile) 113 | for row in csvreader: 114 | IV_riv_tot_id.append(int(row[0])) 115 | IS_riv_tot1=len(IV_riv_tot_id) 116 | print(' . Number of river reaches in rrr_con_csv: '+str(IS_riv_tot1)) 117 | 118 | #------------------------------------------------------------------------------- 119 | #Reading sort file 120 | #------------------------------------------------------------------------------- 121 | print('- Reading sort file') 122 | IV_riv_tot_sort=[] 123 | with open(rrr_srt_csv,'r') as csvfile: 124 | csvreader=csv.reader(csvfile) 125 | for row in csvreader: 126 | IV_riv_tot_sort.append(int(row[0])) 127 | IS_riv_tot2=len(IV_riv_tot_sort) 128 | print(' . Number of river reaches in rrr_srt_csv: '+str(IS_riv_tot2)) 129 | 130 | 131 | #******************************************************************************* 132 | #Check that sizes of rrr_con_csv and rrr_srt_csv are the same 133 | #******************************************************************************* 134 | if IS_riv_tot1==IS_riv_tot2: 135 | IS_riv_tot=IS_riv_tot1 136 | else: 137 | print('ERROR - The number of river reaches in rrr_con_csv and in ' \ 138 | 'rrr_srt_csv differ') 139 | raise SystemExit(22) 140 | 141 | 142 | #******************************************************************************* 143 | #Assign sort values to each reach in basin 144 | #******************************************************************************* 145 | IM_hsh={} 146 | for JS_riv_tot in range(IS_riv_tot): 147 | IM_hsh[IV_riv_tot_id[JS_riv_tot]]=JS_riv_tot 148 | 149 | IV_riv_bas_sort=[] 150 | for JS_riv_bas in range(IS_riv_bas): 151 | if IV_riv_bas_id[JS_riv_bas] in IM_hsh: 152 | JS_riv_tot=IM_hsh[IV_riv_bas_id[JS_riv_bas]] 153 | IV_riv_bas_sort.append(IV_riv_tot_sort[JS_riv_tot]) 154 | else: 155 | print('ERROR - Reach ID '+str(IV_riv_bas_id[JS_riv_bas])+'is not in '\ 156 | 'rrr_con_csv') 157 | raise SystemExit(22) 158 | 159 | 160 | #******************************************************************************* 161 | #Sort files 162 | #******************************************************************************* 163 | print('Sorting') 164 | z=zip(*sorted(zip(IV_riv_bas_sort,IV_riv_bas_id), reverse=True, \ 165 | key=lambda x: x[0]) ) 166 | IV_riv_bas_sort2, IV_riv_bas_id2=z 167 | 168 | #print(IV_riv_bas_sort2[0]) 169 | #print(IV_riv_bas_sort[0]) 170 | 171 | 172 | #******************************************************************************* 173 | #Write outputs 174 | #******************************************************************************* 175 | print('Writing file') 176 | 177 | with open(rrr_riv_csv, 'w') as csvfile: 178 | csvwriter = csv.writer(csvfile, dialect='excel') 179 | for JS_riv_bas in range(IS_riv_bas): 180 | csvwriter.writerow([IV_riv_bas_id2[JS_riv_bas]]) 181 | 182 | 
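#Illustrative example with hypothetical values: if rrr_con_csv lists river IDs
#(10, 20, 30) and rrr_srt_csv holds the matching sort integers (2, 0, 1), the
#reverse sort above yields (10, 30, 20), so rrr_riv_csv lists reaches from
#upstream to downstream, as stated in the Purpose. A hypothetical invocation
#(file names are placeholders):
# python3 ./rrr_riv_bas_gen_one_hydrosheds.py \
#         hsd_riv.shp rrr_con.csv rrr_srt.csv rrr_riv.csv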
183 | #******************************************************************************* 184 | #End 185 | #******************************************************************************* 186 | -------------------------------------------------------------------------------- /src/rrr_riv_bas_gen_one_meritbasins.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_riv_bas_gen_one_meritbasins.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a river shapefile from MERIT Basins, a connectivity table and associated 8 | #sorting integer, this program creates a csv file with the following 9 | #information: 10 | # - rrr_riv_csv 11 | # . River ID (sorted from upstream to downstream) 12 | #Author: 13 | #Cedric H. David, 2022-2023 14 | 15 | 16 | #******************************************************************************* 17 | #Import Python modules 18 | #******************************************************************************* 19 | import sys 20 | import fiona 21 | import csv 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - mer_riv_shp 28 | # 2 - rrr_con_csv 29 | # 3 - rrr_srt_csv 30 | # 4 - rrr_riv_csv 31 | 32 | 33 | #******************************************************************************* 34 | #Get command line arguments 35 | #******************************************************************************* 36 | IS_arg=len(sys.argv) 37 | if IS_arg != 5: 38 | print('ERROR - 4 and only 4 arguments can be used') 39 | raise SystemExit(22) 40 | 41 | mer_riv_shp=sys.argv[1] 42 | rrr_con_csv=sys.argv[2] 43 | rrr_srt_csv=sys.argv[3] 44 | rrr_riv_csv=sys.argv[4] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+mer_riv_shp) 52 | print('- '+rrr_srt_csv) 53 | print('- '+rrr_riv_csv) 54 | 55 | 56 | #******************************************************************************* 57 | #Check if files exist 58 | #******************************************************************************* 59 | try: 60 | with open(mer_riv_shp) as file: 61 | pass 62 | except IOError as e: 63 | print('ERROR - Unable to open '+mer_riv_shp) 64 | raise SystemExit(22) 65 | 66 | try: 67 | with open(rrr_con_csv) as file: 68 | pass 69 | except IOError as e: 70 | print('ERROR - Unable to open '+rrr_con_csv) 71 | raise SystemExit(22) 72 | 73 | try: 74 | with open(rrr_srt_csv) as file: 75 | pass 76 | except IOError as e: 77 | print('ERROR - Unable to open '+rrr_srt_csv) 78 | raise SystemExit(22) 79 | 80 | 81 | #******************************************************************************* 82 | #Reading input files 83 | #******************************************************************************* 84 | print('Reading input files') 85 | 86 | #------------------------------------------------------------------------------- 87 | #Reading river shapefile 88 | #------------------------------------------------------------------------------- 89 | print('- Reading river shapefile') 90 | 91 | 
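#The MERIT Basins shapefile identifies each river reach by its COMID attribute
#(checked below); apart from that attribute name, this step mirrors the
#ARCID-based reading done in rrr_riv_bas_gen_one_hydrosheds.py.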
mer_riv_lay=fiona.open(mer_riv_shp, 'r') 92 | IS_riv_bas=len(mer_riv_lay) 93 | print(' . Number of river reaches in rrr_riv_shp: '+str(IS_riv_bas)) 94 | 95 | if 'COMID' in mer_riv_lay[0]['properties']: 96 | YV_riv_id='COMID' 97 | else: 98 | print('ERROR - COMID does not exist in '+mer_riv_shp) 99 | raise SystemExit(22) 100 | 101 | IV_riv_bas_id=[] 102 | for JS_riv_bas in range(IS_riv_bas): 103 | mer_riv_prp=mer_riv_lay[JS_riv_bas]['properties'] 104 | IV_riv_bas_id.append(int(mer_riv_prp[YV_riv_id])) 105 | 106 | #------------------------------------------------------------------------------- 107 | #Reading connectivity file 108 | #------------------------------------------------------------------------------- 109 | print('- Reading connectivity file') 110 | IV_riv_tot_id=[] 111 | with open(rrr_con_csv,'r') as csvfile: 112 | csvreader=csv.reader(csvfile) 113 | for row in csvreader: 114 | IV_riv_tot_id.append(int(row[0])) 115 | IS_riv_tot1=len(IV_riv_tot_id) 116 | print(' . Number of river reaches in rrr_con_csv: '+str(IS_riv_tot1)) 117 | 118 | #------------------------------------------------------------------------------- 119 | #Reading sort file 120 | #------------------------------------------------------------------------------- 121 | print('- Reading sort file') 122 | IV_riv_tot_sort=[] 123 | with open(rrr_srt_csv,'r') as csvfile: 124 | csvreader=csv.reader(csvfile) 125 | for row in csvreader: 126 | IV_riv_tot_sort.append(int(row[0])) 127 | IS_riv_tot2=len(IV_riv_tot_sort) 128 | print(' . Number of river reaches in rrr_srt_csv: '+str(IS_riv_tot2)) 129 | 130 | 131 | #******************************************************************************* 132 | #Check that sizes of rrr_con_csv and rrr_srt_csv are the same 133 | #******************************************************************************* 134 | if IS_riv_tot1==IS_riv_tot2: 135 | IS_riv_tot=IS_riv_tot1 136 | else: 137 | print('ERROR - The number of river reaches in rrr_con_csv and in ' \ 138 | 'rrr_srt_csv differ') 139 | raise SystemExit(22) 140 | 141 | 142 | #******************************************************************************* 143 | #Assign sort values to each reach in basin 144 | #******************************************************************************* 145 | IM_hsh={} 146 | for JS_riv_tot in range(IS_riv_tot): 147 | IM_hsh[IV_riv_tot_id[JS_riv_tot]]=JS_riv_tot 148 | 149 | IV_riv_bas_sort=[] 150 | for JS_riv_bas in range(IS_riv_bas): 151 | if IV_riv_bas_id[JS_riv_bas] in IM_hsh: 152 | JS_riv_tot=IM_hsh[IV_riv_bas_id[JS_riv_bas]] 153 | IV_riv_bas_sort.append(IV_riv_tot_sort[JS_riv_tot]) 154 | else: 155 | print('ERROR - Reach ID '+str(IV_riv_bas_id[JS_riv_bas])+'is not in '\ 156 | 'rrr_con_csv') 157 | raise SystemExit(22) 158 | 159 | 160 | #******************************************************************************* 161 | #Sort files 162 | #******************************************************************************* 163 | print('Sorting') 164 | z=zip(*sorted(zip(IV_riv_bas_sort,IV_riv_bas_id), reverse=True, \ 165 | key=lambda x: x[0]) ) 166 | IV_riv_bas_sort2, IV_riv_bas_id2=z 167 | 168 | #print(IV_riv_bas_sort2[0]) 169 | #print(IV_riv_bas_sort[0]) 170 | 171 | 172 | #******************************************************************************* 173 | #Write outputs 174 | #******************************************************************************* 175 | print('Writing file') 176 | 177 | with open(rrr_riv_csv, 'w') as csvfile: 178 | csvwriter = csv.writer(csvfile, dialect='excel') 179 | for JS_riv_bas in 
range(IS_riv_bas): 180 | csvwriter.writerow([IV_riv_bas_id2[JS_riv_bas]]) 181 | 182 | 183 | #******************************************************************************* 184 | #End 185 | #******************************************************************************* 186 | -------------------------------------------------------------------------------- /src/rrr_riv_bas_gen_one_nhdplus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_riv_bas_gen_one_nhdplus.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a river shapefile from NHDPlus, a connectivity table and associated 8 | #sorting integer, this program creates a csv file with the following 9 | #information: 10 | # - rrr_riv_file 11 | # . River ID (sorted from upstream to downstream) 12 | #Author: 13 | #Cedric H. David, 2007-2023 14 | 15 | 16 | #******************************************************************************* 17 | #Import Python modules 18 | #******************************************************************************* 19 | import sys 20 | import csv 21 | import dbf 22 | 23 | 24 | #******************************************************************************* 25 | #Declaration of variables (given as command line arguments) 26 | #******************************************************************************* 27 | # 1 - nhd_riv_file 28 | # 2 - rrr_con_file 29 | # 3 - rrr_srt_file 30 | # 4 - rrr_riv_file 31 | 32 | 33 | #******************************************************************************* 34 | #Get command line arguments 35 | #******************************************************************************* 36 | IS_arg=len(sys.argv) 37 | if IS_arg != 5: 38 | print('ERROR - 4 and only 4 arguments can be used') 39 | raise SystemExit(22) 40 | 41 | nhd_riv_file=sys.argv[1] 42 | rrr_con_file=sys.argv[2] 43 | rrr_srt_file=sys.argv[3] 44 | rrr_riv_file=sys.argv[4] 45 | 46 | 47 | #******************************************************************************* 48 | #Print input information 49 | #******************************************************************************* 50 | print('Command line inputs') 51 | print('- '+nhd_riv_file) 52 | print('- '+rrr_srt_file) 53 | print('- '+rrr_riv_file) 54 | 55 | 56 | #******************************************************************************* 57 | #Check if files exist 58 | #******************************************************************************* 59 | try: 60 | with open(nhd_riv_file) as file: 61 | pass 62 | except IOError as e: 63 | print('ERROR - Unable to open '+nhd_riv_file) 64 | raise SystemExit(22) 65 | 66 | try: 67 | with open(rrr_con_file) as file: 68 | pass 69 | except IOError as e: 70 | print('ERROR - Unable to open '+rrr_con_file) 71 | raise SystemExit(22) 72 | 73 | try: 74 | with open(rrr_srt_file) as file: 75 | pass 76 | except IOError as e: 77 | print('ERROR - Unable to open '+rrr_srt_file) 78 | raise SystemExit(22) 79 | 80 | 81 | #******************************************************************************* 82 | #Read files 83 | #******************************************************************************* 84 | print('Reading input files') 85 | 86 | #------------------------------------------------------------------------------- 87 | #Basin file 88 | 
#------------------------------------------------------------------------------- 89 | nhd_riv_dbf=dbf.Table(nhd_riv_file) 90 | nhd_riv_dbf.open() 91 | 92 | IV_riv_bas_id=[] 93 | for record in nhd_riv_dbf: 94 | if record['flowdir'].strip()=='With Digitized': 95 | IV_riv_bas_id.append(record['comid']) 96 | 97 | IS_riv_bas=len(IV_riv_bas_id) 98 | 99 | print('- Number of reaches in basin file: '+str(len(nhd_riv_dbf))) 100 | print('- Number of reaches with known dir: '+str(IS_riv_bas)) 101 | 102 | #------------------------------------------------------------------------------- 103 | #Connectivity file 104 | #------------------------------------------------------------------------------- 105 | IV_riv_tot_id=[] 106 | with open(rrr_con_file,'r') as csvfile: 107 | csvreader=csv.reader(csvfile) 108 | for row in csvreader: 109 | IV_riv_tot_id.append(int(row[0])) 110 | IS_riv_tot1=len(IV_riv_tot_id) 111 | print('- Number of river reaches in rrr_con_file: '+str(IS_riv_tot1)) 112 | 113 | #------------------------------------------------------------------------------- 114 | #Sort file 115 | #------------------------------------------------------------------------------- 116 | IV_riv_tot_sort=[] 117 | with open(rrr_srt_file,'r') as csvfile: 118 | csvreader=csv.reader(csvfile) 119 | for row in csvreader: 120 | IV_riv_tot_sort.append(int(row[0])) 121 | IS_riv_tot2=len(IV_riv_tot_sort) 122 | print('- Number of river reaches in rrr_srt_file: '+str(IS_riv_tot2)) 123 | 124 | 125 | #******************************************************************************* 126 | #Check that sizes of rrr_con_file and rrr_srt_file are the same 127 | #******************************************************************************* 128 | if IS_riv_tot1==IS_riv_tot2: 129 | IS_riv_tot=IS_riv_tot1 130 | else: 131 | print('ERROR - The number of river reaches in rrr_con_file and in ' \ 132 | 'rrr_srt_file differ') 133 | raise SystemExit(22) 134 | 135 | 136 | #******************************************************************************* 137 | #Assign sort values to each reach in basin 138 | #******************************************************************************* 139 | IM_hsh={} 140 | for JS_riv_tot in range(IS_riv_tot): 141 | IM_hsh[IV_riv_tot_id[JS_riv_tot]]=JS_riv_tot 142 | 143 | IV_riv_bas_sort=[] 144 | for JS_riv_bas in range(IS_riv_bas): 145 | if IV_riv_bas_id[JS_riv_bas] in IM_hsh: 146 | JS_riv_tot=IM_hsh[IV_riv_bas_id[JS_riv_bas]] 147 | IV_riv_bas_sort.append(IV_riv_tot_sort[JS_riv_tot]) 148 | else: 149 | print('ERROR - Reach ID '+str(IV_riv_bas_id[JS_riv_bas])+'is not in '\ 150 | 'rrr_con_file') 151 | raise SystemExit(22) 152 | 153 | 154 | #******************************************************************************* 155 | #Sort files 156 | #******************************************************************************* 157 | print('Sorting') 158 | z=zip(*sorted(zip(IV_riv_bas_sort,IV_riv_bas_id), reverse=True, \ 159 | key=lambda x: x[0]) ) 160 | IV_riv_bas_sort2, IV_riv_bas_id2=z 161 | 162 | 163 | #******************************************************************************* 164 | #Write outputs 165 | #******************************************************************************* 166 | print('Writing file') 167 | 168 | with open(rrr_riv_file, 'w') as csvfile: 169 | csvwriter = csv.writer(csvfile, dialect='excel') 170 | for JS_riv_bas in range(IS_riv_bas): 171 | csvwriter.writerow([IV_riv_bas_id2[JS_riv_bas]]) 172 | 173 | 174 | #******************************************************************************* 175 
| #End 176 | #******************************************************************************* 177 | -------------------------------------------------------------------------------- /src/rrr_riv_tot_scl_prm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_riv_tot_scl_prm.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given first-guess parameter files for the Muskingum k and x values, and 8 | #associated scaling factors, this program creates a series of csv files with the 9 | #following information: 10 | # - rrr_k_file 11 | # . k 12 | # - rrr_x_file 13 | # . x 14 | #Author: 15 | #Cedric H. David, 2007-2023 16 | 17 | 18 | #******************************************************************************* 19 | #Import Python modules 20 | #******************************************************************************* 21 | import sys 22 | import csv 23 | 24 | 25 | #******************************************************************************* 26 | #Declaration of variables (given as command line arguments) 27 | #******************************************************************************* 28 | # 1 - rrr_kfc_file 29 | # 2 - rrr_xfc_file 30 | # 3 - ZS_lk 31 | # 4 - ZS_lx 32 | # 5 - rrr_k_file 33 | # 6 - rrr_x_file 34 | 35 | 36 | #******************************************************************************* 37 | #Get command line arguments 38 | #******************************************************************************* 39 | IS_arg=len(sys.argv) 40 | if IS_arg != 7: 41 | print('ERROR - 6 and only 6 arguments can be used') 42 | raise SystemExit(22) 43 | 44 | rrr_kfc_file=sys.argv[1] 45 | rrr_xfc_file=sys.argv[2] 46 | ZS_lk=float(sys.argv[3]) 47 | ZS_lx=float(sys.argv[4]) 48 | rrr_k_file=sys.argv[5] 49 | rrr_x_file=sys.argv[6] 50 | 51 | 52 | #******************************************************************************* 53 | #Print input information 54 | #******************************************************************************* 55 | print('Command line inputs') 56 | print('- '+rrr_kfc_file) 57 | print('- '+rrr_xfc_file) 58 | print('- '+str(ZS_lk)) 59 | print('- '+str(ZS_lx)) 60 | print('- '+rrr_k_file) 61 | print('- '+rrr_x_file) 62 | 63 | 64 | #******************************************************************************* 65 | #Check if files exist 66 | #******************************************************************************* 67 | try: 68 | with open(rrr_kfc_file) as file: 69 | pass 70 | except IOError as e: 71 | print('ERROR - Unable to open '+rrr_kfc_file) 72 | raise SystemExit(22) 73 | 74 | try: 75 | with open(rrr_xfc_file) as file: 76 | pass 77 | except IOError as e: 78 | print('ERROR - Unable to open '+rrr_xfc_file) 79 | raise SystemExit(22) 80 | 81 | 82 | #******************************************************************************* 83 | #Read files 84 | #******************************************************************************* 85 | print('Reading input files') 86 | 87 | #------------------------------------------------------------------------------- 88 | #kfac file 89 | #------------------------------------------------------------------------------- 90 | ZV_kfac=[] 91 | with open(rrr_kfc_file,'r') as csvfile: 92 | csvreader=csv.reader(csvfile) 93 | for row in csvreader: 94 | ZV_kfac.append(float(row[0])) 95 | IS_riv_tot1=len(ZV_kfac) 96 | 
print('- Number of river reaches in rrr_kfc_file: '+str(IS_riv_tot1)) 97 | 98 | #------------------------------------------------------------------------------- 99 | #xfac file 100 | #------------------------------------------------------------------------------- 101 | ZV_xfac=[] 102 | with open(rrr_xfc_file,'r') as csvfile: 103 | csvreader=csv.reader(csvfile) 104 | for row in csvreader: 105 | ZV_xfac.append(float(row[0])) 106 | IS_riv_tot2=len(ZV_xfac) 107 | print('- Number of river reaches in rrr_xfc_file: '+str(IS_riv_tot2)) 108 | 109 | 110 | #******************************************************************************* 111 | #Check that sizes of rapid_connect_file and sort_file are the same 112 | #******************************************************************************* 113 | if IS_riv_tot1==IS_riv_tot2: 114 | IS_riv_tot=IS_riv_tot1 115 | else: 116 | print('ERROR - The number of river reaches in rapid_connect_file and in ' \ 117 | 'sort_file differ') 118 | raise SystemExit(22) 119 | 120 | 121 | #******************************************************************************* 122 | #Routing parameters 123 | #******************************************************************************* 124 | print('Processing routing parameters') 125 | ZV_k=[float(0)] * IS_riv_tot 126 | ZV_x=[float(0)] * IS_riv_tot 127 | 128 | for JS_riv_tot in range(IS_riv_tot): 129 | ZV_k[JS_riv_tot]=ZV_kfac[JS_riv_tot]*ZS_lk 130 | ZV_x[JS_riv_tot]=ZV_xfac[JS_riv_tot]*ZS_lx 131 | 132 | 133 | #******************************************************************************* 134 | #Write outputs 135 | #******************************************************************************* 136 | print('Writing files') 137 | 138 | with open(rrr_k_file, 'w') as csvfile: 139 | csvwriter = csv.writer(csvfile, dialect='excel') 140 | for JS_riv_tot in range(IS_riv_tot): 141 | csvwriter.writerow([round(ZV_k[JS_riv_tot],4)]) 142 | 143 | with open(rrr_x_file, 'w') as csvfile: 144 | csvwriter = csv.writer(csvfile, dialect='excel') 145 | for JS_riv_tot in range(IS_riv_tot): 146 | csvwriter.writerow([round(ZV_x[JS_riv_tot],4)]) 147 | 148 | 149 | #******************************************************************************* 150 | #End 151 | #******************************************************************************* 152 | -------------------------------------------------------------------------------- /src/rrr_riv_tot_trm_shp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #rrr_riv_tot_trm_shp.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a shapefile, an attribute name, and a threshold value for that attribute, 8 | #this program creates a new shapefile that is similar to the input shapefile but 9 | #only retains those features for which the attribute has a value greater or 10 | #equal to the threshold. 11 | #Author: 12 | #Cedric H. 
David, 2022-2023 13 | 14 | 15 | #******************************************************************************* 16 | #Import Python modules 17 | #******************************************************************************* 18 | import sys 19 | import fiona 20 | 21 | 22 | #******************************************************************************* 23 | #Declaration of variables (given as command line arguments) 24 | #******************************************************************************* 25 | # 1 - rrr_riv_shp 26 | # 2 - YS_trm 27 | # 3 - ZS_trm 28 | # 4 - rrr_trm_shp 29 | 30 | 31 | #******************************************************************************* 32 | #Get command line arguments 33 | #******************************************************************************* 34 | IS_arg=len(sys.argv) 35 | if IS_arg != 5: 36 | print('ERROR - 4 and only 4 arguments can be used') 37 | raise SystemExit(22) 38 | 39 | rrr_riv_shp=sys.argv[1] 40 | YS_trm=sys.argv[2] 41 | ZS_trm=float(sys.argv[3]) 42 | rrr_trm_shp=sys.argv[4] 43 | 44 | 45 | #******************************************************************************* 46 | #Print input information 47 | #******************************************************************************* 48 | print('Command line inputs') 49 | print('- '+rrr_riv_shp) 50 | print('- '+YS_trm) 51 | print('- '+str(ZS_trm)) 52 | print('- '+rrr_trm_shp) 53 | 54 | 55 | #******************************************************************************* 56 | #Check if files exist 57 | #******************************************************************************* 58 | try: 59 | with open(rrr_riv_shp) as file: 60 | pass 61 | except IOError as e: 62 | print('ERROR - Unable to open '+rrr_riv_shp) 63 | raise SystemExit(22) 64 | 65 | 66 | #******************************************************************************* 67 | #Read shapefile 68 | #******************************************************************************* 69 | print('Read shapefile') 70 | 71 | #------------------------------------------------------------------------------- 72 | #Open file 73 | #------------------------------------------------------------------------------- 74 | print('- Open file') 75 | 76 | rrr_riv_lay=fiona.open(rrr_riv_shp, 'r') 77 | IS_riv_tot=len(rrr_riv_lay) 78 | print('- The number of river features is: '+str(IS_riv_tot)) 79 | 80 | #------------------------------------------------------------------------------- 81 | #Read attributes 82 | #------------------------------------------------------------------------------- 83 | print('- Read attributes') 84 | 85 | if YS_trm not in rrr_riv_lay[0]['properties']: 86 | print('ERROR - The '+YS_trm+' attribute does not exist in '+rrr_riv_shp) 87 | raise SystemExit(22) 88 | 89 | #------------------------------------------------------------------------------- 90 | #Read driver, crs, and schema 91 | #------------------------------------------------------------------------------- 92 | print('- Read driver, crs and schema') 93 | 94 | rrr_riv_drv=rrr_riv_lay.driver 95 | rrr_riv_crs=rrr_riv_lay.crs 96 | rrr_riv_sch=rrr_riv_lay.schema 97 | 98 | 99 | #******************************************************************************* 100 | #Create a trimmed shapefile based on the threshold 101 | #******************************************************************************* 102 | print('Create a trimmed shapefile based on the threshold') 103 | 104 | rrr_trm_drv=rrr_riv_drv 105 | rrr_trm_crs=rrr_riv_crs 106 | rrr_trm_sch=rrr_riv_sch 107 | 108 | with 
fiona.open(rrr_trm_shp,'w',driver=rrr_trm_drv, \ 109 | crs=rrr_trm_crs, \ 110 | schema=rrr_trm_sch) as rrr_trm_lay: 111 | JS_riv_trm=0 112 | for JS_riv_tot in range(IS_riv_tot): 113 | if rrr_riv_lay[JS_riv_tot]['properties'][YS_trm] >= ZS_trm: 114 | rrr_trm_prp=rrr_riv_lay[JS_riv_tot]['properties'] 115 | rrr_trm_geo=rrr_riv_lay[JS_riv_tot]['geometry'] 116 | rrr_trm_lay.write({ \ 117 | 'properties': rrr_trm_prp, \ 118 | 'geometry': rrr_trm_geo, \ 119 | }) 120 | 121 | print(' - New shapefile created') 122 | 123 | 124 | #******************************************************************************* 125 | #End 126 | #******************************************************************************* 127 | -------------------------------------------------------------------------------- /tst/tst_chk_ncf_fil.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_chk_ncf_fil.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Check netCDF files for fill values, returns error (exit 22) if found. 8 | #Author: 9 | #Cedric H. David, 2017-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Prerequisites 14 | #******************************************************************************* 15 | import sys 16 | import netCDF4 17 | import numpy 18 | 19 | 20 | #******************************************************************************* 21 | #Declaration of variables (given as command line arguments) 22 | #******************************************************************************* 23 | # 1 - rrr_ncf_file 24 | 25 | 26 | #******************************************************************************* 27 | #Get command line arguments 28 | #******************************************************************************* 29 | IS_arg=len(sys.argv) 30 | if IS_arg != 2: 31 | print('ERROR - 1 and ony 1 argument must be used') 32 | raise SystemExit(22) 33 | 34 | rrr_ncf_file=sys.argv[1] 35 | 36 | 37 | #******************************************************************************* 38 | #Print current variables 39 | #******************************************************************************* 40 | print('Checking netCDF file') 41 | print('netCDF file :'+rrr_ncf_file) 42 | print('-------------------------------') 43 | 44 | 45 | #******************************************************************************* 46 | #Test if input files exist 47 | #******************************************************************************* 48 | try: 49 | with open(rrr_ncf_file) as file: 50 | pass 51 | except IOError as e: 52 | print('Unable to open '+rrr_ncf_file) 53 | raise SystemExit(22) 54 | 55 | 56 | #******************************************************************************* 57 | #Read and check netCDF file 58 | #******************************************************************************* 59 | 60 | #------------------------------------------------------------------------------- 61 | #Open file and get dimensions 62 | #------------------------------------------------------------------------------- 63 | f = netCDF4.Dataset(rrr_ncf_file, "r") 64 | 65 | if 'COMID' in f.dimensions: 66 | IS_riv_tot=len(f.dimensions['COMID']) 67 | elif 'rivid' in f.dimensions: 68 | IS_riv_tot=len(f.dimensions['rivid']) 69 | else: 70 | print('ERROR - Neither COMID nor rivid are 
dimensions in: '+rrr_ncf_file) 71 | raise SystemExit(99) 72 | 73 | if 'Time' in f.dimensions: 74 | IS_time=len(f.dimensions['Time']) 75 | elif 'time' in f.dimensions: 76 | IS_time=len(f.dimensions['time']) 77 | else: 78 | print('ERROR - Neither Time nor time are dimensions in: '+rrr_ncf_file) 79 | raise SystemExit(99) 80 | 81 | if 'm3_riv' in f.variables: 82 | rrr_ncf_var='m3_riv' 83 | elif 'Qout' in f.variables: 84 | rrr_ncf_var='Qout' 85 | elif 'V' in f.variables: 86 | rrr_ncf_var='V' 87 | else: 88 | print('ERROR - m3_riv, Qout, or V are not variables in: '+rrr_ncf_file) 89 | raise SystemExit(99) 90 | 91 | #------------------------------------------------------------------------------- 92 | #Print file sizes and variable name 93 | #------------------------------------------------------------------------------- 94 | print('Number of river reaches :'+str(IS_riv_tot)) 95 | print('Number of time steps :'+str(IS_time)) 96 | print('Variable name :'+rrr_ncf_var) 97 | print('-------------------------------') 98 | 99 | #------------------------------------------------------------------------------- 100 | #Check for fillvalues 101 | #------------------------------------------------------------------------------- 102 | BS_error=False 103 | 104 | for JS_time in range(IS_time): 105 | 106 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 107 | #Read values 108 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 109 | ZV_var=f.variables[rrr_ncf_var][JS_time,:] 110 | 111 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 112 | #Check for masked array 113 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 114 | if(isinstance(ZV_var,numpy.ma.MaskedArray)): 115 | BS_error=BS_error or True 116 | print('ERROR at time step: '+str(JS_time)+', MaskedArray detected, ' \ 117 | +'number of masked values: '+str(numpy.ma.count_masked(ZV_var))) 118 | 119 | 120 | #******************************************************************************* 121 | #Exit if a MaskedArray was detected 122 | #******************************************************************************* 123 | if (BS_error): 124 | raise SystemExit(22) 125 | 126 | print('No MaskedArray found!!!') 127 | print('-------------------------------') 128 | 129 | 130 | #******************************************************************************* 131 | #End 132 | #******************************************************************************* 133 | -------------------------------------------------------------------------------- /tst/tst_chk_ncf_neg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_chk_ncf_neg.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Check netCDF files for negative values, returns error (exit 22) if found. 8 | #Author: 9 | #Cedric H. 
David, 2022-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Prerequisites 14 | #******************************************************************************* 15 | import sys 16 | import netCDF4 17 | import numpy 18 | 19 | 20 | #******************************************************************************* 21 | #Declaration of variables (given as command line arguments) 22 | #******************************************************************************* 23 | # 1 - rrr_ncf_file 24 | 25 | 26 | #******************************************************************************* 27 | #Get command line arguments 28 | #******************************************************************************* 29 | IS_arg=len(sys.argv) 30 | if IS_arg != 2: 31 | print('ERROR - 1 and ony 1 argument must be used') 32 | raise SystemExit(22) 33 | 34 | rrr_ncf_file=sys.argv[1] 35 | 36 | 37 | #******************************************************************************* 38 | #Print current variables 39 | #******************************************************************************* 40 | print('Checking netCDF file') 41 | print('netCDF file :'+rrr_ncf_file) 42 | print('-------------------------------') 43 | 44 | 45 | #******************************************************************************* 46 | #Test if input files exist 47 | #******************************************************************************* 48 | try: 49 | with open(rrr_ncf_file) as file: 50 | pass 51 | except IOError as e: 52 | print('Unable to open '+rrr_ncf_file) 53 | raise SystemExit(22) 54 | 55 | 56 | #******************************************************************************* 57 | #Read and check netCDF file 58 | #******************************************************************************* 59 | 60 | #------------------------------------------------------------------------------- 61 | #Open file and get dimensions 62 | #------------------------------------------------------------------------------- 63 | f = netCDF4.Dataset(rrr_ncf_file, "r") 64 | 65 | if 'COMID' in f.dimensions: 66 | IS_riv_tot=len(f.dimensions['COMID']) 67 | elif 'rivid' in f.dimensions: 68 | IS_riv_tot=len(f.dimensions['rivid']) 69 | else: 70 | print('ERROR - Neither COMID nor rivid are dimensions in: '+rrr_ncf_file) 71 | raise SystemExit(99) 72 | 73 | if 'Time' in f.dimensions: 74 | IS_time=len(f.dimensions['Time']) 75 | elif 'time' in f.dimensions: 76 | IS_time=len(f.dimensions['time']) 77 | else: 78 | print('ERROR - Neither Time nor time are dimensions in: '+rrr_ncf_file) 79 | raise SystemExit(99) 80 | 81 | if 'm3_riv' in f.variables: 82 | rrr_ncf_var='m3_riv' 83 | elif 'Qout' in f.variables: 84 | rrr_ncf_var='Qout' 85 | elif 'V' in f.variables: 86 | rrr_ncf_var='V' 87 | else: 88 | print('ERROR - m3_riv, Qout, or V are not variables in: '+rrr_ncf_file) 89 | raise SystemExit(99) 90 | 91 | #------------------------------------------------------------------------------- 92 | #Print file sizes and variable name 93 | #------------------------------------------------------------------------------- 94 | print('Number of river reaches :'+str(IS_riv_tot)) 95 | print('Number of time steps :'+str(IS_time)) 96 | print('Variable name :'+rrr_ncf_var) 97 | print('-------------------------------') 98 | 99 | #------------------------------------------------------------------------------- 100 | #Check for negative values 101 | #------------------------------------------------------------------------------- 102 | 
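#A minimal, self-contained sketch of the counting logic used below, with
#illustrative values only:
#   import numpy
#   IV_neg=numpy.zeros(3,dtype=int)
#   ZV_var=numpy.array([1.0,-2.0,3.0])
#   IV_neg=numpy.where(ZV_var<0,IV_neg+1,IV_neg)
#   print(IV_neg)                                  #prints [0 1 0]
#Repeating this update at every time step gives, for each river reach, the
#number of time steps at which the value was negative.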
IV_neg=numpy.zeros(IS_riv_tot,dtype=int) 103 | #number of negative values for each river reach throughout timeseries 104 | 105 | for JS_time in range(IS_time): 106 | 107 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 108 | #Read values 109 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 110 | ZV_var=f.variables[rrr_ncf_var][JS_time,:] 111 | 112 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 113 | #Find and count negative values 114 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 115 | IV_neg=numpy.where(ZV_var<0,IV_neg+1,IV_neg) 116 | 117 | IS_riv_neg=numpy.count_nonzero(IV_neg) 118 | IS_max_neg=numpy.amax(IV_neg) 119 | 120 | print('Number of river reaches with at least one negative value for ' \ 121 | +rrr_ncf_var+': '+str(IS_riv_neg)) 122 | print('Maximum number of negative values in any river reach for ' \ 123 | +rrr_ncf_var+': '+str(IS_max_neg)) 124 | 125 | 126 | #******************************************************************************* 127 | #Exit if a negative value was was detected 128 | #******************************************************************************* 129 | if (IS_riv_neg>0): 130 | print('ERROR') 131 | raise SystemExit(22) 132 | else: 133 | print('No negative value found!!!') 134 | print('-------------------------------') 135 | 136 | 137 | #******************************************************************************* 138 | #End 139 | #******************************************************************************* 140 | -------------------------------------------------------------------------------- /tst/tst_chk_srt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_chk_srt.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Given a river connectivity file and a river ID file, this program checks that 8 | #the river ID file is sorted from upstream to downstream. The river ID file 9 | #can contain all the rivers of the domain, or only a subset of it. 10 | #Author: 11 | #Cedric H. 
David, 2007-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Import Python modules 16 | #******************************************************************************* 17 | import sys 18 | import csv 19 | 20 | 21 | #******************************************************************************* 22 | #Declaration of variables (given as command line arguments) 23 | #******************************************************************************* 24 | # 1 - rrr_con_file 25 | # 2 - rrr_riv_file 26 | 27 | 28 | #******************************************************************************* 29 | #Get command line arguments 30 | #******************************************************************************* 31 | IS_arg=len(sys.argv) 32 | if IS_arg < 3 or IS_arg > 3: 33 | print('ERROR - 2 and only 2 arguments can be used') 34 | raise SystemExit(22) 35 | 36 | rrr_con_file=sys.argv[1] 37 | rrr_riv_file=sys.argv[2] 38 | 39 | 40 | #******************************************************************************* 41 | #Print input information 42 | #******************************************************************************* 43 | print('Command line inputs') 44 | print('- '+rrr_con_file) 45 | print('- '+rrr_riv_file) 46 | 47 | 48 | #******************************************************************************* 49 | #Check if files exist 50 | #******************************************************************************* 51 | try: 52 | with open(rrr_con_file) as file: 53 | pass 54 | except IOError as e: 55 | print('ERROR - Unable to open '+rrr_con_file) 56 | raise SystemExit(22) 57 | 58 | try: 59 | with open(rrr_riv_file) as file: 60 | pass 61 | except IOError as e: 62 | print('ERROR - Unable to open '+rrr_riv_file) 63 | raise SystemExit(22) 64 | 65 | 66 | #******************************************************************************* 67 | #Read files 68 | #******************************************************************************* 69 | print('Reading input files') 70 | 71 | #------------------------------------------------------------------------------- 72 | #rrr_con_file 73 | #------------------------------------------------------------------------------- 74 | IV_riv_tot_id=[] 75 | IV_down_id=[] 76 | with open(rrr_con_file,'r') as csvfile: 77 | csvreader=csv.reader(csvfile) 78 | for row in csvreader: 79 | IV_riv_tot_id.append(int(row[0])) 80 | IV_down_id.append(int(row[1])) 81 | IS_riv_tot=len(IV_riv_tot_id) 82 | print('- Number of river reaches in rrr_con_file: '+str(IS_riv_tot)) 83 | 84 | #------------------------------------------------------------------------------- 85 | #rrr_riv_file 86 | #------------------------------------------------------------------------------- 87 | IV_riv_bas_id=[] 88 | with open(rrr_riv_file,'r') as csvfile: 89 | csvreader=csv.reader(csvfile) 90 | for row in csvreader: 91 | IV_riv_bas_id.append(int(row[0])) 92 | IS_riv_bas=len(IV_riv_bas_id) 93 | print('- Number of river reaches in rrr_riv_file: '+str(IS_riv_bas)) 94 | 95 | 96 | #******************************************************************************* 97 | #Checking upstream to downstream sorting 98 | #******************************************************************************* 99 | print('Checking upstream to downstream sorting') 100 | 101 | #------------------------------------------------------------------------------- 102 | #Create hash table 103 | #------------------------------------------------------------------------------- 104 | 
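#As an illustration with hypothetical reach IDs: if rrr_riv_file lists the IDs
#upstream to downstream as 30, 20, 10, the hash table built below is
#{30:0, 20:1, 10:2}; a connectivity row stating that reach 20 flows into reach
#10 passes the check because index 1 comes before index 2, whereas a file
#listing 10, 20, 30 would fail because downstream reach 10 appears before its
#upstream reach 20.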
IM_hsh={} 105 | for JS_riv_bas in range(IS_riv_bas): 106 | IM_hsh[IV_riv_bas_id[JS_riv_bas]]=JS_riv_bas 107 | #This hash table contains the index of each reach ID in rrr_riv_file 108 | 109 | #------------------------------------------------------------------------------- 110 | #Check sorting 111 | #------------------------------------------------------------------------------- 112 | for JS_riv_tot in range(IS_riv_tot): 113 | #Looping through all reach IDs in rrr_con_file 114 | if IV_riv_tot_id[JS_riv_tot] in IM_hsh: 115 | #Checking that the reach ID in rrr_con_file is in rrr_riv_file 116 | JS_riv_bas1=IM_hsh[IV_riv_tot_id[JS_riv_tot]] 117 | #JS_riv_bas1 is the index of reach ID in rrr_riv_file 118 | if IV_down_id[JS_riv_tot] in IM_hsh: 119 | #checking that the ID downstream of JS_riv_bas1 is in rrr_riv_file 120 | JS_riv_bas2=IM_hsh[IV_down_id[JS_riv_tot]] 121 | #JS_riv_bas2 is the index of the downstream ID in rrr_riv_file 122 | else: 123 | JS_riv_bas2=IS_riv_bas 124 | #Largest value if downstream ID not in rrr_riv_file (also 125 | #applies to ID=0). 126 | if JS_riv_bas1 > JS_riv_bas2: 127 | #checking that ID downstream is not earlier in rrr_riv_file 128 | print('ERROR - rrr_riv_file not sorted from upstream to ' \ 129 | 'downstream') 130 | print('Reach ID '+str(IV_riv_tot_id[JS_riv_tot])+ \ 131 | ' is located upstream of '+str(IV_down_id[JS_riv_tot])) 132 | raise SystemExit(22) 133 | print('Success!!!') 134 | 135 | 136 | #******************************************************************************* 137 | #End 138 | #******************************************************************************* 139 | -------------------------------------------------------------------------------- /tst/tst_cmp_csv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_cmp_csv.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Compare csv files. 8 | #Author: 9 | #Cedric H.
David, 2015-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Prerequisites 14 | #******************************************************************************* 15 | import sys 16 | import csv 17 | 18 | 19 | #******************************************************************************* 20 | #Declaration of variables (given as command line arguments) 21 | #******************************************************************************* 22 | # 1 - csv_file1 23 | # 2 - csv_file2 24 | #(3)- relative tolerance 25 | #(4)- absolute tolerance 26 | 27 | 28 | #******************************************************************************* 29 | #Get command line arguments 30 | #******************************************************************************* 31 | IS_arg=len(sys.argv) 32 | if IS_arg < 3 or IS_arg > 5: 33 | print('ERROR - A minimum of 2 and a maximum of 4 arguments can be used') 34 | raise SystemExit(22) 35 | 36 | csv_file1=sys.argv[1] 37 | csv_file2=sys.argv[2] 38 | if IS_arg > 3: 39 | ZS_rtol=float(sys.argv[3]) 40 | else: 41 | ZS_rtol=float(0) 42 | if IS_arg > 4: 43 | ZS_atol=float(sys.argv[4]) 44 | else: 45 | ZS_atol=float(0) 46 | 47 | 48 | #******************************************************************************* 49 | #Print current variables 50 | #******************************************************************************* 51 | print('Comparing CSV files') 52 | print('1st CSV file :'+csv_file1) 53 | print('2nd CSV file :'+csv_file2) 54 | print('Relative tolerance :'+str(ZS_rtol)) 55 | print('Absolute tolerance :'+str(ZS_atol)) 56 | print('-------------------------------') 57 | 58 | 59 | #******************************************************************************* 60 | #Test if input files exist 61 | #******************************************************************************* 62 | try: 63 | with open(csv_file1) as file: 64 | pass 65 | except IOError as e: 66 | print('Unable to open '+csv_file1) 67 | raise SystemExit(22) 68 | 69 | try: 70 | with open(csv_file2) as file: 71 | pass 72 | except IOError as e: 73 | print('Unable to open '+csv_file2) 74 | raise SystemExit(22) 75 | 76 | 77 | #******************************************************************************* 78 | #Read all files 79 | #******************************************************************************* 80 | ZM_csv1=[] 81 | IS_col1=0 82 | with open(csv_file1) as csv_file: 83 | reader=csv.reader(csv_file,dialect='excel') 84 | for row in reader: 85 | row=list(filter(lambda x: x!='',row)) 86 | #Removes the empty strings created by csv.reader for trailing commas 87 | for JS_col in range(len(row)): 88 | try: 89 | row[JS_col]=int(row[JS_col]) 90 | except ValueError: 91 | try: 92 | row[JS_col]=float(row[JS_col]) 93 | except ValueError: 94 | row[JS_col]=str(row[JS_col]) 95 | ZM_csv1.append(row) 96 | IS_row1=len(ZM_csv1) 97 | IS_col1=len(row) 98 | 99 | for row in ZM_csv1: 100 | if len(row) != IS_col1: 101 | #Check that the number of columns is always the same 102 | print('ERROR - Inconsistent number of columns in '+csv_file1) 103 | raise SystemExit(22) 104 | 105 | ZM_csv2=[] 106 | IS_col2=0 107 | with open(csv_file2) as csv_file: 108 | reader=csv.reader(csv_file,dialect='excel') 109 | for row in reader: 110 | row=list(filter(lambda x: x!='',row)) 111 | #Removes the empty strings created by csv.reader for trailing commas 112 | for JS_col in range(len(row)): 113 | try: 114 | row[JS_col]=int(row[JS_col]) 115 | except ValueError: 116 | try: 117 | 
row[JS_col]=float(row[JS_col]) 118 | except ValueError: 119 | row[JS_col]=str(row[JS_col]) 120 | ZM_csv2.append(row) 121 | IS_row2=len(ZM_csv2) 122 | IS_col2=len(row) 123 | 124 | for row in ZM_csv2: 125 | if len(row) != IS_col2: 126 | #Check that the number of columns is always the same 127 | print('ERROR - Inconsistent number of columns in '+csv_file2) 128 | raise SystemExit(22) 129 | 130 | 131 | #******************************************************************************* 132 | #Compare file sizes 133 | #******************************************************************************* 134 | if IS_row1==IS_row2: 135 | IS_row=IS_row1 136 | print('Common number of rows: '+str(IS_row)) 137 | else: 138 | print('ERROR - The number of rows are different: ' \ 139 | +str(IS_row1)+' <> '+str(IS_row2)) 140 | raise SystemExit(99) 141 | 142 | if IS_col1==IS_col2: 143 | IS_col=IS_col1 144 | print('Common number of columns: '+str(IS_col)) 145 | else: 146 | print('ERROR - The number of columns are different: ' \ 147 | +str(IS_col1)+' <> '+str(IS_col2)) 148 | raise SystemExit(99) 149 | 150 | print('-------------------------------') 151 | 152 | 153 | #******************************************************************************* 154 | #Compute differences 155 | #******************************************************************************* 156 | ZS_rdif_max=float(0) 157 | ZS_adif_max=float(0) 158 | for JS_row in range(IS_row): 159 | for JS_col in range(IS_col): 160 | if type(ZM_csv1[JS_row][JS_col]) is str: 161 | if ZM_csv1[JS_row][JS_col].strip() \ 162 | ==ZM_csv2[JS_row][JS_col].strip(): 163 | ZS_adif=0 164 | ZS_rdif=0 165 | else: 166 | print('ERROR!!! in comparison of strings: '+ \ 167 | ZM_csv1[JS_row][JS_col]+' differs from '+ \ 168 | ZM_csv2[JS_row][JS_col]) 169 | raise SystemExit(99) 170 | else: 171 | ZS_adif=abs(ZM_csv1[JS_row][JS_col]-ZM_csv2[JS_row][JS_col]) 172 | #Absolute difference computed 173 | if ZS_adif == 0: 174 | ZS_rdif=0 175 | else: 176 | ZS_rdif=2*ZS_adif/ \ 177 | abs(ZM_csv1[JS_row][JS_col]+ZM_csv2[JS_row][JS_col]) 178 | #Relative difference computed 179 | if ZS_adif > ZS_adif_max: 180 | ZS_adif_max=ZS_adif 181 | #Maximum absolute difference updated 182 | if ZS_rdif > ZS_rdif_max: 183 | ZS_rdif_max=ZS_rdif 184 | #Maximum relative difference updated 185 | 186 | print('Max relative difference :'+str(ZS_rdif_max)) 187 | print('Max absolute difference :'+str(ZS_adif_max)) 188 | print('-------------------------------') 189 | 190 | 191 | #******************************************************************************* 192 | #Compare csv files 193 | #******************************************************************************* 194 | if ZS_rdif_max > ZS_rtol: 195 | print('Unacceptable rel. difference!!!') 196 | print('-------------------------------') 197 | raise SystemExit(99) 198 | 199 | if ZS_adif_max > ZS_atol: 200 | print('Unacceptable abs. 
difference!!!') 201 | print('-------------------------------') 202 | raise SystemExit(99) 203 | 204 | print('CSV files similar!!!') 205 | print('Passed all tests!!!') 206 | print('-------------------------------') 207 | 208 | 209 | #******************************************************************************* 210 | #End 211 | #******************************************************************************* 212 | -------------------------------------------------------------------------------- /tst/tst_cmp_n1d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_cmp_n1d.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Compare one unique 1-D variable within two netCDF files. 8 | #Author: 9 | #Cedric H. David, 2016-2023 10 | 11 | 12 | #******************************************************************************* 13 | #Prerequisites 14 | #******************************************************************************* 15 | import sys 16 | import netCDF4 17 | import math 18 | import numpy 19 | 20 | 21 | #******************************************************************************* 22 | #Declaration of variables (given as command line arguments) 23 | #******************************************************************************* 24 | # 1 - rrr_ncf_file1 25 | # 2 - rrr_ncf_file2 26 | # 3 - rrr_ncf_var 27 | #(3)- relative tolerance 28 | #(4)- absolute tolerance 29 | 30 | 31 | #******************************************************************************* 32 | #Get command line arguments 33 | #******************************************************************************* 34 | IS_arg=len(sys.argv) 35 | if IS_arg < 4 or IS_arg > 6: 36 | print('ERROR - A minimum of 3 and a maximum of 5 arguments can be used') 37 | raise SystemExit(22) 38 | 39 | rrr_ncf_file1=sys.argv[1] 40 | rrr_ncf_file2=sys.argv[2] 41 | rrr_ncf_var=sys.argv[3] 42 | if IS_arg > 4: 43 | ZS_rtol=float(sys.argv[4]) 44 | else: 45 | ZS_rtol=float(0) 46 | if IS_arg > 5: 47 | ZS_atol=float(sys.argv[5]) 48 | else: 49 | ZS_atol=float(0) 50 | 51 | 52 | #******************************************************************************* 53 | #Print current variables 54 | #******************************************************************************* 55 | print('Comparing netCDF files') 56 | print('1st netCDF file :'+rrr_ncf_file1) 57 | print('2nd netCDF file :'+rrr_ncf_file2) 58 | print('Name of variable :'+rrr_ncf_var) 59 | print('Relative tolerance :'+str(ZS_rtol)) 60 | print('Absolute tolerance :'+str(ZS_atol)) 61 | print('-------------------------------') 62 | 63 | 64 | #******************************************************************************* 65 | #Test if input files exist 66 | #******************************************************************************* 67 | try: 68 | with open(rrr_ncf_file1) as file: 69 | pass 70 | except IOError as e: 71 | print('Unable to open '+rrr_ncf_file1) 72 | raise SystemExit(22) 73 | 74 | try: 75 | with open(rrr_ncf_file2) as file: 76 | pass 77 | except IOError as e: 78 | print('Unable to open '+rrr_ncf_file2) 79 | raise SystemExit(22) 80 | 81 | 82 | #******************************************************************************* 83 | #Read and compare netCDF files 84 | #******************************************************************************* 85 | 86 | 
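#Two metrics are computed in this section: the maximum absolute difference,
#max(|V1-V2|), and a relative difference taken as sqrt(sum((V1-V2)^2)/sum(V1^2)).
#With illustrative values V1=[10,20] and V2=[10,19], these are 1 and
#sqrt(1/500), i.e. about 0.045; both are then compared to the tolerances given
#on the command line.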
#------------------------------------------------------------------------------- 87 | #Open files and get dimensions 88 | #------------------------------------------------------------------------------- 89 | f1 = netCDF4.Dataset(rrr_ncf_file1, "r") 90 | 91 | if 'COMID' in f1.dimensions: 92 | IS_riv_tot1=len(f1.dimensions['COMID']) 93 | elif 'rivid' in f1.dimensions: 94 | IS_riv_tot1=len(f1.dimensions['rivid']) 95 | else: 96 | print('ERROR - Neither COMID nor rivid are dimensions in: '+rrr_ncf_file1) 97 | raise SystemExit(99) 98 | 99 | if rrr_ncf_var not in f1.variables: 100 | print('ERROR - '+rrr_ncf_var+' is not a variables in: '+rrr_ncf_file1) 101 | raise SystemExit(99) 102 | 103 | f2 = netCDF4.Dataset(rrr_ncf_file2, "r") 104 | 105 | if 'COMID' in f2.dimensions: 106 | IS_riv_tot2=len(f2.dimensions['COMID']) 107 | elif 'rivid' in f2.dimensions: 108 | IS_riv_tot2=len(f2.dimensions['rivid']) 109 | else: 110 | print('ERROR - Neither COMID nor rivid are dimensions in: '+rrr_ncf_file2) 111 | raise SystemExit(99) 112 | 113 | if rrr_ncf_var not in f2.variables: 114 | print('ERROR - '+rrr_ncf_var+' is not a variables in: '+rrr_ncf_file2) 115 | raise SystemExit(99) 116 | 117 | #------------------------------------------------------------------------------- 118 | #Compare file sizes and variable names 119 | #------------------------------------------------------------------------------- 120 | if IS_riv_tot1==IS_riv_tot2: 121 | IS_riv_tot=IS_riv_tot1 122 | print('Common number of river reaches:'+str(IS_riv_tot)) 123 | else: 124 | print('ERROR - The number of river reaches differs: ' \ 125 | +str(IS_riv_tot1)+' <> '+str(IS_riv_tot2)) 126 | raise SystemExit(99) 127 | 128 | print('-------------------------------') 129 | 130 | #------------------------------------------------------------------------------- 131 | #Compare rivid values if they exist in both files 132 | #------------------------------------------------------------------------------- 133 | if 'COMID' in f1.variables: 134 | IV_riv_tot1=f1.variables['COMID'] 135 | elif 'rivid' in f1.variables: 136 | IV_riv_tot1=f1.variables['rivid'] 137 | 138 | if 'COMID' in f2.variables: 139 | IV_riv_tot2=f2.variables['COMID'] 140 | elif 'rivid' in f2.variables: 141 | IV_riv_tot2=f2.variables['rivid'] 142 | 143 | if 'IV_riv_tot1' in locals() and 'IV_riv_tot2' in locals(): 144 | #This makes sure that both variables actually exist before comparing them 145 | if numpy.array_equal(IV_riv_tot1[:],IV_riv_tot2[:]): 146 | print('The rivids are the same') 147 | else: 148 | print('ERROR: The rivids differ') 149 | raise SystemExit(99) 150 | print('-------------------------------') 151 | 152 | #------------------------------------------------------------------------------- 153 | #Compute differences 154 | #------------------------------------------------------------------------------- 155 | 156 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 157 | #initializing 158 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 159 | ZS_rdif=0 160 | ZS_adif=0 161 | 162 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 163 | #Reading values 164 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 165 | ZV_var_1=f1.variables[rrr_ncf_var][:] 166 | ZV_var_2=f2.variables[rrr_ncf_var][:] 167 | 168 | #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 169 | #Comparing difference values 170 | #- - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - 171 | #Tried computations with regular Python lists but this makes is very slow. 172 | #Also tried using map(operator.sub,V,W) or [x-y for x,y in zip(V,W)] 173 | #But this still results in slow computations. 174 | #The best performance seems to be with Numpy. 175 | ZV_dvar_abs=numpy.absolute(ZV_var_1-ZV_var_2) 176 | 177 | ZS_adif=numpy.max(ZV_dvar_abs) 178 | 179 | ZS_rdif= math.sqrt( numpy.sum(ZV_dvar_abs*ZV_dvar_abs) \ 180 | /numpy.sum(ZV_var_1*ZV_var_1)) 181 | 182 | 183 | #******************************************************************************* 184 | #Print difference values and comparing values to tolerance 185 | #******************************************************************************* 186 | print('Max relative difference :'+'{0:.2e}'.format(ZS_rdif)) 187 | print('Max absolute difference :'+'{0:.2e}'.format(ZS_adif)) 188 | print('-------------------------------') 189 | 190 | if ZS_rdif > ZS_rtol: 191 | print('Unacceptable rel. difference!!!') 192 | print('-------------------------------') 193 | raise SystemExit(99) 194 | 195 | if ZS_adif > ZS_atol: 196 | print('Unacceptable abs. difference!!!') 197 | print('-------------------------------') 198 | raise SystemExit(99) 199 | 200 | print('netCDF files similar!!!') 201 | print('-------------------------------') 202 | 203 | 204 | #******************************************************************************* 205 | #End 206 | #******************************************************************************* 207 | -------------------------------------------------------------------------------- /tst/tst_cmp_shp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | #******************************************************************************* 3 | #tst_cmp_shp.py 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #Compare two shapefiles. The geometries are first checked, then all the 8 | #attributes of the first file are checked within the second file. 9 | #Author: 10 | #Cedric H. 
David, 2016-2023 11 | 12 | 13 | #******************************************************************************* 14 | #Prerequisites 15 | #******************************************************************************* 16 | import sys 17 | import fiona 18 | 19 | 20 | #******************************************************************************* 21 | #Declaration of variables (given as command line arguments) 22 | #******************************************************************************* 23 | # 1 - rrr_old_shp 24 | # 2 - rrr_new_shp 25 | 26 | 27 | #******************************************************************************* 28 | #Get command line arguments 29 | #******************************************************************************* 30 | IS_arg=len(sys.argv) 31 | if IS_arg != 3 : 32 | print('ERROR - 2 and only 2 arguments can be used') 33 | raise SystemExit(22) 34 | 35 | rrr_old_shp=sys.argv[1] 36 | rrr_new_shp=sys.argv[2] 37 | 38 | 39 | #******************************************************************************* 40 | #Print current variables 41 | #******************************************************************************* 42 | print('Command line inputs') 43 | print('- '+rrr_old_shp) 44 | print('- '+rrr_new_shp) 45 | 46 | 47 | #******************************************************************************* 48 | #Test if input files exist 49 | #******************************************************************************* 50 | try: 51 | with open(rrr_old_shp) as file: 52 | pass 53 | except IOError as e: 54 | print('Unable to open '+rrr_old_shp) 55 | raise SystemExit(22) 56 | 57 | try: 58 | with open(rrr_new_shp) as file: 59 | pass 60 | except IOError as e: 61 | print('Unable to open '+rrr_new_shp) 62 | raise SystemExit(22) 63 | 64 | 65 | #******************************************************************************* 66 | #Open rrr_old_shp 67 | #******************************************************************************* 68 | print('Open rrr_old_shp') 69 | 70 | rrr_old_lay=fiona.open(rrr_old_shp, 'r') 71 | 72 | IS_old_tot=len(rrr_old_lay) 73 | print('- The number of features is: '+str(IS_old_tot)) 74 | 75 | YV_old_prp=rrr_old_lay.schema['properties'].keys() 76 | print('- The number of attributes is: '+str(len(YV_old_prp))) 77 | 78 | 79 | #******************************************************************************* 80 | #Open rrr_new_shp 81 | #******************************************************************************* 82 | print('Open rrr_new_shp') 83 | 84 | rrr_new_lay=fiona.open(rrr_new_shp, 'r') 85 | 86 | IS_new_tot=len(rrr_new_lay) 87 | print('- The number of features is: '+str(IS_new_tot)) 88 | 89 | YV_new_prp=rrr_new_lay.schema['properties'].keys() 90 | print('- The number of attributes is: '+str(len(YV_new_prp))) 91 | 92 | 93 | #******************************************************************************* 94 | #Compare number of features 95 | #******************************************************************************* 96 | print('Compare number of features') 97 | if IS_old_tot==IS_new_tot: 98 | print('- The numbers of features are the same') 99 | else: 100 | print('ERROR - The numbers of features are different: ' \ 101 | +str(IS_old_tot)+' <> '+str(IS_new_tot)) 102 | raise SystemExit(99) 103 | 104 | 105 | #******************************************************************************* 106 | #Compare content of shapefiles 107 | #******************************************************************************* 108 | print('Compare content of 
shapefiles') 109 | 110 | for JS_old_tot in range(IS_old_tot): 111 | #-------------------------------------------------------------------------- 112 | #Extract the properties and geometry for the current feature of old file 113 | #-------------------------------------------------------------------------- 114 | rrr_old_fea=rrr_old_lay[JS_old_tot] 115 | rrr_old_prp=rrr_old_fea['properties'] 116 | rrr_old_geo=rrr_old_fea['geometry'] 117 | 118 | #-------------------------------------------------------------------------- 119 | #Extract the properties and geometry for the current feature of new file 120 | #-------------------------------------------------------------------------- 121 | rrr_new_fea=rrr_new_lay[JS_old_tot] 122 | rrr_new_prp=rrr_new_fea['properties'] 123 | rrr_new_geo=rrr_new_fea['geometry'] 124 | 125 | #-------------------------------------------------------------------------- 126 | #Compare geometry 127 | #-------------------------------------------------------------------------- 128 | if rrr_old_geo!=rrr_new_geo: 129 | print('ERROR - The geometries of features are different for index: ' \ 130 | +str(JS_old_tot)) 131 | raise SystemExit(99) 132 | 133 | #-------------------------------------------------------------------------- 134 | #Compare attributes 135 | #-------------------------------------------------------------------------- 136 | for YS_old_prp in YV_old_prp: 137 | if rrr_old_prp[YS_old_prp]!=rrr_new_prp[YS_old_prp]: 138 | print('ERROR - The attributes of features are different for '+ \ 139 | 'index: '+str(JS_old_tot)+', attribute: '+str(YS_old_prp)) 140 | raise SystemExit(99) 141 | 142 | print('Success!!!') 143 | 144 | 145 | #******************************************************************************* 146 | #End 147 | #******************************************************************************* 148 | -------------------------------------------------------------------------------- /tst/tst_pub_dwnl_Collins_etal_202x_TBD.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #******************************************************************************* 3 | #tst_pub_dwnl_Collins_etal_202x_TBD.sh 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #This script downloads all the files corresponding to: 8 | #Collins, E. L., 9 | #DOI: xx.xxxx/xxxxxxxxxxxx 10 | #The files used are available from: 11 | #Collins, E. L., 12 | #DOI: xx.xxxx/xxxxxxxxxxxx 13 | #The script returns the following exit codes 14 | # - 0 if all downloads are successful 15 | # - 22 if there was a conversion problem 16 | # - 44 if one download is not successful 17 | #Author: 18 | #Cedric H. David, 2022-2023. 19 | 20 | 21 | #******************************************************************************* 22 | #Notes on tricks used here 23 | #******************************************************************************* 24 | #wget -q -nc --> Quiet, No-clobber (don't overwrite) 25 | #wget -r --> Turn on recursive retrieving. 26 | #wget -nH --> Disable generation of host-prefixed directories. 27 | #wget ---cut-dirs=i --> Ignore i directory components when saving files. 
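#A hypothetical combination of these options (URL and target folder are
#illustrative only):
#   wget -q -nc -r -nH --cut-dirs=3 -P ../input/ http://example.com/a/b/c/data/
#This downloads everything under .../data/ into ../input/, skips files that are
#already present, and does not recreate the host name or the a/b/c/ directory
#levels locally.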
28 | #wget -P --> Directory prefix where everything is downloaded 29 | 30 | 31 | #******************************************************************************* 32 | #Publication message 33 | #******************************************************************************* 34 | echo "********************" 35 | echo "Downloading files from: https://doi.org/xx.xxxx/xxxxxxxxxxxx" 36 | echo "which correspond to : https://doi.org/xx.xxxx/xxxxxxxxxxxx" 37 | echo "These files are under a Creative Commons Attribution (CC BY) license." 38 | echo "Please cite these two DOIs if using these files for your publications." 39 | echo "********************" 40 | 41 | 42 | #******************************************************************************* 43 | #Download GLDAS2 monthly files 44 | #******************************************************************************* 45 | 46 | #------------------------------------------------------------------------------- 47 | #Download parameters 48 | #------------------------------------------------------------------------------- 49 | fld="../input/GLDAS" 50 | exp="GLDAS" 51 | frq="M" 52 | mod=" \ 53 | CLSM \ 54 | NOAH \ 55 | VIC \ 56 | " 57 | str=( \ 58 | "1980-01-01T00:00:00" \ 59 | "1981-01-01T00:00:00" \ 60 | "1982-01-01T00:00:00" \ 61 | ) 62 | end=( \ 63 | "1980-12-31T23:59:59" \ 64 | "1981-12-31T23:59:59" \ 65 | "2009-12-31T23:59:59" \ 66 | ) 67 | 68 | #------------------------------------------------------------------------------- 69 | #Download process 70 | #------------------------------------------------------------------------------- 71 | mkdir -p $fld 72 | ndl=${#str[@]} 73 | #ndl is the number of download intervals 74 | 75 | for mod in $mod 76 | do 77 | for (( idl=0; idl<${ndl}; idl++ )); 78 | do 79 | echo "Downloading GLDAS2 monthly data for $mod, from" ${str[$idl]} "to" \ 80 | ${end[$idl]} 81 | ../src/rrr_lsm_tot_ldas.py $exp $mod $frq ${str[$idl]} ${end[$idl]} $fld > tmp_dwl 82 | if [ $? -gt 0 ] ; then echo "Problem downloading" && cat tmp_dwl >&2 ; exit 44 ; fi 83 | rm tmp_dwl 84 | done 85 | done 86 | 87 | 88 | #******************************************************************************* 89 | #Download MERIT Hydro Basins input files 90 | #******************************************************************************* 91 | 92 | #------------------------------------------------------------------------------- 93 | #Download parameters 94 | #------------------------------------------------------------------------------- 95 | URL="http://hydrology.princeton.edu/data/mpan/MERIT_Basins/MERIT_Hydro_v07_Basins_v01/zip/pfaf_level_02/" 96 | folder="../input/MH07B01_TBD" 97 | list=" \ 98 | pfaf_74_MERIT_Hydro_v07_Basins_v01.zip \ 99 | " 100 | 101 | #------------------------------------------------------------------------------- 102 | #Download process 103 | #------------------------------------------------------------------------------- 104 | mkdir -p $folder 105 | for file in $list 106 | do 107 | wget -nv -nc $URL/$file -P $folder 108 | if [ $? -gt 0 ] ; then echo "Problem downloading $file" >&2 ; exit 44 ; fi 109 | done 110 | 111 | 112 | #******************************************************************************* 113 | #Extract files 114 | #******************************************************************************* 115 | unzip -nq ../input/MH07B01_TBD/pfaf_74_MERIT_Hydro_v07_Basins_v01.zip -d ../input/MH07B01_TBD/ 116 | if [ $? 
-gt 0 ] ; then echo "Problem converting" >&2 ; exit 22 ; fi 117 | 118 | 119 | #******************************************************************************* 120 | #Done 121 | #******************************************************************************* 122 | -------------------------------------------------------------------------------- /version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #******************************************************************************* 3 | #version.sh 4 | #******************************************************************************* 5 | 6 | #Purpose: 7 | #This script allows determining the version of RRR that is being used, but only 8 | #if git is installed and if the RRR git repository is present. Otherwise 9 | #'unknown' is used. 10 | #Author: 11 | #Cedric H. David, 2016-2023 12 | 13 | 14 | #******************************************************************************* 15 | #Check if a program exists and perform tasks 16 | #******************************************************************************* 17 | if type 'git' > /dev/null; then 18 | #git is installed 19 | if git rev-parse --git-dir > /dev/null 2>&1; then 20 | #this is a git repository 21 | git describe 22 | else 23 | #this is not a git repository 24 | echo "unknown, NOT a git repository" 25 | fi 26 | else 27 | #git is not installed 28 | echo "unknown, git NOT installed" 29 | fi 30 | 31 | 32 | #******************************************************************************* 33 | #end 34 | #******************************************************************************* 35 | --------------------------------------------------------------------------------