├── .gitignore ├── CMake └── FindNetCDF.cmake ├── CMakeLists.txt ├── LICENSE ├── Make.defs ├── README.md ├── analysis └── blobmetrics │ ├── doc │ ├── BlobMetrics-UserGuide.aux │ ├── BlobMetrics-UserGuide.log │ ├── BlobMetrics-UserGuide.pdf │ ├── BlobMetrics-UserGuide.synctex.gz │ ├── BlobMetrics-UserGuide.tex │ ├── BlobMetrics-UserGuide.toc │ └── Framework_figure.pdf │ ├── download_libraries.R │ ├── examples │ ├── DJF_NP_CMCC-CESM-RCP8.5_CMCC-CESM_JRA_MERRA_CFSR_ERA_report.html │ ├── DJF_NP_model_reanalysis_dataset_namelist_master.R │ ├── JJA_SP_JRA_MERRA_CFSR_ERA_report.html │ ├── JJA_SP_reanalysis_dataset_namelist_master.R │ ├── blank_setupfile.R │ ├── setup_full.R │ └── setup_reanalysis.R │ ├── gen_blank_setupfile.R │ ├── generateNamelist.R │ ├── generateReport.R │ ├── intercomparison.R │ ├── mergetable.R │ ├── pearson.R │ ├── pearsonrmse.R │ ├── python │ ├── calcBlockIndices.py │ ├── mergeStats.py │ ├── readStats.py │ └── summStats.py │ ├── readfiles.R │ ├── readnetcdf.R │ ├── readtable.R │ ├── report_template.Rmd │ ├── stitch_metric_framework.R │ └── summarize.R ├── aux └── tc_basins_v1.dat ├── doc ├── TempestExtremes-UserGuide.aux ├── TempestExtremes-UserGuide.dvi ├── TempestExtremes-UserGuide.idx ├── TempestExtremes-UserGuide.ilg ├── TempestExtremes-UserGuide.ind ├── TempestExtremes-UserGuide.log ├── TempestExtremes-UserGuide.out ├── TempestExtremes-UserGuide.pdf ├── TempestExtremes-UserGuide.synctex.gz ├── TempestExtremes-UserGuide.tex └── TempestExtremes-UserGuide.toc ├── quick_make_unix.sh ├── remove_depend.sh ├── src ├── CMakeLists.txt ├── base │ ├── Announce.cpp │ ├── Announce.h │ ├── AutoCurator.cpp │ ├── AutoCurator.h │ ├── CMakeLists.txt │ ├── CommandLine.h │ ├── Constants.h │ ├── CoordTransforms.h │ ├── DataArray1D.h │ ├── DataArray2D.h │ ├── DataArray3D.h │ ├── DataArray4D.h │ ├── DataOp.cpp │ ├── DataOp.h │ ├── Defines.h │ ├── Exception.h │ ├── FilenameList.h │ ├── FiniteElementTools.cpp │ ├── FiniteElementTools.h │ ├── FourierTransforms.h │ ├── FunctionTimer.cpp │ ├── FunctionTimer.h │ ├── GaussLobattoQuadrature.cpp │ ├── GaussLobattoQuadrature.h │ ├── GaussQuadrature.cpp │ ├── GaussQuadrature.h │ ├── GridElements.cpp │ ├── GridElements.h │ ├── LatLonBox.h │ ├── LegendrePolynomial.cpp │ ├── LegendrePolynomial.h │ ├── MathExpression.h │ ├── MeshUtilities.cpp │ ├── MeshUtilities.h │ ├── MeshUtilitiesFuzzy.cpp │ ├── MeshUtilitiesFuzzy.h │ ├── NcFileVector.cpp │ ├── NcFileVector.h │ ├── NetCDFUtilities.cpp │ ├── NetCDFUtilities.h │ ├── NodeFileUtilities.cpp │ ├── NodeFileUtilities.h │ ├── PolynomialInterp.cpp │ ├── PolynomialInterp.h │ ├── RLLPolygonArray.cpp │ ├── RLLPolygonArray.h │ ├── STLStringHelper.h │ ├── ShpFile.cpp │ ├── ShpFile.h │ ├── SimpleGrid.cpp │ ├── SimpleGrid.h │ ├── SimpleGridUtilities.cpp │ ├── SimpleGridUtilities.h │ ├── SparseMatrix.h │ ├── Subscript.h │ ├── ThresholdOp.cpp │ ├── ThresholdOp.h │ ├── TimeMatch.h │ ├── TimeObj.cpp │ ├── TimeObj.h │ ├── Units.h │ ├── Variable.cpp │ ├── Variable.h │ ├── kdtree.cpp │ ├── kdtree.h │ ├── lodepng.cpp │ ├── lodepng.h │ └── order32.h ├── blobs │ ├── BlobStats.cpp │ ├── BlobUtilities.h │ ├── CMakeLists.txt │ ├── DetectBlobs.cpp │ ├── PersistentBlobs.cpp │ └── StitchBlobs.cpp ├── blocking │ ├── AvgVar.cpp │ ├── BlockingDFT.cpp │ ├── BlockingDevs.cpp │ ├── BlockingGHG.cpp │ ├── BlockingNormDevs.cpp │ ├── BlockingPV.cpp │ ├── BlockingThresh.cpp │ ├── BlockingUtilities.cpp │ ├── BlockingUtilities.h │ ├── CMakeLists.txt │ ├── CombineBlobs.cpp │ ├── DFT.cpp │ ├── DFT.h │ ├── DailyAverage.cpp │ ├── 
DensityCalculations.cpp │ ├── DetrendHeights.cpp │ ├── ExtractTimeStep.cpp │ ├── Interp_z500.cpp │ ├── Interp_z500.h │ ├── Interpolate.cpp │ ├── Interpolate.h │ ├── Smooth61Day.cpp │ ├── SplitFile.cpp │ ├── Var4Dto3D.cpp │ └── calcLinReg.py ├── netcdf-cxx-4.2 │ ├── CMakeLists.txt │ ├── COPYRIGHT │ ├── README │ ├── config.h │ ├── ncvalues.cpp │ ├── ncvalues.h │ ├── netcdf.cpp │ ├── netcdf.hh │ └── netcdfcpp.h ├── nodes │ ├── CMakeLists.txt │ ├── CalculationList.cpp │ ├── CalculationList.h │ ├── ClosedContourOp.h │ ├── DetectNodes.cpp │ ├── HistogramNodes.cpp │ ├── NodeFileCompose.cpp │ ├── NodeFileEditor.cpp │ ├── NodeFileFilter.cpp │ ├── NodeOutputOp.h │ └── StitchNodes.cpp ├── sandbox │ ├── CMakeLists.txt │ ├── CompressBlobs.cpp │ └── SpineARs.cpp └── util │ ├── AccumulateData.cpp │ ├── AccumulateERA5Forecast.cpp │ ├── AutoCuratorTool.cpp │ ├── CMakeLists.txt │ ├── Climatology.cpp │ ├── FourierFilter.cpp │ ├── GenerateConnectivityFile.cpp │ ├── GenerateNearestNeighborMap.cpp │ ├── IntegrateDimension.cpp │ ├── LagrangianParcelTracker.cpp │ ├── QuantileCalculator.cpp │ ├── ShapefileMask.cpp │ └── VariableProcessor.cpp └── test ├── control_shell ├── CLIVAR_BLOBS_GH_LOOP.sh ├── CLIVAR_BLOBS_PV_LOOP.sh ├── ERA_BLOBS_CONST_LOOP.sh ├── ERA_BLOBS_LOOP.sh ├── ERA_DETECT_LOOP.sh ├── ERA_GHAnom_controls.sh ├── GH_script.sh ├── PV_script.sh ├── interp_clivar_to_climo.ncl └── new_splits.sh └── import_scripts ├── auto_workflow.py ├── batch_anom_import.py ├── batch_import.py ├── batch_sfc_import.py └── format_nc_ERA.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Top-level build and binary directories 2 | /bin/ 3 | /build/ 4 | 5 | # Build directories 6 | src/build/* 7 | 8 | # Dependency directories 9 | src/depend/* 10 | 11 | # Temporary and swap files 12 | *.swp 13 | 14 | # NetCDF and object files 15 | *.nc 16 | *.o 17 | *.d 18 | 19 | # Static libraries and archives 20 | *.a 21 | *.tar.gz 22 | 23 | # Test data and directories 24 | test/ 25 | test/*.nc 26 | test/*.nc4 27 | test/*.dat 28 | test/*.txt 29 | test/*.g 30 | test/cfsr_tests 31 | test/*.eps 32 | test/*.png 33 | test/statfigs/* 34 | 35 | # Editor settings 36 | .vscode/ 37 | 38 | # CMake generated files (top-level) 39 | CMakeCache.txt 40 | CMakeFiles/ 41 | cmake_install.cmake 42 | Makefile 43 | install_manifest.txt 44 | 45 | # CMake generated files in src 46 | src/Makefile 47 | 48 | # CMake generated files in subdirectories 49 | src/base/cmake_install.cmake 50 | src/blobs/cmake_install.cmake 51 | src/blocking/cmake_install.cmake 52 | src/netcdf-cxx-4.2/cmake_install.cmake 53 | src/nodes/cmake_install.cmake 54 | src/sandbox/cmake_install.cmake 55 | src/util/cmake_install.cmake 56 | 57 | src/base/Makefile 58 | src/blobs/Makefile 59 | src/blocking/Makefile 60 | src/netcdf-cxx-4.2/Makefile 61 | src/nodes/Makefile 62 | src/sandbox/Makefile 63 | src/util/Makefile 64 | 65 | # IDE settings 66 | .vscode 67 | .idea 68 | 69 | 70 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. 
(See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | cmake_minimum_required(VERSION 3.12) 7 | 8 | project(tempestextremes LANGUAGES CXX) 9 | 10 | set(CMAKE_CXX_STANDARD 11) 11 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 12 | 13 | set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/CMake ${CMAKE_MODULE_PATH}) 14 | 15 | # Do not override the installation prefix to the source directory 16 | # Users should specify an install prefix if desired via -DCMAKE_INSTALL_PREFIX 17 | 18 | # Set default build type if not already defined 19 | if(NOT CMAKE_BUILD_TYPE) 20 | set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) 21 | endif() 22 | 23 | # MPI is now enabled only via the command line (-DENABLE_MPI=ON) 24 | if(DEFINED ENABLE_MPI AND ENABLE_MPI) 25 | find_package(MPI REQUIRED) 26 | if(MPI_FOUND) 27 | add_compile_definitions(TEMPEST_MPIOMP) 28 | endif() 29 | endif() 30 | 31 | if(WIN32) 32 | add_compile_definitions(_USE_MATH_DEFINES 1) 33 | endif() 34 | 35 | # Required dependencies 36 | find_package(NetCDF REQUIRED) 37 | 38 | # Output directories for out-of-source builds 39 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") 40 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") 41 | set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") 42 | 43 | add_subdirectory(src) 44 | 45 | # Ensure the runtime output directory exists. 46 | file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/bin") 47 | 48 | # Install rule: copy all files from the build's bin directory to the install prefix's bin directory. 49 | install(DIRECTORY "${CMAKE_BINARY_DIR}/bin/" DESTINATION bin) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2025, Paul Ullrich 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | -------------------------------------------------------------------------------- /Make.defs: -------------------------------------------------------------------------------- 1 | ## 2 | ## Compilation directories 3 | ## 4 | DEPDIR=depend 5 | BUILDDIR=build 6 | 7 | ## 8 | ## Dependency file construction 9 | ## 10 | DEPDIR=depend 11 | MAKEDEPEND=mkdir -p $(DEPDIR); \ 12 | echo "-- Generating dependencies for $<"; \ 13 | touch $(DEPDIR)/$*.P && makedepend -- $(CFLAGS) -- -f $(DEPDIR)/$*.P $< > /dev/null 2>&1; \ 14 | sed 's/\($*\)\.o[ :]*/$(BUILDDIR)\/\1.o $(DEPDIR)\/$*.d : /g' < $(DEPDIR)/$*.P > $(DEPDIR)/$*.d; \ 15 | rm -f $(DEPDIR)/$*.P 16 | #$(CC) -M $(CFLAGS) $< > $(DEPDIR)/$*.P; \ 17 | 18 | ## 19 | ## Compilation rules 20 | ## 21 | $(BUILDDIR)/%.o : %.cpp 22 | @mkdir -p $(BUILDDIR) 23 | $(CC) $(CFLAGS) -c -o $@ $< 24 | 25 | $(BUILDDIR)/%.o : %.c 26 | @mkdir -p $(BUILDDIR) 27 | $(CCOMP) $(CFLAGS) -c -o $@ $< 28 | 29 | $(BUILDDIR)/%.o : %.f90 30 | @mkdir -p $(BUILDDIR) 31 | $(F90) $(FORTFLAGS) -c -o $@ $< 32 | 33 | $(BUILDDIR)/%.o : %.f 34 | @mkdir -p $(BUILDDIR) 35 | $(F90) $(FORTFLAGS) -c -o $@ $< 36 | 37 | ## 38 | ## Dependency generation 39 | ## 40 | $(DEPDIR)/%.d : %.cpp 41 | @$(MAKEDEPEND) 42 | $(DEPDIR)/%.d : %.f90 43 | @$(MAKEDEPEND) 44 | $(DEPDIR)/%.d : %.f 45 | @$(MAKEDEPEND) 46 | 47 | -------------------------------------------------------------------------------- /analysis/blobmetrics/doc/BlobMetrics-UserGuide.aux: -------------------------------------------------------------------------------- 1 | \relax 2 | \@writefile{toc}{\contentsline {section}{\numberline {1}Minimum software and data requirements}{3}} 3 | \@writefile{toc}{\contentsline {section}{\numberline {2}Usage}{3}} 4 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Command line usage}{3}} 5 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.1.1}Namelists}{3}} 6 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Example workflow: running BlobMetrics for the first time}{4}} 7 | \@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces BlobMetrics schematic. 
Inputs are in green, analysis tools are in purple, and outputs are in pink.}}{5}} 8 | \newlabel{fig:schematic}{{1}{5}} 9 | \@writefile{toc}{\contentsline {section}{\numberline {3}BlobMetrics Utilities}{5}} 10 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Read BlobStats files into a single table (\texttt {--readfiles})}{5}} 11 | \newlabel{readfiles}{{3.1}{5}} 12 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.1.1}Requirements}{5}} 13 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.1.2}Command line syntax}{6}} 14 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.1.3}Output}{6}} 15 | \newlabel{tableoutput}{{3.1.3}{6}} 16 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Handling instances of merging/splitting blobs (\texttt {--mergetable)}}{6}} 17 | \newlabel{mergesection}{{3.2}{6}} 18 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.2.1}Requirements}{7}} 19 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.2.2}Command line syntax}{7}} 20 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.2.3}Output}{8}} 21 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Create a per-blob summary table (\texttt {--summarize})}{8}} 22 | \newlabel{summarysection}{{3.3}{8}} 23 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.1}Requirements}{8}} 24 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.2}Command line syntax}{8}} 25 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.3}Output}{9}} 26 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Reading in NetCDF data (\texttt {--readnetcdf})}{9}} 27 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.1}Requirements}{10}} 28 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.2}Command line syntax}{10}} 29 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.4.3}Output}{10}} 30 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Comparing two datasets on a per-timestep basis (\texttt {--intercomparison})}{10}} 31 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.1}Requirements}{10}} 32 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.2}Command line syntax}{11}} 33 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.3}Output}{11}} 34 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Comparing the average blocking frequency of two datasets (\texttt {pearsonrmse})}{12}} 35 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.1}Requirements}{12}} 36 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.2}Command line syntax}{12}} 37 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.3}Output}{12}} 38 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.7}Create a summary report (\texttt {--genreport})}{12}} 39 | \@writefile{toc}{\contentsline {section}{\numberline {A}BlobStats File Format}{13}} 40 | \newlabel{blobformat}{{A}{13}} 41 | -------------------------------------------------------------------------------- /analysis/blobmetrics/doc/BlobMetrics-UserGuide.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/analysis/blobmetrics/doc/BlobMetrics-UserGuide.pdf -------------------------------------------------------------------------------- 
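
The user guide sections indexed above (notably --readfiles and Appendix A, "BlobStats File Format") concern parsing BlobStats output into tables. A minimal illustrative sketch of that layout, inferred from the parser in analysis/blobmetrics/readfiles.R further below: the header row is comma-separated with the time column first, each "Blob n" line opens a new blob, and data rows are whitespace-separated with the timestamp encoded as YYYY-MM-DD-SSSSS (seconds of day). The file name and all values below are hypothetical, not actual TempestExtremes output.

#Hedged sketch: parse a mock BlobStats file with read_stats_to_table()
#(defined in analysis/blobmetrics/readfiles.R; the mock values are invented)
mock<-c("time,centlat,centlon,minlat,maxlat,minlon,maxlon,area",
        "Blob 1",
        "1980-01-01-00000\t55.0\t210.0\t45.0\t65.0\t200.0\t220.0\t0.004",
        "1980-01-01-21600\t55.5\t212.0\t45.5\t65.5\t202.0\t222.0\t0.005")
writeLines(mock,"mock_blobstats.txt")
source("readfiles.R") #provides read_stats_to_table()
df_mock<-read_stats_to_table(c("mock_blobstats.txt"),var="MOCK")
#Expect one row per blob per time step, with datehour, area_km, bnum,
#and file columns appended by the reader
head(df_mock)
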
/analysis/blobmetrics/doc/BlobMetrics-UserGuide.synctex.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/analysis/blobmetrics/doc/BlobMetrics-UserGuide.synctex.gz -------------------------------------------------------------------------------- /analysis/blobmetrics/doc/BlobMetrics-UserGuide.toc: -------------------------------------------------------------------------------- 1 | \contentsline {section}{\numberline {1}Minimum software and data requirements}{3} 2 | \contentsline {section}{\numberline {2}Usage}{3} 3 | \contentsline {subsection}{\numberline {2.1}Command line usage}{3} 4 | \contentsline {subsubsection}{\numberline {2.1.1}Namelists}{3} 5 | \contentsline {subsection}{\numberline {2.2}Example workflow: running BlobMetrics for the first time}{4} 6 | \contentsline {section}{\numberline {3}BlobMetrics Utilities}{5} 7 | \contentsline {subsection}{\numberline {3.1}Read BlobStats files into a single table (\texttt {--readfiles})}{5} 8 | \contentsline {subsubsection}{\numberline {3.1.1}Requirements}{5} 9 | \contentsline {subsubsection}{\numberline {3.1.2}Command line syntax}{6} 10 | \contentsline {subsubsection}{\numberline {3.1.3}Output}{6} 11 | \contentsline {subsection}{\numberline {3.2}Handling instances of merging/splitting blobs (\texttt {--mergetable)}}{6} 12 | \contentsline {subsubsection}{\numberline {3.2.1}Requirements}{7} 13 | \contentsline {subsubsection}{\numberline {3.2.2}Command line syntax}{7} 14 | \contentsline {subsubsection}{\numberline {3.2.3}Output}{8} 15 | \contentsline {subsection}{\numberline {3.3}Create a per-blob summary table (\texttt {--summarize})}{8} 16 | \contentsline {subsubsection}{\numberline {3.3.1}Requirements}{8} 17 | \contentsline {subsubsection}{\numberline {3.3.2}Command line syntax}{8} 18 | \contentsline {subsubsection}{\numberline {3.3.3}Output}{9} 19 | \contentsline {subsection}{\numberline {3.4}Reading in NetCDF data (\texttt {--readnetcdf})}{9} 20 | \contentsline {subsubsection}{\numberline {3.4.1}Requirements}{10} 21 | \contentsline {subsubsection}{\numberline {3.4.2}Command line syntax}{10} 22 | \contentsline {subsubsection}{\numberline {3.4.3}Output}{10} 23 | \contentsline {subsection}{\numberline {3.5}Comparing two datasets on a per-timestep basis (\texttt {--intercomparison})}{10} 24 | \contentsline {subsubsection}{\numberline {3.5.1}Requirements}{10} 25 | \contentsline {subsubsection}{\numberline {3.5.2}Command line syntax}{11} 26 | \contentsline {subsubsection}{\numberline {3.5.3}Output}{11} 27 | \contentsline {subsection}{\numberline {3.6}Comparing the average blocking frequency of two datasets (\texttt {pearsonrmse})}{12} 28 | \contentsline {subsubsection}{\numberline {3.6.1}Requirements}{12} 29 | \contentsline {subsubsection}{\numberline {3.6.2}Command line syntax}{12} 30 | \contentsline {subsubsection}{\numberline {3.6.3}Output}{12} 31 | \contentsline {subsection}{\numberline {3.7}Create a summary report (\texttt {--genreport})}{12} 32 | \contentsline {section}{\numberline {A}BlobStats File Format}{13} 33 | -------------------------------------------------------------------------------- /analysis/blobmetrics/doc/Framework_figure.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/analysis/blobmetrics/doc/Framework_figure.pdf 
-------------------------------------------------------------------------------- /analysis/blobmetrics/download_libraries.R: -------------------------------------------------------------------------------- 1 | #Run this before using stitch_metric_framework.R 2 | 3 | 4 | req_libs<-c("abind","akima","argparse", 5 | "ggplot2","gtable", 6 | "knitr","markdown","ncdf4", 7 | "ncdf4.helpers","PCICt", 8 | "reshape2","RNetCDF","rmarkdown") 9 | for (r in req_libs){ 10 | install.packages(r, repos='http://cran.us.r-project.org') 11 | } 12 | -------------------------------------------------------------------------------- /analysis/blobmetrics/examples/blank_setupfile.R: -------------------------------------------------------------------------------- 1 | #The requirements are as follows: 2 | #1) StitchBlobs files from each dataset 3 | #2) Corresponding BlobStats files from each dataset 4 | #3) Optional but recommended: BlobStats output from 5 | # the separate DetectBlobs function 6 | # (This will help deal with cases where two blobs 7 | # merge into a single blob at a later time) 8 | 9 | #REPORT HEADER: ANY TEXT THAT YOU WANT TO GO AT THE TOP OF THE REPORT 10 | #NOTE: must be in quotes. Careful of any special characters 11 | metadata_datasets="" 12 | #This is the number of years in each dataset! (Vector of integers) 13 | nyears<-c(0) 14 | 15 | ########### 16 | #FILE INFO 17 | #Will always output RData files 18 | #But there is an option to also output text files 19 | # or CSV files with the table data 20 | #Set following variables to TRUE if you wish to have one or both of these outputs 21 | output_txt<-FALSE 22 | output_csv<-FALSE 23 | 24 | #Names of the datasets (vector of strings) 25 | #Example: Varnames<-c("ERA-Interim","CFSR","MERRA-2") 26 | #For all of the vectors of strings, make sure that the lengths are identical to 27 | # the length of this vector! 28 | Varnames<-c() 29 | 30 | #The spatial resolutions of the datasets (Vector of strings. 31 | #Optional-- delete this variable if you don't want to use it) 32 | #Example: resolutions<-c("1x1","0.5x0.625","0.5x0.5") 33 | resolutions<-c() 34 | 35 | #Working directory (String) 36 | #This is where all of the R function files are stored 37 | work_dir<-"" 38 | #Output directory(String) 39 | # Main directory where all output files will go 40 | output_dir<-"" 41 | #Name of output subdirectory (String) 42 | # This will be created in the output directory specified above 43 | output_subdir<-"" 44 | #Name of the file prefix (String). 45 | #Output file will be [prefix]_[Varname]_[suffix] (for example, [prefix]_ERA_stitchtable.RData) 46 | output_prefix<-"" 47 | #Output name for the master namelist that will be generated using this file template (String) 48 | fname_namelist<-"" 49 | 50 | 51 | #Input lists of files 52 | #Must use full pathname for each of these lists! 53 | #Example: stitch_lists<-c("~/input_dir/ERA/ERA_list","~/input_dir/CFSR/CFSR_list","~/input_dir/MERRA/MERRA_list") 54 | #List of BlobStats files (from StitchBlobs output) to read into readfiles.R (vector of strings) 55 | stitch_lists<-c() 56 | #Use DetectBlobs inputs? 
(TRUE/FALSE) 57 | use_detectblob<-TRUE 58 | #List of BlobStats files (from DetectBlobs output) to read into readfiles.R (vector of strings) 59 | #If not using BlobStats files from DetectBlobs, then delete this variable 60 | detect_lists<-c() 61 | #List of StitchBlobs files to read into readnetcdf.R (vector of strings) 62 | stitchblob_lists<-c() 63 | #Name of the StitchBlobs variable in the NetCDF file (vector of strings) 64 | #Note: if the variable name is identical for all file, you can do 65 | # varvec<-rep("VARNAME",length(Varnames)) 66 | varvec<-c() 67 | #Name of the time, lat, lon axes (String) 68 | timename<-"time" 69 | latname<-"lat" 70 | lonname<-"lon" 71 | #Transform the lon axis? (TRUE/FALSE) 72 | #Note: if a dataset already has this longitude extent, it will do nothing 73 | #from 0/360 to -180/180 74 | transformto180<-FALSE 75 | #from -180/180 to 0/360 76 | transformto360<-TRUE 77 | #Subset lat and lon if desired (note: minlon and maxlon should correspond to 78 | # the appropriate longitude extent, i.e. either in the -180/180 range or 0/360 range) 79 | #If you don't wish to subset, delete these four variables 80 | minlat<- -90 81 | maxlat<- 90 82 | minlon<- 0 83 | maxlon<- 359 84 | #Regrid to 1 degree? (TRUE/FALSE) 85 | regridto1degree<-TRUE 86 | #For Pearson correlation and RMSE: only use common subset of time? 87 | # (Set to false if comparing two different time periods, such as historical vs RCP8.5) 88 | useCommonTime<-FALSE 89 | 90 | #Which sections will be included in the output report? (T/F) 91 | #Initial summary table-- requires output from --summarize and --readfiles/--mergetable 92 | includeSummTable<-TRUE 93 | #Blocking frequency plots 94 | includeFrequencyPlots<-TRUE 95 | #Pearson Pattern Correlation between blocking frequencies 96 | includePearson<-TRUE 97 | #Root mean square error between blocking frequencies 98 | includeRMSE<-TRUE 99 | #Density plots for duration, speed, and size and associated p-values 100 | includeDensityP<-TRUE 101 | #Intercomparison 102 | includeProbability<-TRUE 103 | includeSpatialSimilarity<-TRUE 104 | -------------------------------------------------------------------------------- /analysis/blobmetrics/gen_blank_setupfile.R: -------------------------------------------------------------------------------- 1 | #This script will generate a blank namelist which can then be filled in 2 | 3 | fname<-"blank_setupfile.R" 4 | sink(fname,split=T) 5 | 6 | cat("#The requirements are as follows: 7 | #1) StitchBlobs files from each dataset 8 | #2) Corresponding BlobStats files from each dataset 9 | #3) Optional but recommended: BlobStats output from 10 | # the separate DetectBlobs function 11 | # (This will help deal with cases where two blobs 12 | # merge into a single blob at a later time) 13 | 14 | #REPORT HEADER: ANY TEXT THAT YOU WANT TO GO AT THE TOP OF THE REPORT 15 | #NOTE: must be in quotes. Careful of any special characters 16 | metadata_datasets=\"\" 17 | #This is the number of years in each dataset! 
(Vector of integers) 18 | nyears<-c(0) 19 | 20 | ########### 21 | #FILE INFO 22 | #Will always output RData files 23 | #But there is an option to also output text files 24 | # or CSV files with the table data 25 | #Set following variables to TRUE if you wish to have one or both of these outputs 26 | output_txt<-FALSE 27 | output_csv<-FALSE 28 | 29 | #Names of the datasets (vector of strings) 30 | #Example: Varnames<-c(\"ERA-Interim\",\"CFSR\",\"MERRA-2\") 31 | #For all of the vectors of strings, make sure that the lengths are identical to 32 | # the length of this vector! 33 | Varnames<-c() 34 | 35 | #The spatial resolutions of the datasets (Vector of strings. 36 | #Optional-- delete this variable if you don't want to use it) 37 | #Example: resolutions<-c(\"1x1\",\"0.5x0.625\",\"0.5x0.5\") 38 | resolutions<-c() 39 | 40 | #Working directory (String) 41 | #This is where all of the R function files are stored 42 | work_dir<-\"\" 43 | #Output directory(String) 44 | # Main directory where all output files will go 45 | output_dir<-\"\" 46 | #Name of output subdirectory (String) 47 | # This will be created in the output directory specified above 48 | output_subdir<-\"\" 49 | #Name of the file prefix (String). 50 | #Output file will be [prefix]_[Varname]_[suffix] (for example, [prefix]_ERA_stitchtable.RData) 51 | output_prefix<-\"\" 52 | #Output name for the master namelist that will be generated using this file template (String) 53 | fname_namelist<-\"\" 54 | 55 | 56 | #Input lists of files 57 | #Must use full pathname for each of these lists! 58 | #Example: stitch_lists<-c(\"~/input_dir/ERA/ERA_list\",\"~/input_dir/CFSR/CFSR_list\",\"~/input_dir/MERRA/MERRA_list\") 59 | #List of BlobStats files (from StitchBlobs output) to read into readfiles.R (vector of strings) 60 | stitch_lists<-c() 61 | #Use DetectBlobs inputs? (TRUE/FALSE) 62 | use_detectblob<-TRUE 63 | #List of BlobStats files (from DetectBlobs output) to read into readfiles.R (vector of strings) 64 | #If not using BlobStats files from DetectBlobs, then delete this variable 65 | detect_lists<-c() 66 | #List of StitchBlobs files to read into readnetcdf.R (vector of strings) 67 | stitchblob_lists<-c() 68 | #Name of the StitchBlobs variable in the NetCDF file (vector of strings) 69 | #Note: if the variable name is identical for all file, you can do 70 | # varvec<-rep(\"VARNAME\",length(Varnames)) 71 | varvec<-c() 72 | #Name of the time, lat, lon axes (String) 73 | timename<-\"time\" 74 | latname<-\"lat\" 75 | lonname<-\"lon\" 76 | #Transform the lon axis? (TRUE/FALSE) 77 | #Note: if a dataset already has this longitude extent, it will do nothing 78 | #from 0/360 to -180/180 79 | transformto180<-FALSE 80 | #from -180/180 to 0/360 81 | transformto360<-TRUE 82 | #Subset lat and lon if desired (note: minlon and maxlon should correspond to 83 | # the appropriate longitude extent, i.e. either in the -180/180 range or 0/360 range) 84 | #If you don't wish to subset, delete these four variables 85 | minlat<- -90 86 | maxlat<- 90 87 | minlon<- 0 88 | maxlon<- 359 89 | #Regrid to 1 degree? (TRUE/FALSE) 90 | regridto1degree<-TRUE 91 | #For Pearson correlation and RMSE: only use common subset of time? 92 | # (Set to false if comparing two different time periods, such as historical vs RCP8.5) 93 | useCommonTime<-FALSE 94 | 95 | #Which sections will be included in the output report? 
(T/F) 96 | #Initial summary table-- requires output from --summarize and --readfiles/--mergetable 97 | includeSummTable<-TRUE 98 | #Blocking frequency plots 99 | includeFrequencyPlots<-TRUE 100 | #Pearson Pattern Correlation between blocking frequencies 101 | includePearson<-TRUE 102 | #Root mean square error between blocking frequencies 103 | includeRMSE<-TRUE 104 | #Density plots for duration, speed, and size and associated p-values 105 | includeDensityP<-TRUE 106 | #Intercomparison 107 | includeProbability<-TRUE 108 | includeSpatialSimilarity<-TRUE 109 | ") 110 | sink() 111 | -------------------------------------------------------------------------------- /analysis/blobmetrics/generateReport.R: -------------------------------------------------------------------------------- 1 | require(knitr) 2 | require(markdown) 3 | require(rmarkdown) 4 | require(reshape2) 5 | #Returns in the -180->180 range 6 | lon_convert<-function(lon){ 7 | distFrom180=lon-180. 8 | return(ifelse( 9 | distFrom180<0, 10 | lon, 11 | -(180-distFrom180) 12 | )) 13 | } 14 | #Returns in the 0->360 range 15 | lon_convert2<-function(lon){ 16 | return(ifelse(lon<0,360+lon,lon)) 17 | } 18 | 19 | #source("~/tempestextremes/test/STITCH_METRICS/namelist_report_JJA_SP.R") 20 | #Generate the title string based on the variables 21 | 22 | title_string<-paste("Comparison of blocking data for ",Varnames[1]) 23 | for (t in 2:length(Varnames)){ 24 | title_string<-paste(title_string, Varnames[t], sep=", ") 25 | } 26 | 27 | 28 | md_file<-"report_template.Rmd" 29 | 30 | avgdata<-data.frame(x=numeric(),y=numeric(),value=numeric(), 31 | VAR=character(),lon=numeric(),lat=numeric()) 32 | 33 | #Make a list object that will have all of the relevant data 34 | comparison_data<-list() 35 | for (i in 1:length(Varnames)){ 36 | comparison_data$varname[i]<-Varnames[i] 37 | #Load the merged table 38 | load(mergefiles[i]) 39 | merge_dfname<-sprintf("V%d_merge",i) 40 | assign(merge_dfname,get(df_name)) 41 | comparison_data$mergename[i]<-merge_dfname 42 | #load the summary table 43 | load(summfiles[i]) 44 | summ_dfname<-sprintf("V%d_summ",i) 45 | assign(summ_dfname,df_summ) 46 | comparison_data$summname[i]<-summ_dfname 47 | #load the blob data 48 | load(blobfiles[i]) 49 | assign(sprintf("lat%d",i),lat_axis) 50 | assign(sprintf("lon%d",i),lon_axis) 51 | assign(sprintf("time%d",i),time_format) 52 | assign(sprintf("blob%d",i),get(blobname[i])) 53 | temp_var<-get(sprintf("blob%d",i)) 54 | temp_var[which(temp_var>0)]<-1 55 | assign(sprintf("blob%d",i),temp_var) 56 | #average the blob data 57 | avgname<-sprintf("avgblob%d",i) 58 | ablob<-apply(get(sprintf("blob%d",i)),c(1,2),mean) 59 | #Add to the long table for plotting 60 | temp<-melt(ablob,varnames=c("x","y")) 61 | temp$VAR<-rep(Varnames[i],nrow(temp)) 62 | temp$lon<-lon_axis[temp$x] 63 | temp$lat<-lat_axis[temp$y] 64 | avgdata<-rbind(avgdata,temp) 65 | assign(avgname,ablob) 66 | } 67 | 68 | avgdata$VAR<-factor(avgdata$VAR,levels=Varnames) 69 | 70 | #Generate the report from the template 71 | rmarkdown::render(md_file,output_file=output_name) 72 | 73 | -------------------------------------------------------------------------------- /analysis/blobmetrics/mergetable.R: -------------------------------------------------------------------------------- 1 | lon_convert<-function(lon){ 2 | distFrom180=lon-180. 
3 | return(ifelse( 4 | distFrom180<0, 5 | lon, 6 | -(180-distFrom180) 7 | )) 8 | } 9 | 10 | 11 | #-180 to 180 -> 12 | lon_convert2<-function(lon){ 13 | return(ifelse(lon<0,360+lon,lon)) 14 | } 15 | 16 | 17 | merge_dfs<-function(df_stitch,df_nostitch,rfn="",textfn="",csvfn="",df_merged_name="", 18 | byvec=c("datehour","area","var")){ 19 | df_merged_name<-ifelse(df_merged_name=="","df_merged",df_merged_name) 20 | 21 | df_names<-names(df_stitch) 22 | #This data frame only has common rows 23 | df_comm<-merge(df_stitch,df_nostitch,by=byvec) 24 | #This data frame has all rows, both merged and not 25 | df_tot<-merge(df_stitch,df_nostitch,by=byvec,all=T) 26 | #This data frame has all the rows of the merged blobs 27 | df_istot<-df_tot[is.na(df_tot$bnum.y),] 28 | #This data frame has all the rows of the multiple blobs that make up the merged blob 29 | df_isnot<-df_tot[is.na(df_tot$bnum.x),] 30 | 31 | df_comm$bnum2<-df_comm$bnum.x 32 | df_tot$bnum2<-df_tot$bnum.x 33 | df_istot$bnum2<-df_istot$bnum.x 34 | df_isnot$bnum2<-df_isnot$bnum.y 35 | 36 | #Now checking blobs where there is not a match: 37 | #Check whether or not blobs from DetectBlobs are occurring at the same time step 38 | #within the extent of the original StitchBlobs output 39 | for (t in unique(df_istot$datehour)){ 40 | df_check<-df_istot[df_istot$datehour==t,] 41 | df_otherblobs<-df_isnot[df_isnot$datehour==t,] 42 | if (nrow(df_otherblobs)>0){ 43 | for (n in 1:nrow(df_check)){ 44 | #Get the min/max lat and lon extent 45 | #Might need to deal with periodic boundary condition! 46 | clatmin<-df_check[n,"minlat.x"] 47 | clatmax<-df_check[n,"maxlat.x"] 48 | PER_BOUND<-FALSE 49 | clonmin<-df_check[n,"minlon.x"] 50 | clonmax<-df_check[n,"maxlon.x"] 51 | axis180<-ifelse((clonmin<0 | clonmax<0),TRUE,FALSE) 52 | if (clonmin>clonmax){ 53 | PER_BOUND<-TRUE 54 | clonmin<-ifelse(axis180==FALSE,lon_convert(clonmin),lon_convert2(clonmin)) 55 | clonmax<-ifelse(axis180==FALSE,lon_convert(clonmax),lon_convert2(clonmax)) 56 | } 57 | for (y in 1:nrow(df_otherblobs)){ 58 | #Does it fall within the bounds of the big stitched blob? 
59 | blatmin<-df_otherblobs[y,"minlat.y"] 60 | blatmax<-df_otherblobs[y,"maxlat.y"] 61 | blonmin<-df_otherblobs[y,"minlon.y"] 62 | blonmax<-df_otherblobs[y,"maxlon.y"] 63 | if (PER_BOUND==TRUE){ 64 | blonmin<-ifelse(axis180==FALSE,lon_convert(blonmin),lon_convert2(blonmin)) 65 | blonmax<-ifelse(axis180==FALSE,lon_convert(blonmax),lon_convert2(blonmax)) 66 | } 67 | if (((blatmin>=clatmin)&(blatmax<=clatmax)& 68 | (blonmin>=clonmin)&(blonmax<=clonmax)& 69 | (df_check[n,"var"]==df_otherblobs[y,"var"]))){ 70 | #Replace the unmerged blob number with the merged blob number 71 | df_otherblobs[y,"bnum.x"]<-df_check[n,"bnum.x"] 72 | df_comm<-rbind(df_comm,df_otherblobs[y,]) 73 | } 74 | } 75 | } 76 | } 77 | } 78 | #Clean up the output 79 | tcol<-grep("datehour",names(df_comm)) 80 | vcol<-grep("var",names(df_comm)) 81 | bcol<-grep("bnum.x",names(df_comm)) 82 | b2col<-grep("bnum2",names(df_comm)) 83 | acol<-grep("area",names(df_comm)) 84 | akcol<-grep("area_km",names(df_comm)) 85 | acol<-acol[!acol %in% akcol] 86 | #Get all of the columns with .y in the name 87 | ycol<-grep(".y",names(df_comm)) 88 | bycol<-grep("bnum.y",names(df_comm)) 89 | #Remove bnum.y from columns 90 | ycol<-ycol[!ycol %in% bycol] 91 | 92 | 93 | df_return<-df_comm[,c(tcol,ycol,bcol,b2col,acol,vcol)] 94 | colnames(df_return)<-gsub("\\.y","",names(df_return)) 95 | colnames(df_return)<-gsub("\\.x","",names(df_return)) 96 | df_final<-df_return[,c(df_names,"bnum2")] 97 | #Switch file and bnum2 98 | nlast<-length(names(df_final)) 99 | df_final<-df_final[,c(1:(nlast-2),nlast,nlast-1)] 100 | df_final<-df_final[order(df_final$datehour),] 101 | if (rfn!=""){ 102 | assign(df_merged_name,df_final) 103 | assign("df_name",df_merged_name) 104 | save(list=c(df_merged_name,"df_name"),file=rfn) 105 | print(sprintf("Wrote %s to file",rfn)) 106 | } 107 | if (textfn!=""){ 108 | write.table(df_final,file=textfn,sep="\t",row.names=FALSE,quote=FALSE) 109 | print(sprintf("Wrote %s to file",textfn)) 110 | } 111 | if (csvfn!=""){ 112 | write.csv(df_final,file=csvfn,row.names=FALSE,quote=FALSE) 113 | print(sprintf("Wrote %s to file",csvfn)) 114 | } 115 | #Return the merged data frame 116 | return(df_final) 117 | } -------------------------------------------------------------------------------- /analysis/blobmetrics/pearson.R: -------------------------------------------------------------------------------- 1 | require(reshape2) 2 | pearson_arr<-function(arr1,arr2,lat1,lat2,lon1,lon2,interp=FALSE,centered=FALSE){ 3 | 4 | longdata1<-melt(arr1,value.name="V1") 5 | longdata1$lon<-lon1[longdata1$Var1] 6 | longdata1$lat<-lat1[longdata1$Var2] 7 | 8 | 9 | longdata2<-melt(arr2,value.name="V2") 10 | longdata2$lon<-lon2[longdata2$Var1] 11 | longdata2$lat<-lat2[longdata2$Var2] 12 | if (interp==TRUE){ 13 | 14 | temp<-merge(longdata1[,c("lon","lat","V1")],longdata2[,c("lon","lat","V2")],by=c("lon","lat"),all=T) 15 | temp_noV1<-temp[!is.na(temp$V1),] 16 | narows<-which(is.na(temp_noV1$V2)) 17 | 18 | for (i in narows){ 19 | temp_noV1[i,"V2"]<-interp_pt(temp_noV1[i,"lon"],temp_noV1[i,"lat"],temp,lon2,lat2) 20 | } 21 | longdata<-temp_noV1 22 | }else{ 23 | longdata<-merge(longdata1[,c("lon","lat","V1")],longdata2[,c("lon","lat","V2")],by=c("lon","lat")) 24 | } 25 | 26 | #Create a cosine latitude column 27 | longdata$coslat<-cos(longdata$lat*pi/180) 28 | longdata$cosV1<-longdata$V1*longdata$coslat 29 | longdata$cosV2<-longdata$V2*longdata$coslat 30 | 31 | Vproduct<-longdata$cosV1*longdata$cosV2 32 | 33 | V1bar<-mean(longdata$cosV1) 34 | V2bar<-mean(longdata$cosV2) 35 | 36 | 
V1diff<-longdata$cosV1-V1bar 37 | V2diff<-longdata$cosV2-V2bar 38 | VdiffProduct<-V1diff*V2diff 39 | 40 | if (centered==TRUE){ 41 | r<-sum(VdiffProduct)/sqrt(sum(V1diff*V1diff)*sum(V2diff*V2diff)) 42 | }else{ 43 | r<-sum(Vproduct)/sqrt(sum(longdata$cosV1*longdata$cosV1)*sum(longdata$cosV2*longdata$cosV2)) 44 | } 45 | 46 | return(r) 47 | } -------------------------------------------------------------------------------- /analysis/blobmetrics/python/mergeStats.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pandas as pd 3 | pd.set_option('mode.chained_assignment',None) 4 | 5 | #Look at when the data is merged-- are there any blobs that should be split? 6 | def lon_convert_360_180(lon): 7 | distFrom180=lon-180 8 | retlon=-999 9 | if (distFrom180<0): 10 | retlon=lon 11 | else: 12 | retlon=distFrom180-180. 13 | return(retlon) 14 | 15 | 16 | parser=argparse.ArgumentParser(description="Provide the BlobStats outputs from both StitchBlobs and DetectBlobs") 17 | parser.add_argument("-fs","--filestitch",required=True,action="store") 18 | parser.add_argument("-fd","--filedetect",required=True,action="store") 19 | parser.add_argument("-o","--out",required=True,action="store") 20 | 21 | results=parser.parse_args() 22 | 23 | f_stitch=results.filestitch 24 | f_detect=results.filedetect 25 | fname_out=results.out 26 | 27 | df_stitch=pd.read_csv(f_stitch,na_filter=False) 28 | df_detect=pd.read_csv(f_detect,na_filter=False) 29 | 30 | #Intersection and union of files 31 | #intersection 32 | df_merged=pd.merge(df_stitch,df_detect,how="inner",on=['time','region','season','var','minlat','minlon','maxlat','maxlon','centlat','centlon']) 33 | #union 34 | df_allmerge=pd.merge(df_stitch,df_detect,how='outer',on=['time','region','season','var','minlat','minlon','maxlat','maxlon','centlat','centlon']) 35 | df_hasmerged=df_allmerge[df_allmerge['bnum_y'].isna()] 36 | df_issplit=df_allmerge[df_allmerge['bnum_x'].isna()] 37 | 38 | df_merged['bnum2'] = df_merged['bnum_x'] 39 | df_allmerge['bnum2'] = df_allmerge['bnum_x'] 40 | df_hasmerged['bnum2'] = df_hasmerged['bnum_x'] 41 | df_issplit['bnum2'] = df_issplit['bnum_y'] 42 | 43 | df_total=pd.DataFrame() 44 | for c in df_merged.columns.values: 45 | if not "_y" in c: 46 | if "_x" in c: 47 | newname=c.replace("_x","") 48 | df_total[newname]=df_merged[c] 49 | else: 50 | df_total[c]=df_merged[c] 51 | df_total['merged']=['NO']*len(df_total) 52 | for t in sorted(list(set(df_hasmerged['time']))): 53 | df_checkmerge=df_hasmerged[df_hasmerged['time'].str.match(t)] 54 | df_splits=df_issplit[df_issplit['time'].str.match(t)] 55 | if (len(df_splits)>0): 56 | for n in range(0,len(df_checkmerge)): 57 | minlat_merged=df_checkmerge['minlat'].iloc[n] 58 | maxlat_merged=df_checkmerge['maxlat'].iloc[n] 59 | minlon_merged=df_checkmerge['minlon'].iloc[n] 60 | maxlon_merged=df_checkmerge['maxlon'].iloc[n] 61 | PER_BOUND=False 62 | if (minlon_merged>maxlon_merged): 63 | PER_BOUND=True 64 | minlon_merged=lon_convert_360_180(minlon_merged) 65 | maxlon_merged=lon_convert_360_180(maxlon_merged) 66 | if (minlon_merged>maxlon_merged): 67 | maxlon_merged+=360. 
68 | for m in range(0,len(df_splits)): 69 | minlat_split=df_splits['minlat'].iloc[m] 70 | maxlat_split=df_splits['maxlat'].iloc[m] 71 | minlon_split=df_splits['minlon'].iloc[m] 72 | maxlon_split=df_splits['maxlon'].iloc[m] 73 | 74 | 75 | minlon_split_180=lon_convert_360_180(minlon_split) 76 | maxlon_split_180=lon_convert_360_180(maxlon_split) 77 | 78 | #Check original boundaries 79 | is_inside=False 80 | if ((minlat_split>=minlat_merged) & 81 | (maxlat_split<=maxlat_merged)& 82 | (minlon_split>=minlon_merged)& 83 | (maxlon_split<=maxlon_merged)): 84 | is_inside=True 85 | else: 86 | if ((minlat_split>=minlat_merged) & 87 | (maxlat_split<=maxlat_merged)& 88 | (minlon_split_180>=minlon_merged)& 89 | (maxlon_split_180<=maxlon_merged)): 90 | is_inside=True 91 | if (is_inside==True): 92 | merge_dict={} 93 | for c in df_splits.columns.values: 94 | if not "_x" in c: 95 | if "_y" in c: 96 | if ((c!="bnum_y") & (c!="fname_y")): 97 | newname=c.replace("_y","") 98 | merge_dict[newname]=df_splits[c].iloc[m] 99 | else: 100 | merge_dict[c] = df_splits[c].iloc[m] 101 | merge_dict['bnum']=df_checkmerge['bnum_x'].iloc[n] 102 | merge_dict['merged']='YES' 103 | merge_dict['fname']=df_checkmerge['fname_x'].iloc[n] 104 | df_total=df_total.append(merge_dict,ignore_index=True) 105 | df_total=df_total.sort_values(['var','region','time']) 106 | #print("Length of stitchblobs for {:} {:} was {:} and length of detectblobs was {:}; length of merged dataframe is {:}".format(d,r,len(df_stitch),len(df_detect),len(df_total))) 107 | 108 | df_total.to_csv(fname_out,index=False,na_rep="_") 109 | print("Wrote {:}".format(fname_out)) 110 | -------------------------------------------------------------------------------- /analysis/blobmetrics/python/readStats.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pandas as pd 3 | pd.set_option('mode.chained_assignment',None) 4 | 5 | #THIS READS IN BLOBSTATS DATA AND OUTPUTS IT IN CSV FORMAT 6 | parser=argparse.ArgumentParser(description="Provide a text file list of BlobStats outputs") 7 | parser.add_argument("-f","--filelist",required=True,action="store") 8 | parser.add_argument("-o","--out",required=True,action="store") 9 | parser.add_argument("-s","--season",required=True,action="store") 10 | parser.add_argument("-r","--region",required=True,action="store") 11 | parser.add_argument("-d","--dataset",required=True,action="store") 12 | #parser.add_argument("-c","--calendar",required=True,action="store") 13 | results=parser.parse_args() 14 | flist=open(results.filelist).read().splitlines() 15 | s=results.season 16 | r=results.region 17 | d=results.dataset 18 | fname_out=results.out 19 | 20 | #Get the headers from the first file 21 | colnames=open(flist[0]).readlines()[0].strip().split(",") 22 | colnames_full=["var","bnum","season","region"] 23 | colnames_full.extend(colnames) 24 | colnames_full.append("fname") 25 | 26 | #Output dataframe 27 | dat_total=pd.DataFrame(columns=colnames_full) 28 | for str_file in flist: 29 | dat=open(str_file).readlines() 30 | nl=len(dat) 31 | if(nl>1): 32 | for l in range(1,nl): 33 | lin=dat[l].strip().split("\t") 34 | if "Blob" in lin[0]: 35 | bnum=lin[0].split()[1] 36 | else: 37 | dict_line={"var":d,"bnum":bnum,"time":lin[0],"season":s,"region":r,"fname":str_file} 38 | # dict_line={"var":d,"bnum":bnum,"time":lin[0],"season":s,"region":r,"calendar":results.calendar,"fname":str_file} 39 | nc=1 40 | for v in colnames[1:len(colnames)]: 41 | dict_line[v]=float(lin[nc]) 42 | nc+=1 43 | 
dat_total=dat_total.append(dict_line,ignore_index=True) 44 | 45 | dat_total['area_km'] = dat_total['area']*(6371.*6371.) 46 | colnames_dat=dat_total.columns.tolist() 47 | colnames_dat[len(colnames_dat)-2]='area_km' 48 | colnames_dat[len(colnames_dat)-1]='fname' 49 | #reorder the columns (it was bugging me) 50 | dat_total=dat_total[colnames_dat] 51 | dat_total.to_csv(fname_out,index=False,na_rep='_') 52 | print("wrote {:}".format(fname_out)) 53 | -------------------------------------------------------------------------------- /analysis/blobmetrics/python/summStats.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | import xarray as xa 4 | import pandas as pd 5 | from datetime import datetime 6 | import cftime 7 | import math 8 | 9 | def deg2rad(deg): 10 | return(deg*(math.pi/180.)) 11 | 12 | def getDistanceFromLatLonInKm(lat1,lon1,lat2,lon2): 13 | R=6371. 14 | dlat=deg2rad(lat2-lat1) 15 | dlon=deg2rad(lon2-lon1) 16 | a=math.sin(dlat/2.)*math.sin(dlat/2.) + math.cos(deg2rad(lat1)) *\ 17 | math.cos(deg2rad(lat2)) *math.sin(dlon/2.)*math.sin(dlon/2.) 18 | 19 | b = 2. * math.atan2(math.sqrt(a),math.sqrt(1-a)) 20 | d=R*b 21 | return(d) 22 | 23 | 24 | parser=argparse.ArgumentParser(description="Parse the merged/index file and summarize") 25 | parser.add_argument("-f","--filein",required=True,action="store") 26 | parser.add_argument("-o","--out",required=True,action="store") 27 | #parser.add_argument("-c","--calendar",required=True,action="store") 28 | results=parser.parse_args() 29 | 30 | fname_out=results.out 31 | dat_in=pd.read_csv(results.filein,na_filter=False) 32 | dat_summ=pd.DataFrame() 33 | 34 | for f in sorted(list(set(dat_in['fname']))): 35 | dat_sub = dat_in[dat_in['fname'].str.match(f)] 36 | bnum_unique=set(dat_sub['bnum']) 37 | for b in sorted(list(bnum_unique)): 38 | dat_bsub=dat_sub.loc[dat_sub['bnum']==b] 39 | dat_bsub = dat_bsub.sort_values('time') 40 | sline=dat_bsub.iloc[0] 41 | eline=dat_bsub.iloc[len(dat_bsub)-1] 42 | calendar=sline['calendar'] 43 | stime=sline['time'] 44 | etime=eline['time'] 45 | ysnum=int(stime[:4]) 46 | msnum=int(stime[5:7]) 47 | dsnum=int(stime[8:10]) 48 | ssnum=int(stime[11:])/3600 49 | yenum=int(etime[:4]) 50 | menum=int(etime[5:7]) 51 | denum=int(etime[8:10]) 52 | senum=int(etime[11:])/3600 53 | if calendar=="360_day": 54 | sdate=cftime.Datetime360Day(ysnum,msnum,dsnum,ssnum) 55 | edate=cftime.Datetime360Day(yenum,menum,denum,senum) 56 | else: 57 | sdate=cftime.DatetimeNoLeap(ysnum,msnum,dsnum,ssnum) 58 | edate=cftime.DatetimeNoLeap(yenum,menum,denum,senum) 59 | td=edate-sdate 60 | num_days = td.days +1 61 | num_hrs = td.seconds/(3600*24) 62 | num_tot_days = num_days + num_hrs 63 | avg_clat=(sline['centlat']+eline['centlat'])/2. 
64 | dict_bsub={"var":sline['var'],"bnum":b,"region":sline['region'],"startdate":sline['time'],"enddate":eline['time'], 65 | "duration_days":num_tot_days,"mean_centlat":dat_bsub['centlat'].mean(), 66 | "start_centlat":sline['centlat'],"start_centlon":sline['centlon'], 67 | "end_centlat":eline['centlat'],"end_centlon":eline['centlon'], 68 | 69 | "dist_km":getDistanceFromLatLonInKm(sline['centlat'],sline['centlon'], 70 | eline['centlat'],eline['centlon']), 71 | "zonal_dist_km":getDistanceFromLatLonInKm(avg_clat,sline['centlon'], 72 | avg_clat,eline['centlon']), 73 | "min_area_km":dat_bsub['area_km'].min(),"max_area_km":dat_bsub['area_km'].max(), 74 | "mean_area_km":dat_bsub['area_km'].mean(),"min_AI":dat_bsub['AI'].min(), 75 | "max_AI":dat_bsub['AI'].max(),"mean_AI":dat_bsub['AI'].mean(), 76 | "min_BI":dat_bsub['BI'].min(),"max_BI":dat_bsub['BI'].max(),"mean_BI":dat_bsub['BI'].mean()} 77 | dict_bsub['zonal_speed_km']=dict_bsub['zonal_dist_km']/(dict_bsub['duration_days']*24.) #zonal speed from the zonal distance, as in summarize.R 78 | dict_bsub['fname']=sline['fname'] 79 | if (len(dat_bsub[dat_bsub['merged'].str.contains('YES')])>0): 80 | dict_bsub['merged'] = 'YES' 81 | else: 82 | dict_bsub['merged'] = 'NO' 83 | dat_summ=dat_summ.append(dict_bsub,ignore_index=True) 84 | #Write to file 85 | colnames_summ=dict_bsub.keys() 86 | print(colnames_summ) 87 | dat_summ=dat_summ[list(colnames_summ)] 88 | 89 | dat_summ.to_csv(fname_out,index=False,na_rep="_") 90 | print("Wrote {:}".format(fname_out)) 91 | 92 | -------------------------------------------------------------------------------- /analysis/blobmetrics/readfiles.R: -------------------------------------------------------------------------------- 1 | # This function reads the BlobStats files into a single data table 2 | read_stats_to_table<-function(flist,var="", 3 | rfn="",textfn="",csvfn="",df_name=""){ 4 | 5 | df_tot<-data.frame(NULL) 6 | df_name<-ifelse(df_name=="","df_tot",df_name) 7 | nline<-1 8 | for (f in flist){ 9 | print(sprintf("Reading in %s",f)) 10 | #Open each BlobStats file 11 | fl<-readLines(f) 12 | if (length(fl)>1){ 13 | #First line: Column names 14 | varnames<-unlist(strsplit(fl[1],split=",")) 15 | if (length(varnames)<2){ 16 | stop(sprintf("Check that file %s has the correct headers",f)) 17 | } 18 | tname<-varnames[1] 19 | varnames<-varnames[2:length(varnames)] 20 | fdat<-fl[2:length(fl)] 21 | for (l in fdat){ 22 | if (!is.na(pmatch("Blob",l))){ 23 | #Split the blobline 24 | blobline<-unlist(strsplit(l,split="\\s+")) 25 | bnum<-blobline[2] 26 | }else{ 27 | #Read in the info 28 | df_line<-data.frame(matrix(NA,nrow=1,ncol=length(varnames))) 29 | colnames(df_line)<-varnames 30 | #Line contains the time, then all the other data 31 | infoline<-unlist(strsplit(l,split="\\s+")) 32 | df_line[1,]<-as.numeric(infoline[2:length(infoline)]) 33 | #Format the date string 34 | date_vec<-unlist(strsplit(infoline[1],split="-")) 35 | #Year, Month, Day, Seconds 36 | d<-paste(date_vec[1:3],collapse="-") 37 | hr<-as.integer(as.numeric(date_vec[4])/(60*60)) 38 | df_tot[nline,"datehour"]<-sprintf("%s %02d:00:00",d,hr) 39 | 40 | for (v in 1:length(varnames)){ 41 | df_tot[nline,varnames[v]]<-as.numeric(df_line[1,v]) 42 | } 43 | if (!is.null(df_line[1,"area"])){ 44 | df_tot[nline,"area_km"]<-df_line[1,"area"]*4*pi*(6371^2) 45 | } 46 | if (var!=""){ 47 | df_tot[nline,"var"]<-var 48 | } 49 | df_tot[nline,"bnum"]<-bnum 50 | df_tot[nline,"file"]<-f 51 | nline<-nline+1 52 | } 53 | } 54 | }else{ 55 | print("No blobs found") 56 | } 57 | } 58 | 59 | if (rfn!=""){ 60 | 
assign(df_name,df_tot) 61 | 62 | save(list=c(df_name,"df_name"),file=rfn) 63 | print(sprintf("Wrote file %s",rfn)) 64 | } 65 | if (textfn!=""){ 66 | write.table(df_tot,file=textfn,sep="\t",row.names=FALSE,quote=FALSE) 67 | print(sprintf("Wrote file %s",textfn)) 68 | } 69 | if (csvfn!=""){ 70 | write.csv(df_tot,file=csvfn,row.names=FALSE,quote=FALSE) 71 | print(sprintf("Wrote file %s",csvfn)) 72 | } 73 | return(df_tot) 74 | } -------------------------------------------------------------------------------- /analysis/blobmetrics/readtable.R: -------------------------------------------------------------------------------- 1 | combine_dfs<-function(flist,ftype,rfn="",textfn="",csvfn="",df_outname=""){ 2 | df_data<-data.frame(NULL) 3 | df_outname<-ifelse(df_outname=="","df_data",df_outname) 4 | for (f in flist){ 5 | if (ftype=="R"){ 6 | load(f) 7 | df_tot<-get(df_name) 8 | }else if (ftype=="text"){ 9 | df_tot<-read.table(f,header=TRUE,sep="\t") 10 | }else if (ftype=="CSV"){ 11 | df_tot<-read.csv(f) 12 | }else{ 13 | stop("Invalid file type.") 14 | } 15 | df_data<-rbind(df_data,df_tot) 16 | } 17 | if (rfn!=""){ 18 | assign(df_outname,df_data) 19 | assign("df_name",df_outname) 20 | save(list=c(df_name,"df_name"),file=rfn) 21 | print(sprintf("Wrote file %s",rfn)) 22 | } 23 | if (textfn!=""){ 24 | write.table(df_data,file=textfn,sep="\t",row.names=FALSE,quote=FALSE) 25 | print(sprintf("Wrote file %s",textfn)) 26 | } 27 | if (csvfn!=""){ 28 | write.csv(df_data,file=csvfn,row.names=FALSE,quote=FALSE) #write the combined table df_data 29 | print(sprintf("Wrote file %s",csvfn)) 30 | } 31 | return(df_data) 32 | } -------------------------------------------------------------------------------- /analysis/blobmetrics/summarize.R: -------------------------------------------------------------------------------- 1 | #This file creates a summary table using BlobStats outputs 2 | #Input depends upon the variables that were measured 3 | 4 | #Minlon, maxlon, minlat, maxlat, centlat, centlon, area etc 5 | 6 | deg2rad<-function(deg) { 7 | return(deg * (pi/180)) 8 | } 9 | getDistanceFromLatLonInKm<-function(lat1,lon1,lat2,lon2) { 10 | R = 6371; 11 | dLat = deg2rad(lat2-lat1) 12 | dLon = deg2rad(lon2-lon1) 13 | a = sin(dLat/2) * sin(dLat/2) + 14 | cos(deg2rad(lat1)) * cos(deg2rad(lat2)) * 15 | sin(dLon/2) * sin(dLon/2) 16 | 17 | b = 2 * atan2(sqrt(a), sqrt(1-a)) 18 | d = R * b 19 | return(d) 20 | } 21 | 22 | #bcount<-1 23 | nline<-1 24 | gen_summary_table<-function(df_in,keep_merge=TRUE, 25 | rfn="",textfn="",csvfn="", 26 | df_summ_name=""){ 27 | df_summ<-data.frame(NULL) 28 | df_summ_name<-ifelse(df_summ_name=="","df_summ",df_summ_name) 29 | #print("Beginning analysis") 30 | #The output of these tables will be contingent upon whichever variables are available 31 | for (f in unique(df_in$file)){ 32 | dsub<-df_in[df_in$file==f,] 33 | for (v in unique(dsub$var)){ 34 | for (b in unique(dsub$bnum)){ 35 | dsub2<-df_in[(df_in$file==f & df_in$bnum==b & df_in$var==v),] 36 | dsub2<-dsub2[order(dsub2$datehour),] 37 | 38 | nhrs<-as.numeric(strftime(dsub2[2,"datehour"],format="%H"))-as.numeric(strftime(dsub2[1,"datehour"],format="%H")) 39 | merged_blob<-FALSE 40 | if (!is.null(dsub2$bnum2)){ 41 | #Check if there are any instances in which bnum!=bnum2 42 | for (a in 1:nrow(dsub2)){ 43 | if (dsub2[a,"bnum"]!=dsub2[a,"bnum2"]){ 44 | merged_blob<-TRUE 45 | break 46 | } 47 | } 48 | } 49 | 50 | sline<-dsub2[1,] 51 | eline<-dsub2[nrow(dsub2),] 52 | df_summ[nline,"startdate"]<-sline[1,"datehour"] 53 | df_summ[nline,"enddate"]<-eline[1,"datehour"] 54 | 
diff_days<-as.numeric(difftime(as.Date(eline[1,"datehour"]),as.Date(sline[1,"datehour"]),units="days")) 55 | hs<-as.numeric(strftime(sline[1,"datehour"],format="%H")) 56 | he<-as.numeric(strftime(eline[1,"datehour"],format="%H")) 57 | hdiff<-(he-hs + nhrs)/24 58 | df_summ[nline,"duration_days"]<-diff_days+(hdiff)+1 59 | df_summ[nline,"merged"]<-ifelse(merged_blob==FALSE,"NO","YES") 60 | if (!is.null(dsub2$centlat) & !is.null(dsub2$centlon)){ 61 | df_summ[nline,"start_centlat"]<-sline[1,"centlat"] 62 | df_summ[nline,"start_centlon"]<-sline[1,"centlon"] 63 | df_summ[nline,"end_centlat"]<-eline[1,"centlat"] 64 | df_summ[nline,"end_centlon"]<-eline[1,"centlon"] 65 | df_summ[nline,"dist_km"]<-getDistanceFromLatLonInKm(sline[1,"centlat"], 66 | sline[1,"centlon"], 67 | eline[1,"centlat"], 68 | eline[1,"centlon"]) 69 | avg_clat<-(sline[1,"centlat"]+eline[1,"centlat"])*0.5 70 | df_summ[nline,"zonal_dist_km"]<-getDistanceFromLatLonInKm(avg_clat, 71 | sline[1,"centlon"], 72 | avg_clat, 73 | eline[1,"centlon"]) 74 | df_summ[nline,"zonal_speed_kph"]<-df_summ[nline,"zonal_dist_km"]/(df_summ[nline,"duration_days"]*24) 75 | } 76 | if (!is.null(dsub2$area_km)){ 77 | df_summ[nline,"min_area"]<-min(dsub2$area_km) 78 | df_summ[nline,"max_area"]<-max(dsub2$area_km) 79 | df_summ[nline,"avg_area"]<-mean(dsub2$area_km) #average over this blob's rows (dsub2) 80 | } 81 | if (!is.null(dsub2$var)){ 82 | df_summ[nline,"var"]<-dsub2[1,"var"] 83 | } 84 | df_summ[nline,"bnum"]<-b 85 | df_summ[nline,"file"]<-f 86 | #df_summ[nline,"bnum2"]<-bcount 87 | #bcount<-bcount+1 88 | nline<-nline+1 89 | } 90 | } 91 | 92 | } 93 | if (keep_merge==FALSE){ 94 | df_summ<-df_summ[df_summ$merged=="NO",] 95 | } 96 | if (rfn!=""){ 97 | assign(df_summ_name,df_summ) 98 | save(list=c(df_summ_name),file=rfn) 99 | print(sprintf("Wrote file %s",rfn)) 100 | } 101 | if (textfn!=""){ 102 | write.table(df_summ,file=textfn,sep="\t",row.names=FALSE,quote=FALSE) 103 | print(sprintf("Wrote file %s",textfn)) 104 | } 105 | if (csvfn!=""){ 106 | write.csv(df_summ,file=csvfn,row.names=FALSE,quote=FALSE) 107 | print(sprintf("Wrote file %s",csvfn)) 108 | } 109 | #print("Returning summary table") 110 | return(df_summ) 111 | } 112 | 113 | 114 | -------------------------------------------------------------------------------- /aux/tc_basins_v1.dat: -------------------------------------------------------------------------------- 1 | # Regular latitude-longitude polygonal regions delineating TC basins 2 | # 3 | # Structure of file is as follows: 4 | # 5 | # <number of regions> 6 | # <name>,<number of vertices>,<lon1>,...,<lonN>,<lat1>,...,<latN> 7 | # 8 | # Note that coordinate of vertices must be specified in counter-clockwise order 9 | # 10 | 10 11 | "S Indian",4,136,20,20,136,0,0,-90,-90 12 | "SW Pacific",4,216,136,136,216,0,0,-90,-90 13 | "NW Pacific",5,180,98.75,98.75,104,180,90,90,9,0,0 14 | "N Indian",5,98.75,20,20,104,98.75,90,90,0,0,9 15 | "N Atlantic",13,360,200,200,224,237,237,263,268,278,284,284,282,360,90,90,62,62,50,43,17,17,8.5,8.5,3,0,0 16 | "N Atlantic",4,20,0,0,20,90,90,0,0 17 | "NE Pacific",13,200,180,180,282,284,284,278,268,263,237,237,224,200,90,90,0,0,3,8.5,8.5,17,17,43,50,62,62 18 | "SE Pacific",6,298,216,216,289,289,298,0,0,-90,-90,-52,-19.5 19 | "S Atlantic",4,20,0,0,20,0,0,-90,-90 20 | "S Atlantic",6,360,298,298,289,289,360,0,0,-19.5,-52,-90,-90 21 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.aux: -------------------------------------------------------------------------------- 1 | \relax 2 | \providecommand\hyper@newdestlabel[2]{} 3 | \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} 4 | \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined 5 | \global\let\oldcontentsline\contentsline 6 | \gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} 7 | \global\let\oldnewlabel\newlabel 8 | \gdef\newlabel#1#2{\newlabelxx{#1}#2} 9 | \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} 10 | \AtEndDocument{\ifx\hyper@anchor\@undefined 11 | \let\contentsline\oldcontentsline 12 | \let\newlabel\oldnewlabel 13 | \fi} 14 | \fi} 15 | \global\let\hyper@last\relax 16 | \gdef\HyperFirstAtBeginDocument#1{#1} 17 | \providecommand\HyField@AuxAddToFields[1]{} 18 | \providecommand\HyField@AuxAddToCoFields[2]{} 19 | \@writefile{toc}{\contentsline {section}{\numberline {1}Setting up TempestExtremes}{2}{section.1}} 20 | \@writefile{toc}{\contentsline {section}{\numberline {2}Objective blocking detection methods}{2}{section.2}} 21 | \@writefile{toc}{\contentsline {paragraph}{Geopotential height gradient (TM90)}{2}{section*.2}} 22 | \@writefile{toc}{\contentsline {paragraph}{Anomaly methods (S04 and DG83)}{2}{section*.3}} 23 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Blocking Event Detection: The StitchBlobs Framework}{3}{subsection.2.1}} 24 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.2}General workflow}{3}{subsection.2.2}} 25 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.3}BlockingGHG}{4}{subsection.2.3}} 26 | \newlabel{ghg}{{2.3}{4}{BlockingGHG}{subsection.2.3}{}} 27 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.3.1}File input and output}{5}{subsubsection.2.3.1}} 28 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.4}Anomalies, Step 1 (S04): Input files: BlockingPV}{5}{subsection.2.4}} 29 | \newlabel{PV}{{2.4}{5}{Anomalies, Step 1 (S04): Input files: BlockingPV}{subsection.2.4}{}} 30 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.4.1}File input and output}{6}{subsubsection.2.4.1}} 31 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.5}Anomalies, Step 2: Long Term Daily Mean: BlockingDFT}{6}{subsection.2.5}} 32 | \newlabel{avg}{{2.5}{6}{Anomalies, Step 2: Long Term Daily Mean: BlockingDFT}{subsection.2.5}{}} 33 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.5.1}File input and output}{7}{subsubsection.2.5.1}} 34 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.6}Anomalies, Step 3: Unsmoothed and Smoothed Anomalies: BlockingDevs}{7}{subsection.2.6}} 35 | \newlabel{dev}{{2.6}{7}{Anomalies, Step 3: Unsmoothed and Smoothed Anomalies: BlockingDevs}{subsection.2.6}{}} 36 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.6.1}File input and output}{8}{subsubsection.2.6.1}} 37 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.7}Anomalies, Step 4a: Long Term Daily Mean of Anomalies: BlockingDFT}{8}{subsection.2.7}} 38 | \newlabel{davg}{{2.7}{8}{Anomalies, Step 4a: Long Term Daily Mean of Anomalies: BlockingDFT}{subsection.2.7}{}} 39 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.8}Anomalies, Step 4b: Spatiotemporally Varying Threshold: BlockingThresh}{8}{subsection.2.8}} 40 | \newlabel{thresh}{{2.8}{8}{Anomalies, Step 4b: Spatiotemporally Varying Threshold: BlockingThresh}{subsection.2.8}{}} 41 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.8.1}File input and output}{9}{subsubsection.2.8.1}} 42 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.9}Anomalies, Step 6: Normalized Anomalies: BlockingNormDevs}{9}{subsection.2.9}} 43 | 
\newlabel{ndev}{{2.9}{9}{Anomalies, Step 6: Normalized Anomalies: BlockingNormDevs}{subsection.2.9}{}} 44 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.9.1}File input and output}{10}{subsubsection.2.9.1}} 45 | \@writefile{toc}{\contentsline {subsection}{\numberline {2.10}Optional flags}{10}{subsection.2.10}} 46 | \newlabel{option}{{2.10}{10}{Optional flags}{subsection.2.10}{}} 47 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.10.1}Variable and axis names}{10}{subsubsection.2.10.1}} 48 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.10.2}File naming conventions}{11}{subsubsection.2.10.2}} 49 | \@writefile{toc}{\contentsline {subsubsection}{\numberline {2.10.3}Booleans}{11}{subsubsection.2.10.3}} 50 | \@writefile{toc}{\contentsline {section}{\numberline {3}DetectCyclonesUnstructured}{11}{section.3}} 51 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Variable Specification}{13}{subsection.3.1}} 52 | \newlabel{sec:VariableSpecification}{{3.1}{13}{Variable Specification}{subsection.3.1}{}} 53 | \@writefile{toc}{\contentsline {subsection}{\numberline {3.2}MPI Support}{14}{subsection.3.2}} 54 | \newlabel{sec:MPI}{{3.2}{14}{MPI Support}{subsection.3.2}{}} 55 | \@writefile{toc}{\contentsline {section}{\numberline {4}StitchNodes}{14}{section.4}} 56 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.dvi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/doc/TempestExtremes-UserGuide.dvi -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.idx: -------------------------------------------------------------------------------- 1 | \indexentry{Tibaldi and Molteni 1990|hyperpage}{2} 2 | \indexentry{Schwierz et al 2004|hyperpage}{3} 3 | \indexentry{Dole and Gordon 1983|hyperpage}{3} 4 | \indexentry{StitchBlobs|hyperpage}{3} 5 | \indexentry{BlobStats|hyperpage}{3} 6 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.ilg: -------------------------------------------------------------------------------- 1 | This is makeindex, version 2.15 [TeX Live 2015] (kpathsea + Thai support). 2 | Scanning input file TempestExtremes-UserGuide.idx....done (5 entries accepted, 0 rejected). 3 | Sorting entries....done (11 comparisons). 4 | Generating output file TempestExtremes-UserGuide.ind....done (18 lines written, 0 warnings). 5 | Output written in TempestExtremes-UserGuide.ind. 6 | Transcript written in TempestExtremes-UserGuide.ilg. 
7 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.ind: -------------------------------------------------------------------------------- 1 | \begin{theindex} 2 | 3 | \item BlobStats, 3 4 | 5 | \indexspace 6 | 7 | \item Dole and Gordon 1983, 2 8 | 9 | \indexspace 10 | 11 | \item Schwierz et al 2004, 2 12 | \item StitchBlobs, 3 13 | 14 | \indexspace 15 | 16 | \item Tibaldi and Molteni 1990, 2 17 | 18 | \end{theindex} 19 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.out: -------------------------------------------------------------------------------- 1 | \BOOKMARK [1][-]{section.1}{Setting up TempestExtremes}{}% 1 2 | \BOOKMARK [1][-]{section.2}{Objective blocking detection methods}{}% 2 3 | \BOOKMARK [2][-]{subsection.2.1}{Blocking Event Detection: The StitchBlobs Framework}{section.2}% 3 4 | \BOOKMARK [2][-]{subsection.2.2}{General workflow}{section.2}% 4 5 | \BOOKMARK [2][-]{subsection.2.3}{BlockingGHG}{section.2}% 5 6 | \BOOKMARK [3][-]{subsubsection.2.3.1}{File input and output}{subsection.2.3}% 6 7 | \BOOKMARK [2][-]{subsection.2.4}{Anomalies, Step 1 \(S04\): Input files: BlockingPV}{section.2}% 7 8 | \BOOKMARK [3][-]{subsubsection.2.4.1}{File input and output}{subsection.2.4}% 8 9 | \BOOKMARK [2][-]{subsection.2.5}{Anomalies, Step 2: Long Term Daily Mean: BlockingDFT}{section.2}% 9 10 | \BOOKMARK [3][-]{subsubsection.2.5.1}{File input and output}{subsection.2.5}% 10 11 | \BOOKMARK [2][-]{subsection.2.6}{Anomalies, Step 3: Unsmoothed and Smoothed Anomalies: BlockingDevs}{section.2}% 11 12 | \BOOKMARK [3][-]{subsubsection.2.6.1}{File input and output}{subsection.2.6}% 12 13 | \BOOKMARK [2][-]{subsection.2.7}{Anomalies, Step 4a: Long Term Daily Mean of Anomalies: BlockingDFT}{section.2}% 13 14 | \BOOKMARK [2][-]{subsection.2.8}{Anomalies, Step 4b: Spatiotemporally Varying Threshold: BlockingThresh}{section.2}% 14 15 | \BOOKMARK [3][-]{subsubsection.2.8.1}{File input and output}{subsection.2.8}% 15 16 | \BOOKMARK [2][-]{subsection.2.9}{Anomalies, Step 6: Normalized Anomalies: BlockingNormDevs}{section.2}% 16 17 | \BOOKMARK [3][-]{subsubsection.2.9.1}{File input and output}{subsection.2.9}% 17 18 | \BOOKMARK [2][-]{subsection.2.10}{Optional flags}{section.2}% 18 19 | \BOOKMARK [3][-]{subsubsection.2.10.1}{Variable and axis names}{subsection.2.10}% 19 20 | \BOOKMARK [3][-]{subsubsection.2.10.2}{File naming conventions}{subsection.2.10}% 20 21 | \BOOKMARK [3][-]{subsubsection.2.10.3}{Booleans}{subsection.2.10}% 21 22 | \BOOKMARK [1][-]{section.3}{DetectCyclonesUnstructured}{}% 22 23 | \BOOKMARK [2][-]{subsection.3.1}{Variable Specification}{section.3}% 23 24 | \BOOKMARK [2][-]{subsection.3.2}{MPI Support}{section.3}% 24 25 | \BOOKMARK [1][-]{section.4}{StitchNodes}{}% 25 26 | -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/doc/TempestExtremes-UserGuide.pdf -------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.synctex.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClimateGlobalChange/tempestextremes/8c0b5af91a9518fba597a80b08d92ebe69cabbc3/doc/TempestExtremes-UserGuide.synctex.gz 
-------------------------------------------------------------------------------- /doc/TempestExtremes-UserGuide.toc: -------------------------------------------------------------------------------- 1 | \contentsline {section}{\numberline {1}Setting up TempestExtremes}{2}{section.1} 2 | \contentsline {section}{\numberline {2}Objective blocking detection methods}{2}{section.2} 3 | \contentsline {paragraph}{Geopotential height gradient (TM90)}{2}{section*.2} 4 | \contentsline {paragraph}{Anomaly methods (S04 and DG83)}{2}{section*.3} 5 | \contentsline {subsection}{\numberline {2.1}Blocking Event Detection: The StitchBlobs Framework}{3}{subsection.2.1} 6 | \contentsline {subsection}{\numberline {2.2}General workflow}{3}{subsection.2.2} 7 | \contentsline {subsection}{\numberline {2.3}BlockingGHG}{4}{subsection.2.3} 8 | \contentsline {subsubsection}{\numberline {2.3.1}File input and output}{5}{subsubsection.2.3.1} 9 | \contentsline {subsection}{\numberline {2.4}Anomalies, Step 1 (S04): Input files: BlockingPV}{5}{subsection.2.4} 10 | \contentsline {subsubsection}{\numberline {2.4.1}File input and output}{6}{subsubsection.2.4.1} 11 | \contentsline {subsection}{\numberline {2.5}Anomalies, Step 2: Long Term Daily Mean: BlockingDFT}{6}{subsection.2.5} 12 | \contentsline {subsubsection}{\numberline {2.5.1}File input and output}{7}{subsubsection.2.5.1} 13 | \contentsline {subsection}{\numberline {2.6}Anomalies, Step 3: Unsmoothed and Smoothed Anomalies: BlockingDevs}{7}{subsection.2.6} 14 | \contentsline {subsubsection}{\numberline {2.6.1}File input and output}{8}{subsubsection.2.6.1} 15 | \contentsline {subsection}{\numberline {2.7}Anomalies, Step 4a: Long Term Daily Mean of Anomalies: BlockingDFT}{8}{subsection.2.7} 16 | \contentsline {subsection}{\numberline {2.8}Anomalies, Step 4b: Spatiotemporally Varying Threshold: BlockingThresh}{8}{subsection.2.8} 17 | \contentsline {subsubsection}{\numberline {2.8.1}File input and output}{9}{subsubsection.2.8.1} 18 | \contentsline {subsection}{\numberline {2.9}Anomalies, Step 6: Normalized Anomalies: BlockingNormDevs}{9}{subsection.2.9} 19 | \contentsline {subsubsection}{\numberline {2.9.1}File input and output}{10}{subsubsection.2.9.1} 20 | \contentsline {subsection}{\numberline {2.10}Optional flags}{10}{subsection.2.10} 21 | \contentsline {subsubsection}{\numberline {2.10.1}Variable and axis names}{10}{subsubsection.2.10.1} 22 | \contentsline {subsubsection}{\numberline {2.10.2}File naming conventions}{11}{subsubsection.2.10.2} 23 | \contentsline {subsubsection}{\numberline {2.10.3}Booleans}{11}{subsubsection.2.10.3} 24 | \contentsline {section}{\numberline {3}DetectCyclonesUnstructured}{11}{section.3} 25 | \contentsline {subsection}{\numberline {3.1}Variable Specification}{13}{subsection.3.1} 26 | \contentsline {subsection}{\numberline {3.2}MPI Support}{14}{subsection.3.2} 27 | \contentsline {section}{\numberline {4}StitchNodes}{14}{section.4} 28 | -------------------------------------------------------------------------------- /quick_make_unix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 2025, Hongyu Chen 3 | 4 | 5 | 6 | 7 | # This script should always be run under the root directory of the project. 8 | # It provides a general, quick command to build the project. 9 | # Please ensure the required NetCDF and HDF5 (and MPI, if needed) are available. 
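# Example invocation (illustrative; run from the repository root):
#   ./quick_make_unix.sh
# Final executables are installed to ${INSTALL_PREFIX}/bin (./bin by default);
# edit the configuration variables below before running if needed.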
10 | 11 | # Configuration Options 12 | BUILD_TYPE="Release" # "Debug" or "Release" 13 | ENABLE_MPI="ON" # "ON" or "OFF" 14 | OPTIMIZATION_LEVEL="-O3" # Options: "-O0", "-O1", "-O2", "-O3", "-Ofast" 15 | DEBUG_SYMBOLS="OFF" # "ON" to include debug symbols (-g), "OFF" to exclude 16 | INSTALL_PREFIX="" # Specify the installation directory. 17 | # If left blank, it defaults to the project root (TEMPEST_EXTREMES_SOURCE_DIR) 18 | # and final executables will be installed in TEMPEST_EXTREMES_SOURCE_DIR/bin. 19 | 20 | ./remove_depend.sh 21 | 22 | 23 | 24 | # Detect system type 25 | OS_TYPE="$(uname -s)" 26 | SYSTEM_TYPE="" 27 | 28 | if [ "$OS_TYPE" == "Darwin" ]; then 29 | SYSTEM_TYPE="MacOS/Linux" 30 | elif [ "$OS_TYPE" == "Linux" ]; then 31 | # Check for NERSC Perlmutter: expect NERSC_HOST set to "perlmutter" 32 | if [ -n "$NERSC_HOST" ] && [ "$NERSC_HOST" = "perlmutter" ]; then 33 | SYSTEM_TYPE="NERSC Perlmutter" 34 | # Check if hostname contains "derecho" (case-insensitive) 35 | elif echo "$HOSTNAME" | grep -qi "derecho"; then 36 | SYSTEM_TYPE="NCAR Derecho" 37 | else 38 | SYSTEM_TYPE="MacOS/Linux" 39 | fi 40 | elif [[ "$OS_TYPE" == *"CYGWIN"* || "$OS_TYPE" == *"MINGW"* || "$OS_TYPE" == *"MSYS"* ]]; then 41 | SYSTEM_TYPE="Windows" 42 | else 43 | SYSTEM_TYPE="Unknown" 44 | fi 45 | 46 | echo "Detected system: ${SYSTEM_TYPE}" 47 | 48 | # System-specific module loading 49 | if [ "$SYSTEM_TYPE" = "MacOS/Linux" ]; then 50 | echo "Running on MacOS/Linux. Proceeding with default configuration..." 51 | # No additional module load commands needed. 52 | elif [ "$SYSTEM_TYPE" = "NERSC Perlmutter" ]; then 53 | echo "Loading modules for NERSC Perlmutter..." 54 | module load cray-hdf5 55 | module load cray-netcdf 56 | elif [ "$SYSTEM_TYPE" = "NCAR Derecho" ]; then 57 | echo "Loading modules for NCAR Derecho..." 58 | module load cmake 59 | module load ncarenv 60 | module load ncarcompilers 61 | module load intel 62 | module load cray-mpich 63 | module load netcdf 64 | elif [ "$SYSTEM_TYPE" = "Windows" ]; then 65 | echo "Windows detected. Please follow the README instructions for Windows build or manually run the commands in your bash environment." 66 | exit 1 67 | else 68 | echo "Unable to detect the system. Please refer to the README instructions or manually run the commands in this script." 69 | exit 1 70 | fi 71 | 72 | # NOTE: dependency cleanup and the required NetCDF/HDF5 module loads were 73 | # already performed above (see the system-specific branch). 74 | 75 | 76 | 77 | 78 | # Define the project root directory (where this script is) 79 | SRC_DIR="$(cd "$(dirname "$0")" && pwd)" 80 | 81 | # Remove any in-source CMake artifacts 82 | rm -rf "$SRC_DIR/CMakeCache.txt" "$SRC_DIR/CMakeFiles" "$SRC_DIR/Makefile" "$SRC_DIR/cmake_install.cmake" 83 | 84 | # Set INSTALL_PREFIX to SRC_DIR if not provided 85 | if [ -z "$INSTALL_PREFIX" ]; then 86 | INSTALL_PREFIX="$SRC_DIR" 87 | fi 88 | 89 | # Clean up the installed binary directory (./bin) before building 90 | rm -rf "$INSTALL_PREFIX/bin" 91 | 92 | # Use "./build" as the out-of-source build directory 93 | BUILD_DIR="${SRC_DIR}/build" 94 | rm -rf "$BUILD_DIR" 95 | mkdir "$BUILD_DIR" 96 | 97 | DEBUG_FLAGS="" 98 | if [ "$DEBUG_SYMBOLS" == "ON" ]; then 99 | DEBUG_FLAGS="-g" 100 | fi 101 | 102 | cd "$BUILD_DIR" || { echo "Build directory not found: $BUILD_DIR"; exit 1; } 103 | 104 | # Configure the project: 105 | # - The source directory is set to SRC_DIR. 106 | # - The installation prefix is set to INSTALL_PREFIX so that install() will copy targets to ${INSTALL_PREFIX}/bin.
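# NOTE: CMAKE_CXX_COMPILER below is set to "CC", the C++ compiler wrapper
# provided on Cray systems such as NERSC Perlmutter and NCAR Derecho. On a
# generic MacOS/Linux machine, substitute an available compiler (e.g. g++ or
# clang++) if the "CC" wrapper does not exist on your system.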
107 | cmake -DCMAKE_CXX_COMPILER=CC \ 108 | -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ 109 | -DCMAKE_CXX_FLAGS="${OPTIMIZATION_LEVEL} ${DEBUG_FLAGS}" \ 110 | -DENABLE_MPI=${ENABLE_MPI} \ 111 | -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \ 112 | "$SRC_DIR" 113 | 114 | if [ $? -ne 0 ]; then 115 | echo "CMake configuration failed. Exiting." 116 | exit 1 117 | fi 118 | 119 | # Build and install the project from the build directory 120 | make && make install 121 | 122 | if [ $? -ne 0 ]; then 123 | echo "Build or installation failed. Exiting." 124 | exit 1 125 | fi 126 | 127 | echo "Build and installation completed successfully." 128 | 129 | # (Optional) Cleanup step: 130 | # The build directory (./build) is used for development and debugging. 131 | # It contains all intermediate build files and temporary artifacts. 132 | # The final, user-deliverable executables are installed to ./bin. 133 | # It is not recommended to mix these directories. 134 | 135 | # For end users who want a clean structure, you can remove the build directory. 136 | # Developers or those debugging might prefer to keep it for faster incremental builds. 137 | make clean 138 | echo "Cleaned up the ${SRC_DIR}/build directory. All executables are located in ${INSTALL_PREFIX}/bin." 139 | 140 | -------------------------------------------------------------------------------- /remove_depend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | rm -rf src/atmrivers/depend 3 | rm -rf src/base/depend 4 | rm -rf src/blobmetrics/depend 5 | rm -rf src/blobs/depend 6 | rm -rf src/blocking/depend 7 | rm -rf src/netcdf-cxx-4.2/depend 8 | rm -rf src/nodes/depend 9 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. (See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | add_subdirectory(netcdf-cxx-4.2) 7 | add_subdirectory(base) 8 | add_subdirectory(blobs) 9 | add_subdirectory(blocking) 10 | add_subdirectory(nodes) 11 | add_subdirectory(sandbox) 12 | add_subdirectory(util) 13 | -------------------------------------------------------------------------------- /src/base/Announce.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file Announce.h 4 | /// \author Paul Ullrich 5 | /// \version July 26, 2010 6 | /// 7 | /// 8 | /// Functions for making announcements to standard output safely when 9 | /// using MPI. 10 | /// 11 | /// 12 | /// Copyright 2000-2010 Paul Ullrich 13 | /// 14 | /// This file is distributed as part of the Tempest source code package. 15 | /// Permission is granted to use, copy, modify and distribute this 16 | /// source code and its documentation under the terms of the GNU General 17 | /// Public License. This software is provided "as is" without express 18 | /// or implied warranty.
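// Example (illustrative) usage of the announcement interface declared below;
// the announced strings and szFilename are hypothetical:
//
//   AnnounceStartBlock("Reading input files");
//   Announce("Processing file %s", szFilename);
//   AnnounceEndBlock("Done");
//
// (Blocks may be nested.)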
19 | /// 20 | 21 | #ifndef _ANNOUNCE_H_ 22 | #define _ANNOUNCE_H_ 23 | 24 | #include <cstdio> 25 | 26 | /////////////////////////////////////////////////////////////////////////////// 27 | 28 | extern int g_iVerbosityLevel; 29 | 30 | extern FILE * g_fpOutputBuffer; 31 | 32 | extern bool g_fOnlyOutputOnRankZero; 33 | 34 | /////////////////////////////////////////////////////////////////////////////// 35 | 36 | /// 37 | /// Get the output buffer. 38 | /// 39 | FILE * AnnounceGetOutputBuffer(); 40 | 41 | /// 42 | /// Set the output buffer. 43 | /// 44 | void AnnounceSetOutputBuffer(FILE * fpOutputBuffer); 45 | 46 | /// 47 | /// Set the verbosity level. 48 | /// 49 | void AnnounceSetVerbosityLevel(int iVerbosityLevel); 50 | 51 | /// 52 | /// Only output on rank zero. 53 | /// 54 | void AnnounceOnlyOutputOnRankZero(); 55 | 56 | /// 57 | /// Allow output on all ranks. 58 | /// 59 | void AnnounceOutputOnAllRanks(); 60 | 61 | /// 62 | /// Begin a new announcement block. 63 | /// 64 | void AnnounceStartBlock(const char * szText, ...); 65 | 66 | /// 67 | /// Begin a new announcement block. 68 | /// 69 | void AnnounceStartBlock(int iVerbosity, const char * szText); 70 | 71 | /// 72 | /// End an announcement block. 73 | /// 74 | void AnnounceEndBlock(const char * szText, ...); 75 | 76 | /// 77 | /// End an announcement block. 78 | /// 79 | void AnnounceEndBlock(int iVerbosity, const char * szText); 80 | 81 | /// 82 | /// Make an announcement. 83 | /// 84 | void Announce(const char * szText, ...); 85 | 86 | /// 87 | /// Make an announcement. 88 | /// 89 | void Announce(int iVerbosity, const char * szText, ...); 90 | 91 | /// 92 | /// Create a banner / separator containing the specified text. 93 | /// 94 | void AnnounceBanner(const char * szText = NULL); 95 | 96 | /////////////////////////////////////////////////////////////////////////////// 97 | 98 | #endif 99 | 100 | -------------------------------------------------------------------------------- /src/base/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0.
(See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND FILES 7 | Announce.h 8 | Announce.cpp 9 | AutoCurator.h 10 | AutoCurator.cpp 11 | CommandLine.h 12 | Constants.h 13 | CoordTransforms.h 14 | DataArray1D.h 15 | DataArray2D.h 16 | DataArray3D.h 17 | DataArray4D.h 18 | DataOp.h 19 | DataOp.cpp 20 | Defines.h 21 | Exception.h 22 | FilenameList.h 23 | FiniteElementTools.h 24 | FiniteElementTools.cpp 25 | FourierTransforms.h 26 | FunctionTimer.h 27 | FunctionTimer.cpp 28 | GaussLobattoQuadrature.h 29 | GaussLobattoQuadrature.cpp 30 | GaussQuadrature.h 31 | GaussQuadrature.cpp 32 | GridElements.h 33 | GridElements.cpp 34 | kdtree.h 35 | kdtree.cpp 36 | LatLonBox.h 37 | LegendrePolynomial.h 38 | LegendrePolynomial.cpp 39 | lodepng.h 40 | lodepng.cpp 41 | MathExpression.h 42 | MeshUtilities.h 43 | MeshUtilities.cpp 44 | MeshUtilitiesFuzzy.h 45 | MeshUtilitiesFuzzy.cpp 46 | NcFileVector.h 47 | NcFileVector.cpp 48 | NetCDFUtilities.h 49 | NetCDFUtilities.cpp 50 | NodeFileUtilities.h 51 | NodeFileUtilities.cpp 52 | order32.h 53 | PolynomialInterp.h 54 | PolynomialInterp.cpp 55 | RLLPolygonArray.h 56 | RLLPolygonArray.cpp 57 | ShpFile.h 58 | ShpFile.cpp 59 | SimpleGrid.h 60 | SimpleGrid.cpp 61 | SimpleGridUtilities.h 62 | SimpleGridUtilities.cpp 63 | SparseMatrix.h 64 | STLStringHelper.h 65 | Subscript.h 66 | ThresholdOp.h 67 | ThresholdOp.cpp 68 | TimeMatch.h 69 | TimeObj.h 70 | TimeObj.cpp 71 | Units.h 72 | Variable.h 73 | Variable.cpp 74 | ) 75 | 76 | add_library( 77 | extremesbase 78 | STATIC 79 | ${FILES} 80 | ) 81 | 82 | target_include_directories(extremesbase 83 | PRIVATE 84 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 85 | ${NetCDF_C_INCLUDE_DIR} 86 | ${MPI_CXX_INCLUDE_DIRS} 87 | ) 88 | 89 | set_target_properties( 90 | extremesbase PROPERTIES 91 | LINKER_LANGUAGE CXX 92 | ) 93 | 94 | # Install the static library to the "./lib" folder 95 | install( 96 | TARGETS extremesbase 97 | ARCHIVE DESTINATION lib 98 | ) 99 | -------------------------------------------------------------------------------- /src/base/Constants.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file Constants.h 4 | /// \author Paul Ullrich 5 | /// \version September 17, 2019 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 
15 | /// 16 | 17 | #ifndef _CONSTANTS_H_ 18 | #define _CONSTANTS_H_ 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | // Standard Earth Radius in meters 22 | static const double EarthRadius = 6.37122e6; 23 | 24 | /////////////////////////////////////////////////////////////////////////////// 25 | // Rotation rate of the planet, in inverse seconds 26 | static const double EarthOmega = 7.2921e-5; 27 | 28 | /////////////////////////////////////////////////////////////////////////////// 29 | // Gravitational acceleration at the surface, in meters per second squared 30 | static const double EarthGravity = 9.80616; 31 | 32 | /////////////////////////////////////////////////////////////////////////////// 33 | // Earth standard atmospheric pressure, in Pascals 34 | static const double EarthAtmosphericPressure = 101325.0; 35 | 36 | /////////////////////////////////////////////////////////////////////////////// 37 | // Knots per meter/second 38 | static const double KnotsPerMetersPerSecond = 1.94384; 39 | 40 | #endif // _CONSTANTS_H_ 41 | 42 | -------------------------------------------------------------------------------- /src/base/Defines.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file Defines.h 4 | /// \author Paul Ullrich 5 | /// \version September 17, 2019 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _DEFINES_H_ 18 | #define _DEFINES_H_ 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | 22 | typedef double Real; 23 | 24 | /////////////////////////////////////////////////////////////////////////////// 25 | // 26 | // Defines for floating point tolerance. 27 | // 28 | static const Real HighTolerance = 1.0e-10; 29 | static const Real ReferenceTolerance = 1.0e-12; 30 | 31 | /////////////////////////////////////////////////////////////////////////////// 32 | // 33 | // These defines determine the behavior of GenerateOverlapMesh. 34 | // 35 | // If OVERLAPMESH_RETAIN_REPEATED_NODES is specified this function will make 36 | // no effort to remove repeated nodes during the overlap mesh generation 37 | // calculation. 38 | // 39 | // If OVERLAPMESH_USE_UNSORTED_MAP is specified node removal will use an 40 | // std::unsorted_map() which may nonetheless produce some coincident nodes 41 | // if some very unlikely conditions are met. 42 | // 43 | // If OVERLAPMESH_USE_NODE_MULTIMAP is specified node removal will use the 44 | // node_multimap_3d which is guaranteed to produce no coincident nodes (but 45 | // is the slowest). 46 | // 47 | #define OVERLAPMESH_RETAIN_REPEATED_NODES 48 | //#define OVERLAPMESH_USE_UNSORTED_MAP 49 | //#define OVERLAPMESH_USE_NODE_MULTIMAP 50 | 51 | /////////////////////////////////////////////////////////////////////////////// 52 | // 53 | // This define specifies the bin width for the std::unsorted_map() and 54 | // node_multimap_3d. 
55 | // 56 | #define OVERLAPMESH_BIN_WIDTH 1.0e-1 57 | 58 | /////////////////////////////////////////////////////////////////////////////// 59 | // 60 | // Round input time vector to nearest minute when loaded from a file. 61 | // This is to prevent issues with "rounding down" that may occur when times 62 | // are specified with a floating point type. 63 | // 64 | #define ROUND_TIMES_TO_NEAREST_MINUTE 65 | 66 | /////////////////////////////////////////////////////////////////////////////// 67 | 68 | #endif 69 | 70 | -------------------------------------------------------------------------------- /src/base/Exception.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file Exception.h 4 | /// \author Paul Ullrich 5 | /// \version July 26, 2010 6 | /// 7 | /// 8 | /// This file provides functionality for formatted Exceptions. 9 | /// 10 | /// 11 | /// Copyright 2000-2010 Paul Ullrich 12 | /// 13 | /// This file is distributed as part of the Tempest source code package. 14 | /// Permission is granted to use, copy, modify and distribute this 15 | /// source code and its documentation under the terms of the GNU General 16 | /// Public License. This software is provided "as is" without express 17 | /// or implied warranty. 18 | /// 19 | 20 | #ifndef _EXCEPTION_H_ 21 | #define _EXCEPTION_H_ 22 | 23 | /////////////////////////////////////////////////////////////////////////////// 24 | 25 | #include <cstdlib> 26 | 27 | /////////////////////////////////////////////////////////////////////////////// 28 | 29 | #define _EXCEPTION() \ 30 | throw Exception(__FILE__, __LINE__) 31 | 32 | #define _EXCEPTIONT(text) \ 33 | throw Exception(__FILE__, __LINE__, text) 34 | 35 | #define _EXCEPTION1(text, var1) \ 36 | throw Exception(__FILE__, __LINE__, text, var1) 37 | 38 | #define _EXCEPTION2(text, var1, var2) \ 39 | throw Exception(__FILE__, __LINE__, text, var1, var2) 40 | 41 | #define _EXCEPTION3(text, var1, var2, var3) \ 42 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3) 43 | 44 | #define _EXCEPTION4(text, var1, var2, var3, var4) \ 45 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3, var4) 46 | 47 | #define _EXCEPTION5(text, var1, var2, var3, var4, var5) \ 48 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3, var4, var5) 49 | 50 | #define _EXCEPTION6(text, var1, var2, var3, var4, var5, var6) \ 51 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3, var4, var5, var6) 52 | 53 | #define _EXCEPTION7(text, var1, var2, var3, var4, var5, var6, var7) \ 54 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3, var4, var5, var6, var7) 55 | 56 | #define _EXCEPTION8(text, var1, var2, var3, var4, var5, var6, var7, var8) \ 57 | throw Exception(__FILE__, __LINE__, text, var1, var2, var3, var4, var5, var6, var7, var8) 58 | 59 | #define _ASSERT(x) \ 60 | if (!(x)) {_EXCEPTIONT("Assertion failure");} 61 | 62 | /////////////////////////////////////////////////////////////////////////////// 63 | 64 | #include <string> 65 | #include <cstdio> 66 | #include <cstdarg> 67 | 68 | /////////////////////////////////////////////////////////////////////////////// 69 | 70 | /// 71 | /// An Exception is a formatted error message that is generated from a 72 | /// throw directive. This class is automatically generated when using 73 | /// the _EXCEPTION macros. 74 | /// 75 | class Exception { 76 | 77 | public: 78 | /// 79 | /// Maximum buffer size for exception strings.
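// Example (illustrative) of how the _EXCEPTION macros defined above are used;
// the condition and variable name are hypothetical:
//
//   if (nTimeSteps <= 0) {
//       _EXCEPTION1("Invalid number of time steps (%i)", nTimeSteps);
//   }
//
// The thrown Exception is typically caught at the top level and reported
// via Exception::ToString().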
80 | /// 81 | static const int ExceptionBufferSize = 1024; 82 | 83 | public: 84 | /// 85 | /// Generic constructor. 86 | /// 87 | Exception( 88 | const char * szFile, 89 | unsigned int uiLine 90 | ) : 91 | m_strText("General exception"), 92 | m_strFile(szFile), 93 | m_uiLine(uiLine) 94 | { } 95 | 96 | /// 97 | /// Constructor with text and variables. 98 | /// 99 | Exception( 100 | const char * szFile, 101 | unsigned int uiLine, 102 | const char * szText, 103 | ... 104 | ) : 105 | m_strFile(szFile), 106 | m_uiLine(uiLine) 107 | { 108 | char szBuffer[ExceptionBufferSize]; 109 | 110 | va_list arguments; 111 | 112 | // Initialize the argument list 113 | va_start(arguments, szText); 114 | 115 | // Write to string 116 | vsnprintf(szBuffer, ExceptionBufferSize, szText, arguments); 117 | 118 | m_strText = szBuffer; 119 | 120 | // Cleans up the argument list 121 | va_end(arguments); 122 | } 123 | 124 | public: 125 | /// 126 | /// Get a string representation of this exception. 127 | /// 128 | std::string ToString() const { 129 | std::string strReturn; 130 | 131 | char szBuffer[ExceptionBufferSize]; 132 | 133 | // Preamble 134 | snprintf(szBuffer, ExceptionBufferSize, "EXCEPTION ("); 135 | strReturn.append(szBuffer); 136 | 137 | // File name 138 | strReturn.append(m_strFile); 139 | 140 | // Line number 141 | snprintf(szBuffer, ExceptionBufferSize, ", Line %u) ", m_uiLine); 142 | strReturn.append(szBuffer); 143 | 144 | // Text 145 | strReturn.append(m_strText); 146 | 147 | return strReturn; 148 | } 149 | 150 | private: 151 | /// 152 | /// A string denoting the error in question. 153 | /// 154 | std::string m_strText; 155 | 156 | /// 157 | /// A string containing the filename where the exception occurred. 158 | /// 159 | std::string m_strFile; 160 | 161 | /// 162 | /// A constant containing the line number where the exception 163 | /// occurred. 164 | /// 165 | unsigned int m_uiLine; 166 | }; 167 | 168 | /////////////////////////////////////////////////////////////////////////////// 169 | 170 | #endif 171 | 172 | -------------------------------------------------------------------------------- /src/base/FilenameList.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file FilenameList.h 4 | /// \author Paul Ullrich 5 | /// \version June 3, 2020 6 | /// 7 | /// 8 | /// Copyright 2020 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _FILENAMELIST_H_ 18 | #define _FILENAMELIST_H_ 19 | 20 | #include "Exception.h" 21 | 22 | #include <fstream> 23 | #include <string> 24 | #include <vector> 25 | 26 | /////////////////////////////////////////////////////////////////////////////// 27 | 28 | class FilenameList : public std::vector<std::string> { 29 | 30 | public: 31 | /// 32 | /// Parse the filename list from a file containing a list of filenames.
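// Example (illustrative) use of FromFile, declared below; the list filename
// is hypothetical:
//
//   FilenameList vecInputFiles;
//   vecInputFiles.FromFile("input_file_list.txt");
//
// Blank lines and lines beginning with '#' are skipped; an Exception is
// thrown if the file cannot be opened or contains no filenames.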
33 | /// 34 | void FromFile( 35 | const std::string & strFileListFile, 36 | bool fAllowMultipleFilesPerLine = true 37 | ) { 38 | std::ifstream ifFileList(strFileListFile.c_str()); 39 | if (!ifFileList.is_open()) { 40 | _EXCEPTION1("Unable to open file \"%s\"", 41 | strFileListFile.c_str()); 42 | } 43 | std::string strFileLine; 44 | while (std::getline(ifFileList, strFileLine)) { 45 | if (strFileLine.length() == 0) { 46 | continue; 47 | } 48 | if (strFileLine[0] == '#') { 49 | continue; 50 | } 51 | if (!fAllowMultipleFilesPerLine) { 52 | if (strFileLine.find(';') != std::string::npos) { 53 | _EXCEPTION1("Only one filename allowed per line in \"%s\"", 54 | strFileListFile.c_str()); 55 | } 56 | } 57 | push_back(strFileLine); 58 | } 59 | if (size() == 0) { 60 | _EXCEPTION1("No filenames found in \"%s\"", 61 | strFileListFile.c_str()); 62 | 63 | } 64 | } 65 | }; 66 | 67 | /////////////////////////////////////////////////////////////////////////////// 68 | 69 | #endif 70 | 71 | -------------------------------------------------------------------------------- /src/base/FiniteElementTools.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file FiniteElementTools.h 4 | /// \author Paul Ullrich 5 | /// \version August 14, 2014 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #include "DataArray1D.h" 18 | #include "DataArray2D.h" 19 | #include "DataArray3D.h" 20 | #include "GridElements.h" 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | static const double InverseMapTolerance = 1.0e-13; 25 | 26 | /////////////////////////////////////////////////////////////////////////////// 27 | 28 | class Mesh; 29 | 30 | /////////////////////////////////////////////////////////////////////////////// 31 | 32 | /// 33 | /// Get the array of GLL nodal locations within an element. 34 | /// 35 | void GetDefaultNodalLocations( 36 | int nP, 37 | DataArray1D & dG 38 | ); 39 | 40 | /////////////////////////////////////////////////////////////////////////////// 41 | 42 | /// 43 | /// Apply the local map. 44 | /// 45 | void ApplyLocalMap( 46 | const Face & face, 47 | const NodeVector & nodes, 48 | double dAlpha, 49 | double dBeta, 50 | Node & node 51 | ); 52 | 53 | /////////////////////////////////////////////////////////////////////////////// 54 | 55 | /// 56 | /// Apply the local map. 57 | /// 58 | void ApplyLocalMap( 59 | const Face & face, 60 | const NodeVector & nodes, 61 | double dAlpha, 62 | double dBeta, 63 | Node & node, 64 | Node & dDx1G, 65 | Node & dDx2G 66 | ); 67 | 68 | /////////////////////////////////////////////////////////////////////////////// 69 | 70 | /// 71 | /// Apply inverse map using Newton's method. 72 | /// 73 | void ApplyInverseMap( 74 | const Face & face, 75 | const NodeVector & nodes, 76 | const Node & node, 77 | double & dAlpha, 78 | double & dBeta 79 | ); 80 | 81 | /////////////////////////////////////////////////////////////////////////////// 82 | 83 | /// 84 | /// Generate Mesh meta data for a spectral element grid. 
85 | /// 86 | double GenerateMetaData( 87 | const Mesh & mesh, 88 | int nP, 89 | bool fBubble, 90 | DataArray3D & dataGLLnodes, 91 | DataArray3D & dataGLLJacobian 92 | ); 93 | 94 | /////////////////////////////////////////////////////////////////////////////// 95 | 96 | /// 97 | /// Generate unique Jacobian values from non-unique Jacobians. 98 | /// 99 | void GenerateUniqueJacobian( 100 | const DataArray3D & dataGLLnodes, 101 | const DataArray3D & dataGLLJacobian, 102 | DataArray1D & dataUniqueJacobian 103 | ); 104 | 105 | /////////////////////////////////////////////////////////////////////////////// 106 | 107 | /// 108 | /// Generate Jacobian vector from Jacobians on GLL nodes. 109 | /// 110 | void GenerateDiscontinuousJacobian( 111 | const DataArray3D & dataGLLJacobian, 112 | DataArray1D & dataUniqueJacobian 113 | ); 114 | 115 | /////////////////////////////////////////////////////////////////////////////// 116 | 117 | /// 118 | /// Get the coefficients for sampling a 2D finite element at the 119 | /// specified point. 120 | /// 121 | void SampleGLLFiniteElement( 122 | int nMonotoneType, 123 | int nP, 124 | double dAlpha, 125 | double dBeta, 126 | DataArray2D & dCoeff 127 | ); 128 | 129 | /////////////////////////////////////////////////////////////////////////////// 130 | 131 | -------------------------------------------------------------------------------- /src/base/FourierTransforms.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file FourierTransforms.h 4 | /// \author Paul Ullrich 5 | /// \version June 13, 2020 6 | /// 7 | /// 8 | /// Copyright 2020 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _FOURIERTRANSFORMS_H_ 18 | #define _FOURIERTRANSFORMS_H_ 19 | 20 | #include "DataArray1D.h" 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | /// 25 | /// Apply a Fourier filter to a given data sequence. 
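// The filter below computes the lowest sModes real DFT coefficients,
//
//   a_k =  sum_{n=0}^{N-1} x_n cos(2*pi*k*n/N)
//   b_k = -sum_{n=0}^{N-1} x_n sin(2*pi*k*n/N)
//
// and then rebuilds the sequence from modes k < sModes together with their
// conjugate modes N-k, dividing by N: a spectral truncation (low-pass filter)
// that retains only the lowest Fourier modes.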
26 | /// 27 | 28 | // TODO: Test this function more thoroughly 29 | template <typename T> 30 | void fourier_filter( 31 | T * const data, 32 | size_t sCount, 33 | size_t sStride, 34 | size_t sModes, 35 | DataArray1D<double> & an, 36 | DataArray1D<double> & bn 37 | ) { 38 | _ASSERT(an.GetRows() >= sModes); 39 | _ASSERT(bn.GetRows() >= sModes); 40 | 41 | an.Zero(); 42 | bn.Zero(); 43 | 44 | { 45 | for (int n = 0; n < sCount; n++) { 46 | an[0] += data[n*sStride]; 47 | } 48 | 49 | double dProd0 = 2.0 * M_PI / static_cast<double>(sCount); 50 | for (int k = 1; k < sModes; k++) { 51 | double dProd1 = dProd0 * static_cast<double>(k); 52 | for (int n = 0; n < sCount; n++) { 53 | double dProd2 = dProd1 * static_cast<double>(n); 54 | an[k] += data[n*sStride] * cos(dProd2); 55 | bn[k] -= data[n*sStride] * sin(dProd2); 56 | } 57 | 58 | //an[sCount-k] = an[k]; 59 | //bn[sCount-k] = -bn[k]; 60 | } 61 | } 62 | { 63 | for (int n = 0; n < sCount; n++) { 64 | data[n*sStride] = 0.0; 65 | } 66 | 67 | double dProd0 = 2.0 * M_PI / static_cast<double>(sCount); 68 | for (int n = 0; n < sCount; n++) { 69 | data[n*sStride] += an[0]; 70 | 71 | double dProd1 = dProd0 * static_cast<double>(n); 72 | for (int k = 1; k < sModes; k++) { 73 | double dProd2 = dProd1 * static_cast<double>(k); 74 | double dProd3 = dProd1 * static_cast<double>(sCount-k); 75 | data[n*sStride] += an[k] * cos(dProd2) - bn[k] * sin(dProd2); 76 | data[n*sStride] += an[k] * cos(dProd3) + bn[k] * sin(dProd3); 77 | 78 | //printf("%1.15e %1.15e : ", cos(dProd2), cos(dProd3)); 79 | //printf("%1.15e %1.15e\n", sin(dProd2), sin(dProd3)); 80 | } 81 | //for (int k = sCount-sModes+1; k < sCount; k++) { 82 | // double dProd2 = dProd1 * static_cast<double>(k); 83 | // data[n*sStride] += an[k] * cos(dProd2) - bn[k] * sin(dProd2); 84 | //} 85 | } 86 | 87 | for (int n = 0; n < sCount; n++) { 88 | data[n*sStride] /= static_cast<double>(sCount); 89 | } 90 | } 91 | } 92 | 93 | /////////////////////////////////////////////////////////////////////////////// 94 | 95 | #endif // _FOURIERTRANSFORMS_H_ 96 | 97 | -------------------------------------------------------------------------------- /src/base/FunctionTimer.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file FunctionTimer.h 4 | /// \author Paul Ullrich 5 | /// \version July 26, 2010 6 | /// 7 | /// 8 | /// Copyright 2021 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _FUNCTIONTIMER_H_ 18 | #define _FUNCTIONTIMER_H_ 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | 22 | #include <chrono> 23 | #include <map> 24 | #include <string> 25 | 26 | /////////////////////////////////////////////////////////////////////////////// 27 | 28 | /// 29 | /// FunctionTimer is a class used for timing operations or groups of 30 | /// operations. Timing is provided via the std::chrono library 31 | /// and is calculated in microseconds. 32 | /// 33 | class FunctionTimer { 34 | 35 | public: 36 | /// 37 | /// Microseconds per second. 38 | /// 39 | static const unsigned long long MICROSECONDS_PER_SECOND = 1000000; 40 | 41 | public: 42 | /// 43 | /// A structure for storing group data.
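// Example (illustrative) usage of the FunctionTimer declared in this file;
// "detect" is a hypothetical group name:
//
//   {
//       FunctionTimer timer("detect");
//       // ... timed work ...
//   }   // the destructor calls StopTime() and accumulates into the group
//
//   unsigned long long ullUSecs = FunctionTimer::GetTotalGroupTime("detect");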
44 | /// 45 | struct TimerGroupData { 46 | unsigned long long iTotalTime; 47 | unsigned long long nEntries; 48 | }; 49 | 50 | /// 51 | /// The map structure for storing group data. 52 | /// 53 | typedef std::map GroupDataMap; 54 | 55 | typedef std::pair GroupDataPair; 56 | 57 | public: 58 | /// 59 | /// Constructor. 60 | /// 61 | /// 62 | /// Group name used for organizing grouped operations. 63 | /// 64 | FunctionTimer(const char *szGroup = NULL); 65 | 66 | /// 67 | /// Destructor. 68 | /// 69 | virtual ~FunctionTimer() { 70 | StopTime(); 71 | } 72 | 73 | /// 74 | /// Reset the timer. 75 | /// 76 | void Reset(); 77 | 78 | /// 79 | /// Return the time elapsed since this timer began. 80 | /// 81 | /// 82 | /// If true stores the elapsed time in the group structure. 83 | /// 84 | unsigned long long Time(bool fDone = false); 85 | 86 | /// 87 | /// Return the time elapsed since this timer began and store in 88 | /// group data. 89 | /// 90 | unsigned long long StopTime(); 91 | 92 | public: 93 | /// 94 | /// Retrieve a group data record. 95 | /// 96 | static const TimerGroupData & GetGroupTimeRecord(const char *szName); 97 | 98 | /// 99 | /// Retrieve the total time from a group data record. 100 | /// 101 | static unsigned long long GetTotalGroupTime(const char *szName); 102 | 103 | /// 104 | /// Retrieve the average time from a group data record. 105 | /// 106 | static unsigned long long GetAverageGroupTime(const char *szName); 107 | 108 | /// 109 | /// Retrieve the number of entries from a group data record. 110 | /// 111 | static unsigned long long GetNumberOfEntries(const char *szName); 112 | 113 | /// 114 | /// Reset the group data record. 115 | /// 116 | static void ResetGroupTimeRecord(const char *szName); 117 | 118 | private: 119 | /// 120 | /// Group data. 121 | /// 122 | static GroupDataMap m_mapGroupData; 123 | 124 | private: 125 | /// 126 | /// Timer is stopped. 127 | /// 128 | bool m_fStopped; 129 | 130 | /// 131 | /// Time at which this timer was constructed. 132 | /// m_tvStartTime; 134 | 135 | /// 136 | /// Time at which this timer was stopped. 137 | /// m_tvStopTime; 139 | 140 | /// 141 | /// Group name associated with this timer. 142 | /// 143 | std::string m_strGroup; 144 | }; 145 | 146 | /////////////////////////////////////////////////////////////////////////////// 147 | 148 | #endif 149 | 150 | -------------------------------------------------------------------------------- /src/base/GaussLobattoQuadrature.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file GaussLobattoQuadrature.h 4 | /// \author Paul Ullrich 5 | /// \version July 9, 2012 6 | /// 7 | /// 8 | /// Copyright 2000-2010 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _GAUSSLOBATTOQUADRATURE_H_ 18 | #define _GAUSSLOBATTOQUADRATURE_H_ 19 | 20 | #include "DataArray1D.h" 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | /// 25 | /// Quadrature nodes and weights for Gauss-Lobatto quadrature. 
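// Example (an illustrative sketch; assumes the caller preallocates the
// node/weight arrays) requesting four Gauss-Lobatto nodes and weights on the
// reference element [0,1] via the interface declared below:
//
//   DataArray1D<double> dG(4);
//   DataArray1D<double> dW(4);
//   GaussLobattoQuadrature::GetPoints(4, 0.0, 1.0, dG, dW);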
26 | /// 27 | class GaussLobattoQuadrature { 28 | 29 | public: 30 | /// 31 | /// Return the Gauss-Lobatto quadrature points and their corresponding 32 | /// weights for the given number of points. 33 | /// 34 | static void GetPoints( 35 | int nCount, 36 | DataArray1D<double> & dG, 37 | DataArray1D<double> & dW 38 | ); 39 | 40 | /// 41 | /// Return the Gauss-Lobatto quadrature points and their corresponding 42 | /// weights for the given number of points and reference element. 43 | /// 44 | static void GetPoints( 45 | int nCount, 46 | double dXi0, 47 | double dXi1, 48 | DataArray1D<double> & dG, 49 | DataArray1D<double> & dW 50 | ); 51 | }; 52 | 53 | /////////////////////////////////////////////////////////////////////////////// 54 | 55 | #endif 56 | 57 | -------------------------------------------------------------------------------- /src/base/GaussQuadrature.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file GaussQuadrature.h 4 | /// \author Paul Ullrich 5 | /// \version March 30, 2013 6 | /// 7 | /// 8 | /// Copyright 2000-2010 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _GAUSSQUADRATURE_H_ 18 | #define _GAUSSQUADRATURE_H_ 19 | 20 | #include "DataArray1D.h" 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | /// 25 | /// Quadrature nodes and weights for Gaussian quadrature. 26 | /// 27 | class GaussQuadrature { 28 | 29 | public: 30 | /// 31 | /// Return the Gaussian quadrature points and their corresponding 32 | /// weights for the given number of points. 33 | /// 34 | static void GetPoints( 35 | int nCount, 36 | DataArray1D<double> & dG, 37 | DataArray1D<double> & dW 38 | ); 39 | 40 | /// 41 | /// Return the Gaussian quadrature points and their corresponding 42 | /// weights for the given number of points and reference element. 43 | /// 44 | static void GetPoints( 45 | int nCount, 46 | double dXi0, 47 | double dXi1, 48 | DataArray1D<double> & dG, 49 | DataArray1D<double> & dW 50 | ); 51 | }; 52 | 53 | /////////////////////////////////////////////////////////////////////////////// 54 | 55 | #endif 56 | -------------------------------------------------------------------------------- /src/base/LegendrePolynomial.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file LegendrePolynomial.h 4 | /// \author Paul Ullrich 5 | /// \version July 26, 2010 6 | /// 7 | /// 8 | /// Copyright 2000-2010 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _LEGENDREPOLYNOMIAL_H_ 18 | #define _LEGENDREPOLYNOMIAL_H_ 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | 22 | /// 23 | /// A class for handling operations involving Legendre polynomials.
24 | /// 25 | class LegendrePolynomial { 26 | 27 | private: 28 | /// 29 | /// Private constructor. 30 | /// 31 | LegendrePolynomial() 32 | { } 33 | 34 | public: 35 | /// 36 | /// Evaluate the Legendre polynomial and its derivative with given 37 | /// degree at point dX. 38 | /// 39 | static void EvaluateValueAndDerivative( 40 | int nDegree, 41 | double dX, 42 | double & dValue, 43 | double & dDerivative 44 | ); 45 | 46 | /// 47 | /// Evaluate the Legendre polynomial of the given degree at point dX. 48 | /// 49 | static double Evaluate( 50 | int nDegree, 51 | double dX 52 | ); 53 | 54 | /// 55 | /// Evaluate the derivative of the Legendre polynomial of the given 56 | /// degree at point dX. 57 | /// 58 | static double EvaluateDerivative( 59 | int nDegree, 60 | double dX 61 | ); 62 | 63 | /// 64 | /// Determine the number of real roots of the derivative of the 65 | /// Legendre polynomial of given degree. 66 | /// 67 | static int DerivativeRootCount( 68 | int nDegree 69 | ) { 70 | return (nDegree - 1); 71 | } 72 | 73 | /// 74 | /// Retrieve the specified root of the derivative of the Legendre 75 | /// polynomial of the given degree. 76 | /// 77 | static double DerivativeRoot( 78 | int nDegree, 79 | int nRoot 80 | ); 81 | 82 | /// 83 | /// Retrieve the specified root of the extended derivative of the 84 | /// Legendre polynomial of the given degree. The extended derivative 85 | /// is defined as (x^2 - 1) * P'(x), where P'(x) is the usual 86 | /// derivative. 87 | /// 88 | static double DerivativeExtendedRoot( 89 | int nDegree, 90 | int nRoot 91 | ); 92 | 93 | public: 94 | /// 95 | /// Evaluate the characteristic function at the given point. 96 | /// 97 | static double EvaluateCharacteristic( 98 | int nDegree, 99 | int nRoot, 100 | double dX 101 | ); 102 | 103 | /// 104 | /// Determine the number of real roots of the Legendre polynomial of 105 | /// the given degree. 106 | /// 107 | static int RootCount( 108 | int nDegree 109 | ) { 110 | return nDegree; 111 | } 112 | 113 | /// 114 | /// Return all roots to the Legendre polynomial of the 115 | /// given degree. 116 | /// 117 | static void AllRoots( 118 | int nDegree, 119 | double * dRoots 120 | ); 121 | 122 | /// 123 | /// Return all roots to the derivative of the Legendre polynomial of the 124 | /// given degree. 125 | /// 126 | static void AllDerivativeRoots( 127 | int nDegree, 128 | double * dRoots 129 | ); 130 | 131 | /// 132 | /// Return the given root to the Legendre polynomial of the 133 | /// given degree. 134 | /// 135 | static double Root( 136 | int nDegree, 137 | int nRoot 138 | ); 139 | 140 | }; 141 | 142 | /////////////////////////////////////////////////////////////////////////////// 143 | 144 | #endif 145 | 146 | -------------------------------------------------------------------------------- /src/base/MeshUtilities.cpp: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file MeshUtilities.cpp 4 | /// \author Paul Ullrich 5 | /// \version August 7, 2014 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 
15 | /// 16 | 17 | #include "MeshUtilities.h" 18 | 19 | #include "Exception.h" 20 | 21 | /////////////////////////////////////////////////////////////////////////////// 22 | 23 | void MeshUtilities::FindFaceFromNode( 24 | const Mesh & mesh, 25 | const Node & node, 26 | FindFaceStruct & aFindFaceStruct 27 | ) { 28 | // Reset the FaceStruct 29 | aFindFaceStruct.vecFaceIndices.clear(); 30 | aFindFaceStruct.vecFaceLocations.clear(); 31 | aFindFaceStruct.loc = Face::NodeLocation_Undefined; 32 | 33 | // Loop through all faces to find overlaps 34 | // Note: This algorithm can likely be dramatically improved 35 | for (int l = 0; l < mesh.faces.size(); l++) { 36 | Face::NodeLocation loc; 37 | int ixLocation; 38 | 39 | ContainsNode( 40 | mesh.faces[l], 41 | mesh.nodes, 42 | node, 43 | loc, 44 | ixLocation); 45 | 46 | if (loc == Face::NodeLocation_Exterior) { 47 | continue; 48 | } 49 | 50 | #ifdef VERBOSE 51 | printf("%i\n", l); 52 | printf("n: %1.5e %1.5e %1.5e\n", node.x, node.y, node.z); 53 | printf("n0: %1.5e %1.5e %1.5e\n", 54 | mesh.nodes[mesh.faces[l][0]].x, 55 | mesh.nodes[mesh.faces[l][0]].y, 56 | mesh.nodes[mesh.faces[l][0]].z); 57 | printf("n1: %1.5e %1.5e %1.5e\n", 58 | mesh.nodes[mesh.faces[l][1]].x, 59 | mesh.nodes[mesh.faces[l][1]].y, 60 | mesh.nodes[mesh.faces[l][1]].z); 61 | printf("n2: %1.5e %1.5e %1.5e\n", 62 | mesh.nodes[mesh.faces[l][2]].x, 63 | mesh.nodes[mesh.faces[l][2]].y, 64 | mesh.nodes[mesh.faces[l][2]].z); 65 | printf("n3: %1.5e %1.5e %1.5e\n", 66 | mesh.nodes[mesh.faces[l][3]].x, 67 | mesh.nodes[mesh.faces[l][3]].y, 68 | mesh.nodes[mesh.faces[l][3]].z); 69 | #endif 70 | 71 | if (aFindFaceStruct.loc == Face::NodeLocation_Undefined) { 72 | aFindFaceStruct.loc = loc; 73 | } 74 | 75 | // Node is in the interior of this face 76 | if (loc == Face::NodeLocation_Interior) { 77 | if (loc != aFindFaceStruct.loc) { 78 | _EXCEPTIONT("No consensus on location of Node"); 79 | } 80 | 81 | aFindFaceStruct.vecFaceIndices.push_back(l); 82 | aFindFaceStruct.vecFaceLocations.push_back(ixLocation); 83 | break; 84 | } 85 | 86 | // Node is on the edge of this face 87 | if (loc == Face::NodeLocation_Edge) { 88 | if (loc != aFindFaceStruct.loc) { 89 | _EXCEPTIONT("No consensus on location of Node"); 90 | } 91 | 92 | aFindFaceStruct.vecFaceIndices.push_back(l); 93 | aFindFaceStruct.vecFaceLocations.push_back(ixLocation); 94 | } 95 | 96 | // Node is at the corner of this face 97 | if (loc == Face::NodeLocation_Corner) { 98 | if (loc != aFindFaceStruct.loc) { 99 | _EXCEPTIONT("No consensus on location of Node"); 100 | } 101 | 102 | aFindFaceStruct.vecFaceIndices.push_back(l); 103 | aFindFaceStruct.vecFaceLocations.push_back(ixLocation); 104 | } 105 | } 106 | 107 | // Edges can only have two adjacent Faces 108 | if (aFindFaceStruct.loc == Face::NodeLocation_Edge) { 109 | if (aFindFaceStruct.vecFaceIndices.size() != 2) { 110 | printf("n: %1.5e %1.5e %1.5e\n", node.x, node.y, node.z); 111 | _EXCEPTION2("Node found on edge with %i neighboring face(s) (%i)", 112 | aFindFaceStruct.vecFaceIndices.size(), 113 | (int)(aFindFaceStruct.vecFaceIndices.size())); 114 | } 115 | } 116 | 117 | // Corners must have at least three adjacent Faces 118 | if (aFindFaceStruct.loc == Face::NodeLocation_Corner) { 119 | if (aFindFaceStruct.vecFaceIndices.size() < 3) { 120 | printf("n: %1.5e %1.5e %1.5e\n", node.x, node.y, node.z); 121 | _EXCEPTION1("Two Faced corner detected (%i)", 122 | (int)(aFindFaceStruct.vecFaceIndices.size())); 123 | } 124 | } 125 | } 126 | 127 | 
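// Example (illustrative): locating the Face(s) of a Mesh containing a query
// Node through the fuzzy-arithmetic subclass declared in MeshUtilitiesFuzzy.h;
// "mesh" and "node" are hypothetical variables:
//
//   MeshUtilitiesFuzzy utils;
//   FindFaceStruct ffs;
//   utils.FindFaceFromNode(mesh, node, ffs);
//   // ffs.loc reports interior / edge / corner; ffs.vecFaceIndices holds
//   // 1, 2, or >= 3 containing Face indices, respectively.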
/////////////////////////////////////////////////////////////////////////////// 128 | 129 | -------------------------------------------------------------------------------- /src/base/MeshUtilities.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file MeshUtilities.h 4 | /// \author Paul Ullrich 5 | /// \version August 7, 2014 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _MESHUTILITIES_H_ 18 | #define _MESHUTILITIES_H_ 19 | 20 | #include "GridElements.h" 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | class MeshUtilities { 25 | 26 | public: 27 | /// 28 | /// Determine if this face contains the specified Node, and whether 29 | /// the Node is along an edge or at a corner. 30 | /// 31 | virtual void ContainsNode( 32 | const Face & face, 33 | const NodeVector & nodevec, 34 | const Node & node, 35 | Face::NodeLocation & loc, 36 | int & ixLocation 37 | ) const = 0; 38 | 39 | /// 40 | /// Find all Face indices that contain this Node. 41 | /// 42 | void FindFaceFromNode( 43 | const Mesh & mesh, 44 | const Node & node, 45 | FindFaceStruct & aFindFaceStruct 46 | ); 47 | 48 | }; 49 | 50 | /////////////////////////////////////////////////////////////////////////////// 51 | 52 | #endif 53 | -------------------------------------------------------------------------------- /src/base/MeshUtilitiesFuzzy.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file MeshUtilitiesFuzzy.h 4 | /// \author Paul Ullrich 5 | /// \version August 7, 2014 6 | /// 7 | /// 8 | /// Copyright 2000-2014 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _MESHUTILITIESFUZZY_H_ 18 | #define _MESHUTILITIESFUZZY_H_ 19 | 20 | #include "GridElements.h" 21 | #include "MeshUtilities.h" 22 | 23 | #include 24 | 25 | /////////////////////////////////////////////////////////////////////////////// 26 | 27 | /// 28 | /// Various implementations of methods for determining Faces from Nodes. 29 | /// 30 | class MeshUtilitiesFuzzy : public MeshUtilities { 31 | 32 | public: 33 | /// 34 | /// Do nothing. 35 | /// 36 | inline Node ToRealCoords( 37 | Node & node 38 | ) { 39 | return node; 40 | } 41 | 42 | /// 43 | /// Determine if two Nodes are equal. 44 | /// 45 | bool AreNodesEqual( 46 | const Node & node0, 47 | const Node & node1 48 | ); 49 | 50 | /// 51 | /// Determine if a node is to the right or left of an edge. 
52 | /// 53 | /// 54 | /// +1 if the node is to the left of the edge (interior) 55 | /// 0 if the node is on the edge (coincident) 56 | /// -1 if the node is to the right of the edge (exterior) 57 | /// 58 | int FindNodeEdgeSide( 59 | const Node & nodeBegin, 60 | const Node & nodeEnd, 61 | const Edge::Type edgetype, 62 | const Node & nodeTest 63 | ) const; 64 | 65 | /// 66 | /// Determine if face contains node, and whether 67 | /// the Node is along an edge or at a corner. 68 | /// 69 | void ContainsNode( 70 | const Face & face, 71 | const NodeVector & nodevec, 72 | const Node & node, 73 | Face::NodeLocation & loc, 74 | int & ixLocation 75 | ) const; 76 | 77 | /// 78 | /// Calculate all intersections between the Edge connecting 79 | /// nodeFirstBegin and nodeFirstEnd with type typeFirst and the Edge 80 | /// connecting nodeSecondBegin and nodeSecondEnd with type typeSecond. 81 | /// Intersections are recorded in nodeIntersections. 82 | /// 83 | /// 84 | /// Returns true if lines are coincident, false otherwise. 85 | /// 86 | /// If lines are coincident, intersections includes any nodes of Second 87 | /// that are contained in First, ordered from FirstBegin to FirstEnd. 88 | /// 89 | bool CalculateEdgeIntersectionsSemiClip( 90 | const Node & nodeFirstBegin, 91 | const Node & nodeFirstEnd, 92 | Edge::Type typeFirst, 93 | const Node & nodeSecondBegin, 94 | const Node & nodeSecondEnd, 95 | Edge::Type typeSecond, 96 | std::vector<Node> & nodeIntersections 97 | ); 98 | 99 | /// 100 | /// Calculate all intersections between the Edge connecting 101 | /// nodeFirstBegin and nodeFirstEnd with type typeFirst and the Edge 102 | /// connecting nodeSecondBegin and nodeSecondEnd with type typeSecond. 103 | /// Intersections are recorded in nodeIntersections. 104 | /// 105 | /// 106 | /// Returns true if lines are coincident, false otherwise. 107 | /// 108 | /// If lines are coincident, intersections includes any nodes of Second 109 | /// that are contained in First, ordered from FirstBegin to FirstEnd. 110 | /// 111 | bool CalculateEdgeIntersections( 112 | const Node & nodeFirstBegin, 113 | const Node & nodeFirstEnd, 114 | Edge::Type typeFirst, 115 | const Node & nodeSecondBegin, 116 | const Node & nodeSecondEnd, 117 | Edge::Type typeSecond, 118 | std::vector<Node> & nodeIntersections, 119 | bool fIncludeFirstBeginNode = false 120 | ); 121 | 122 | /// 123 | /// Find the Face that is near ixNode in the direction of nodeEnd. 124 | /// 125 | int FindFaceNearNode( 126 | const Mesh & mesh, 127 | int ixNode, 128 | const Node & nodeEnd, 129 | const Edge::Type edgetype 130 | ); 131 | 132 | /// 133 | /// Find the Face that is near nodeBegin in the direction of nodeEnd. 134 | /// 135 | int FindFaceNearNode( 136 | const Mesh & mesh, 137 | const Node & nodeBegin, 138 | const Node & nodeEnd, 139 | const Edge::Type edgetype, 140 | const FindFaceStruct & aFindFaceStruct 141 | ); 142 | }; 143 | 144 | /////////////////////////////////////////////////////////////////////////////// 145 | 146 | #endif 147 | 148 | -------------------------------------------------------------------------------- /src/base/PolynomialInterp.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file PolynomialInterp.h 4 | /// \author Paul Ullrich 5 | /// \version December 19, 2011 6 | /// 7 | /// 8 | /// Copyright 2000-2010 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package.
11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _POLYNOMIALINTERP_H_ 18 | #define _POLYNOMIALINTERP_H_ 19 | 20 | #include 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | 24 | /// 25 | /// A class for performing polynomial interpolation. 26 | /// 27 | class PolynomialInterp { 28 | 29 | private: 30 | /// 31 | /// Private constructor. 32 | /// 33 | PolynomialInterp() 34 | { } 35 | 36 | public: 37 | /// 38 | /// Determine the coefficients of a Lagrangian polynomial through the 39 | /// specified points dX which is sampled at point dXsample. 40 | /// 41 | static void LagrangianPolynomialCoeffs( 42 | int nPoints, 43 | const double * dX, 44 | double * dCoeffs, 45 | double dXsample 46 | ); 47 | 48 | /// 49 | /// Determine the coefficients of the first derivative of a Lagrangian 50 | /// polynomial through the specified points dX which is sampled at 51 | /// point dXsample. 52 | /// 53 | static void DiffLagrangianPolynomialCoeffs( 54 | int nPoints, 55 | const double * dX, 56 | double * dCoeffs, 57 | double dXsample 58 | ); 59 | 60 | /// 61 | /// Determine the coefficients of the second derivative of a Lagrangian 62 | /// polynomial through the specified points dX which is sampled at 63 | /// point dXsample. 64 | /// 65 | static void DiffDiffLagrangianPolynomialCoeffs( 66 | int nPoints, 67 | const double * dX, 68 | double * dCoeffs, 69 | double dXsample 70 | ); 71 | 72 | /// 73 | /// Determine the coefficients of the third derivative of a Lagrangian 74 | /// polynomial through the specified points dX which is sampled at 75 | /// point dXsample. 76 | /// 77 | static void DiffDiffDiffLagrangianPolynomialCoeffs( 78 | int nPoints, 79 | const double * dX, 80 | double * dCoeffs, 81 | double dXsample 82 | ); 83 | 84 | /// 85 | /// Interpolate a polynomial through the given (X,Y) points and sample 86 | /// at point dXsample. This method is faster and more computationally 87 | /// stable than InterpolateCoeffs. 88 | /// 89 | static double Interpolate( 90 | int nPoints, 91 | const double * dX, 92 | const double * dY, 93 | double dXsample 94 | ); 95 | 96 | /// 97 | /// Obtain the coefficients a_i of a polynomial interpolated through the 98 | /// points (X,Y), where 99 | /// p(x) = a_0 + a_1 (x - dXmid) + ... + a_(n-1) (x - dXmid)^(n-1) 100 | /// 101 | /// 102 | /// An external workspace of at least of size nPoints^2. 103 | /// 104 | /// 105 | /// An external workspace of at least size nPoints. 
106 | /// 107 | static void InterpolateCoeffs( 108 | int nPoints, 109 | const double * dX, 110 | const double * dY, 111 | double * dA, 112 | double dXmid = 0.0, 113 | double * dWorkspace = NULL, 114 | int * iPivot = NULL 115 | ); 116 | }; 117 | 118 | /////////////////////////////////////////////////////////////////////////////// 119 | 120 | #endif 121 | 122 | -------------------------------------------------------------------------------- /src/base/RLLPolygonArray.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file RLLPolygonArray.h 4 | /// \author Paul Ullrich 5 | /// \version July 2, 2019 6 | /// 7 | /// 8 | /// Copyright 2000-2019 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #include <string> 18 | #include <vector> 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | 22 | struct RLLPoint { 23 | double lon; 24 | double lat; 25 | }; 26 | 27 | typedef std::vector<RLLPoint> RLLPointVector; 28 | 29 | /////////////////////////////////////////////////////////////////////////////// 30 | 31 | class RLLPolygonArray { 32 | 33 | public: 34 | /// 35 | /// Constructor. 36 | /// 37 | RLLPolygonArray() 38 | { } 39 | 40 | public: 41 | /// 42 | /// Load in the node array from a file, with vertices 43 | /// specified in degrees. 44 | /// 45 | void FromFile( 46 | const std::string & strFilename 47 | ); 48 | 49 | public: 50 | /// 51 | /// Determine if the given point is within the polygon. 52 | /// 53 | /// 54 | /// Regular longitude-latitude coordinates of the testing 55 | /// point in degrees. 56 | /// 57 | const std::string & NameOfRegionContainingPoint( 58 | const RLLPoint & pt_in_degrees 59 | ); 60 | 61 | protected: 62 | /// 63 | /// Default polygon name. 64 | /// 65 | std::string m_strDefault; 66 | 67 | /// 68 | /// Array of polygon names. 69 | /// 70 | std::vector<std::string> m_vecNames; 71 | 72 | /// 73 | /// Array of polygon nodes. 74 | /// 75 | std::vector<RLLPointVector> m_vecNodes; 76 | }; 77 | 78 | /////////////////////////////////////////////////////////////////////////////// 79 | 80 | -------------------------------------------------------------------------------- /src/base/ShpFile.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file ShpFile.h 4 | /// \author Paul Ullrich 5 | /// \version May 18, 2022 6 | /// 7 | /// 8 | /// Copyright 2022 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty.
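A usage sketch for PolynomialInterp::Interpolate declared above (illustrative values: the three samples lie on y = x^2, so the quadratic interpolant reproduces it exactly):

#include "PolynomialInterp.h"
#include <cstdio>

int main() {
	const double dX[3] = {0.0, 1.0, 2.0};
	const double dY[3] = {0.0, 1.0, 4.0};
	double dSample = PolynomialInterp::Interpolate(3, dX, dY, 0.5);
	printf("p(0.5) = %f (expect 0.25)\n", dSample);
	return 0;
}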
15 | /// 16 | 17 | #ifndef _SHPFILE_H_ 18 | #define _SHPFILE_H_ 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | 22 | #include "Exception.h" 23 | #include "Announce.h" 24 | #include "GridElements.h" 25 | 26 | /////////////////////////////////////////////////////////////////////////////// 27 | 28 | void ReadShpFileAsMesh( 29 | const std::string & strInputFile, 30 | Mesh & mesh, 31 | bool fVerbose = false 32 | ); 33 | 34 | /////////////////////////////////////////////////////////////////////////////// 35 | 36 | #endif // _SHPFILE_H_ 37 | 38 | -------------------------------------------------------------------------------- /src/base/SimpleGridUtilities.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file SimpleGridUtilities.h 4 | /// \author Paul Ullrich 5 | /// \version August 14, 2018 6 | /// 7 | /// 8 | /// Copyright 2000-2018 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _SIMPLEGRIDUTILITIES_H_ 18 | #define _SIMPLEGRIDUTILITIES_H_ 19 | 20 | #include "SimpleGrid.h" 21 | #include "DataArray1D.h" 22 | 23 | #include <set> 24 | 25 | /////////////////////////////////////////////////////////////////////////////// 26 | 27 | /// 28 | /// Find the minimum/maximum value of a field near the given point. 29 | /// 30 | /// 31 | /// Maximum distance from the initial point in degrees. 32 | /// 33 | /// 34 | /// Output node index at which the extremum occurs. 35 | /// 36 | /// 37 | /// Output value of the field taken at the extremum point. 38 | /// 39 | /// 40 | /// Output distance from the centerpoint at which the extremum occurs 41 | /// in great circle distance (degrees). 42 | /// 43 | template <typename real> 44 | void FindLocalMinMax( 45 | const SimpleGrid & grid, 46 | bool fMinimum, 47 | const DataArray1D<real> & data, 48 | int ix0, 49 | double dMaxDistDeg, 50 | int & ixExtremum, 51 | real & dExtremumValue, 52 | float & dRMaxDeg 53 | ); 54 | 55 | /// 56 | /// Find the locations of all minima in the given DataArray1D. 57 | /// 58 | template <typename real> 59 | void FindAllLocalMinima( 60 | const SimpleGrid & grid, 61 | const DataArray1D<real> & data, 62 | std::set<int> & setMinima 63 | ); 64 | 65 | /// 66 | /// Find the locations of all maxima in the given DataArray1D. 67 | /// 68 | template <typename real> 69 | void FindAllLocalMaxima( 70 | const SimpleGrid & grid, 71 | const DataArray1D<real> & data, 72 | std::set<int> & setMaxima 73 | ); 74 | 75 | /// 76 | /// Find the locations of all minima in the given DataArray1D 77 | /// with a prescribed threshold. 78 | /// 79 | template <typename real> 80 | void FindAllLocalMinMaxWithThreshold( 81 | const SimpleGrid & grid, 82 | const DataArray1D<real> & data, 83 | bool fMinima, 84 | const std::string & strThreshold, 85 | std::set<int> & setMinima 86 | ); 87 | 88 | /// 89 | /// Find the locations of all local min/max in the given DataArray1D 90 | /// for a given search distance.
91 | /// 92 | template <typename real> 93 | void FindAllLocalMinMaxWithGraphDistance( 94 | const SimpleGrid & grid, 95 | const DataArray1D<real> & data, 96 | bool fMinima, 97 | int nMaxGraphDistance, 98 | std::set<int> & setMinMax 99 | ); 100 | 101 | /// 102 | /// Find the local average of a field near the given point. 103 | /// 104 | /// 105 | /// Maximum distance from the initial point in degrees. 106 | /// 107 | template <typename real> 108 | void FindLocalAverage( 109 | const SimpleGrid & grid, 110 | const DataArray1D<real> & data, 111 | int ix0, 112 | double dMaxDistDeg, 113 | real & dAverage 114 | ); 115 | 116 | /// 117 | /// Find the largest positive or negative value of the closed contour 118 | /// delta in the given field at the given grid point. 119 | /// 120 | template <typename real> 121 | void FindMaxClosedContourDelta( 122 | const SimpleGrid & grid, 123 | const DataArray1D<real> & data, 124 | int ix0, 125 | double dDistDeg, 126 | double dMinMaxDistDeg, 127 | bool fMaxClosedContourDeltaSign, 128 | real & dMaxClosedContourDelta 129 | ); 130 | 131 | /// 132 | /// Find the weighted area of positive values in a given radius 133 | /// minus the weighted area of negative values in that radius. 134 | /// 135 | template <typename real> 136 | void PositiveMinusNegativeWeightedArea( 137 | const SimpleGrid & grid, 138 | const DataArray1D<real> & data, 139 | int ix0, 140 | double dDistDeg, 141 | real & dValue); 142 | 143 | /// 144 | /// Find the maximum value of the data field that is poleward of 145 | /// the given candidate within a given longitude swath. 146 | /// 147 | template <typename real> 148 | void MaxPolewardValue( 149 | const SimpleGrid & grid, 150 | const DataArray1D<real> & data, 151 | int ix0, 152 | double dDistDeg, 153 | real & dValue); 154 | 155 | /////////////////////////////////////////////////////////////////////////////// 156 | 157 | #endif // _SIMPLEGRIDUTILITIES_H_ 158 | 159 | -------------------------------------------------------------------------------- /src/base/Subscript.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file Subscript.h 4 | /// \author Bryce Adelstein-Lelbach aka wash 5 | /// \version January 28, 2016 6 | /// 7 | /// 8 | /// This file contains Subscript, a simple C++ meta-programming facility 9 | /// which enables C-style subscript indexing for contiguous 10 | /// multi-dimensional arrays. 11 | /// 12 | /// 13 | /// Copyright 2016 Bryce Adelstein-Lelbach aka wash 14 | /// 15 | /// Distributed under the Boost Software License, Version 1.0.
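A usage sketch for the search utilities declared above, instantiated with real = float (the grid, data and ix0 inputs are assumed, not names from this repository):

#include "SimpleGridUtilities.h"

// Sketch: locate the local minimum of a field within 5 great-circle degrees
// of node ix0, e.g. a sea-level pressure minimum for cyclone detection.
void ExampleLocalMin(const SimpleGrid & grid, const DataArray1D<float> & data, int ix0) {
	int ixExtremum;
	float dExtremumValue;
	float dRMaxDeg;
	FindLocalMinMax<float>(
		grid,
		true,   // fMinimum: search for a minimum
		data,
		ix0,
		5.0,    // dMaxDistDeg
		ixExtremum, dExtremumValue, dRMaxDeg);
}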
(See 16 | /// accompanying file LICENSE_1_0.txt or copy at 17 | /// http://www.boost.org/LICENSE_1_0.txt) 18 | /// 19 | 20 | #if !defined(_SUBSCRIPT_H_) 21 | #define _SUBSCRIPT_H_ 22 | 23 | /////////////////////////////////////////////////////////////////////////////// 24 | 25 | #include 26 | #include 27 | 28 | /////////////////////////////////////////////////////////////////////////////// 29 | 30 | template 31 | struct index_array 32 | { 33 | T ix_[size]; 34 | 35 | #if defined(__INTEL_COMPILER) 36 | inline T& operator[](int i) { 37 | #else 38 | inline T& operator[](int i) noexcept { 39 | #endif 40 | return ix_[i]; 41 | } 42 | 43 | #if defined(__INTEL_COMPILER) 44 | inline const T& operator[](int i) const { 45 | #else 46 | inline const T& operator[](int i) const noexcept { 47 | #endif 48 | return ix_[i]; 49 | } 50 | }; 51 | 52 | /////////////////////////////////////////////////////////////////////////////// 53 | 54 | // expr := subscript+ 55 | // subscript := '[' c++-integral-expression ']' 56 | 57 | template 58 | struct Subscript; 59 | 60 | template 61 | struct Subscript 62 | { 63 | typedef std::ptrdiff_t size_type; 64 | 65 | // The index of the first free dimension. 66 | enum { Dim = NumDims - FreeDims }; 67 | 68 | T& object_; 69 | index_array indices_; 70 | 71 | #if defined(__INTEL_COMPILER) 72 | constexpr Subscript(T& object) 73 | #else 74 | constexpr Subscript(T& object) noexcept 75 | #endif 76 | : object_(object), indices_() {} 77 | 78 | #if defined(__INTEL_COMPILER) 79 | Subscript( 80 | size_type head, 81 | Subscript const& tail 82 | ) 83 | #else 84 | Subscript( 85 | size_type head 86 | , Subscript const& tail 87 | ) noexcept 88 | #endif 89 | : object_(tail.object_) 90 | { 91 | for (size_type i = 0; i < Dim - 1; ++i) 92 | indices_[i] = tail.indices_[i]; 93 | indices_[Dim - 1] = head; 94 | } 95 | 96 | #if defined(__INTEL_COMPILER) 97 | Subscript operator[](size_type idx) const 98 | #else 99 | Subscript operator[](size_type idx) const noexcept 100 | #endif 101 | { 102 | return Subscript(idx, *this); 103 | } 104 | }; 105 | 106 | // This specialization is instantiated for the final index, and completes the 107 | // indexing operation. 108 | template 109 | struct Subscript 110 | { 111 | typedef std::ptrdiff_t size_type; 112 | 113 | // The index of the first free dimension. 114 | enum { Dim = NumDims - 1 }; 115 | 116 | T& object_; 117 | index_array indices_; 118 | 119 | #if defined(__INTEL_COMPILER) 120 | constexpr Subscript(T& object) 121 | #else 122 | constexpr Subscript(T& object) noexcept 123 | #endif 124 | : object_(object), indices_() {} 125 | 126 | #if defined(__INTEL_COMPILER) 127 | Subscript( 128 | size_type head, 129 | Subscript const& tail 130 | ) 131 | #else 132 | Subscript( 133 | size_type head, 134 | Subscript const& tail 135 | ) noexcept 136 | #endif 137 | : object_(tail.object_) 138 | { 139 | for (size_type i = 0; i < Dim - 1; ++i) 140 | indices_[i] = tail.indices_[i]; 141 | indices_[Dim - 1] = head; 142 | } 143 | 144 | /// 145 | /// Conversion-to-pointer operator, which allows us to return contigous 146 | /// 147 | operator typename std::conditional< 148 | // If we're bound to a const object... 149 | std::is_const::type>::value, 150 | // ... then we're convertible to a const pointer ... 151 | typename T::ValueType const*, 152 | // ... otherwise, we're convertible to a non-const pointer. 
153 | typename T::ValueType* 154 | >::type() const 155 | { 156 | return object_(indices_); 157 | } 158 | }; 159 | 160 | #endif // _SUBSCRIPT_H_ 161 | 162 | -------------------------------------------------------------------------------- /src/base/TimeMatch.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file TimeMatch.h 4 | /// \author Paul Ullrich 5 | /// \version August 25, 2020 6 | /// 7 | /// 8 | /// Copyright 2020 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// 16 | 17 | #ifndef _TIMEMATCH_H_ 18 | #define _TIMEMATCH_H_ 19 | 20 | #include "Exception.h" 21 | 22 | #include <string> 23 | 24 | #ifndef TEMPEST_NOREGEX 25 | #include <regex> 26 | #endif 27 | 28 | /////////////////////////////////////////////////////////////////////////////// 29 | 30 | #ifndef TEMPEST_NOREGEX 31 | inline void TestRegex() { 32 | std::string strExp("(00|06):.."); 33 | std::string strString1("00:55"); 34 | std::string strString2("01:55"); 35 | 36 | try { 37 | std::regex reTest; 38 | reTest.assign(strExp); 39 | 40 | std::smatch match1; 41 | if (!std::regex_search(strString1, match1, reTest)) { 42 | _EXCEPTIONT("std::regex failure; std::regex does not appear to work on this system"); 43 | } 44 | 45 | std::smatch match2; 46 | if (std::regex_search(strString2, match2, reTest)) { 47 | _EXCEPTIONT("std::regex failure; std::regex does not appear to work on this system"); 48 | } 49 | 50 | } catch(std::regex_error & reerr) { 51 | _EXCEPTION1("std::regex failure; std::regex does not appear to work on this system (%s)", 52 | reerr.what()); 53 | } 54 | } 55 | #endif 56 | 57 | /////////////////////////////////////////////////////////////////////////////// 58 | 59 | #endif 60 | -------------------------------------------------------------------------------- /src/base/order32.h: -------------------------------------------------------------------------------- 1 | #ifndef _ORDER32_H_ 2 | #define _ORDER32_H_ 3 | 4 | #include <limits.h> 5 | #include <stdint.h> 6 | 7 | #if CHAR_BIT != 8 8 | #error "unsupported char size" 9 | #endif 10 | 11 | enum 12 | { 13 | O32_LITTLE_ENDIAN = 0x03020100ul, 14 | O32_BIG_ENDIAN = 0x00010203ul, 15 | O32_PDP_ENDIAN = 0x01000302ul 16 | }; 17 | 18 | static const union { unsigned char bytes[4]; uint32_t value; } o32_host_order = 19 | { { 0, 1, 2, 3 } }; 20 | 21 | #define O32_HOST_ORDER (o32_host_order.value) 22 | 23 | #endif 24 | -------------------------------------------------------------------------------- /src/blobs/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0.
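The byte-order probe in order32.h above stores the bytes {0,1,2,3} in a union and reads them back as one uint32_t, so the resulting value itself encodes the host ordering (0x03020100 on a little-endian host, 0x00010203 on a big-endian host). A usage sketch:

#include "order32.h"
#include <cstdio>

int main() {
	if (O32_HOST_ORDER == O32_LITTLE_ENDIAN) {
		printf("little-endian host\n");
	} else if (O32_HOST_ORDER == O32_BIG_ENDIAN) {
		printf("big-endian host\n");
	} else {
		printf("PDP-endian host\n");
	}
	return 0;
}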
(See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND BLOB_STATS_FILES 7 | BlobStats.cpp 8 | ) 9 | 10 | list(APPEND DETECT_BLOBS_FILES 11 | DetectBlobs.cpp 12 | ) 13 | 14 | list(APPEND PERSISTENT_BLOBS_FILES 15 | PersistentBlobs.cpp 16 | ) 17 | 18 | list(APPEND STITCH_BLOBS_FILES 19 | StitchBlobs.cpp 20 | ) 21 | 22 | include_directories( 23 | ${CMAKE_CURRENT_SOURCE_DIR} 24 | ${CMAKE_CURRENT_SOURCE_DIR}/../base 25 | ${CMAKE_CURRENT_SOURCE_DIR}/../blocking 26 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 27 | ${NetCDF_C_INCLUDE_DIR} 28 | ${MPI_CXX_INCLUDE_DIRS} 29 | ) 30 | 31 | add_executable(BlobStats ${BLOB_STATS_FILES}) 32 | target_link_libraries(BlobStats PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 33 | 34 | add_executable(DetectBlobs ${DETECT_BLOBS_FILES}) 35 | target_link_libraries(DetectBlobs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 36 | 37 | add_executable(PersistentBlobs ${PERSISTENT_BLOBS_FILES}) 38 | target_link_libraries(PersistentBlobs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 39 | 40 | add_executable(StitchBlobs ${STITCH_BLOBS_FILES}) 41 | target_link_libraries(StitchBlobs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 42 | 43 | # Install executables to the "bin" directory under the installation prefix. 44 | install( 45 | TARGETS BlobStats DetectBlobs PersistentBlobs StitchBlobs 46 | RUNTIME DESTINATION bin 47 | ) -------------------------------------------------------------------------------- /src/blocking/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. (See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND BLOCKING_UTILITIES_FILES 7 | BlockingUtilities.cpp 8 | ) 9 | 10 | list(APPEND AVG_VAR_FILES 11 | ${BLOCKING_UTILITIES_FILES} 12 | AvgVar.cpp 13 | ) 14 | 15 | list(APPEND BLOCKING_DEVS_FILES 16 | ${BLOCKING_UTILITIES_FILES} 17 | BlockingDevs.cpp 18 | ) 19 | 20 | list(APPEND BLOCKING_DFT_FILES 21 | ${BLOCKING_UTILITIES_FILES} 22 | BlockingDFT.cpp 23 | DFT.cpp 24 | ) 25 | 26 | list(APPEND BLOCKING_GHG_FILES 27 | ${BLOCKING_UTILITIES_FILES} 28 | BlockingGHG.cpp 29 | ) 30 | 31 | list(APPEND BLOCKING_NORM_DEVS_FILES 32 | ${BLOCKING_UTILITIES_FILES} 33 | BlockingNormDevs.cpp 34 | ) 35 | 36 | list(APPEND BLOCKING_PV_FILES 37 | ${BLOCKING_UTILITIES_FILES} 38 | BlockingPV.cpp 39 | Interpolate.cpp 40 | ) 41 | 42 | list(APPEND BLOCKING_THRESH_FILES 43 | ${BLOCKING_UTILITIES_FILES} 44 | BlockingThresh.cpp 45 | DFT.cpp 46 | ) 47 | 48 | list(APPEND COMBINE_BLOBS_FILES 49 | ${BLOCKING_UTILITIES_FILES} 50 | CombineBlobs.cpp 51 | ) 52 | 53 | list(APPEND DIALY_AVERAGE_FILES 54 | ${BLOCKING_UTILITIES_FILES} 55 | DailyAverage.cpp 56 | ) 57 | 58 | list(APPEND DENSITY_CALCULATIONS_FILES 59 | ${BLOCKING_UTILITIES_FILES} 60 | DensityCalculations.cpp 61 | ) 62 | 63 | list(APPEND DETREND_HEIGHTS_FILES 64 | ${BLOCKING_UTILITIES_FILES} 65 | DetrendHeights.cpp 66 | ) 67 | 68 | list(APPEND EXTRACT_TIME_STEP_FILES 69 | ${BLOCKING_UTILITIES_FILES} 70 | ExtractTimeStep.cpp 71 | ) 72 | 73 | list(APPEND SMOOTH_61_DAY_FILES 74 | ${BLOCKING_UTILITIES_FILES} 75 | Smooth61Day.cpp 76 | ) 77 | 78 | list(APPEND SPLIT_FILE_FILES 79 | ${BLOCKING_UTILITIES_FILES} 80 | SplitFile.cpp 81 | ) 82 | 83 | list(APPEND VAR4D_TO_3D_FILES 84 | ${BLOCKING_UTILITIES_FILES} 85 | Interpolate.cpp 86 | 
Var4Dto3D.cpp 87 | ) 88 | 89 | include_directories( 90 | ${CMAKE_CURRENT_SOURCE_DIR} 91 | ${CMAKE_CURRENT_SOURCE_DIR}/../base 92 | ${CMAKE_CURRENT_SOURCE_DIR}/../blocking 93 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 94 | ${NetCDF_C_INCLUDE_DIR} 95 | ${MPI_CXX_INCLUDE_DIRS} 96 | ) 97 | 98 | add_executable(AvgVar ${AVG_VAR_FILES}) 99 | target_link_libraries(AvgVar PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 100 | 101 | add_executable(BlockingDevs ${BLOCKING_DEVS_FILES}) 102 | target_link_libraries(BlockingDevs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 103 | 104 | add_executable(BlockingDFT ${BLOCKING_DFT_FILES}) 105 | target_link_libraries(BlockingDFT PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 106 | 107 | add_executable(BlockingGHG ${BLOCKING_GHG_FILES}) 108 | target_link_libraries(BlockingGHG PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 109 | 110 | add_executable(BlockingNormDevs ${BLOCKING_NORM_DEVS_FILES}) 111 | target_link_libraries(BlockingNormDevs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 112 | 113 | add_executable(BlockingPV ${BLOCKING_PV_FILES}) 114 | target_link_libraries(BlockingPV PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 115 | 116 | add_executable(BlockingThresh ${BLOCKING_THRESH_FILES}) 117 | target_link_libraries(BlockingThresh PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 118 | 119 | add_executable(CombineBlobs ${COMBINE_BLOBS_FILES}) 120 | target_link_libraries(CombineBlobs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 121 | 122 | add_executable(DailyAverage ${DIALY_AVERAGE_FILES}) 123 | target_link_libraries(DailyAverage PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 124 | 125 | add_executable(DensityCalculations ${DENSITY_CALCULATIONS_FILES}) 126 | target_link_libraries(DensityCalculations PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 127 | 128 | add_executable(DetrendHeights ${DETREND_HEIGHTS_FILES}) 129 | target_link_libraries(DetrendHeights PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 130 | 131 | add_executable(ExtractTimeStep ${EXTRACT_TIME_STEP_FILES}) 132 | target_link_libraries(ExtractTimeStep PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 133 | 134 | add_executable(Smooth61Day ${SMOOTH_61_DAY_FILES}) 135 | target_link_libraries(Smooth61Day PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 136 | 137 | add_executable(SplitFile ${SPLIT_FILE_FILES}) 138 | target_link_libraries(SplitFile PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 139 | 140 | add_executable(Var4Dto3D ${VAR4D_TO_3D_FILES}) 141 | target_link_libraries(Var4Dto3D PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 142 | 143 | 144 | install( 145 | TARGETS 146 | AvgVar 147 | BlockingDevs 148 | BlockingDFT 149 | BlockingGHG 150 | BlockingNormDevs 151 | BlockingPV 152 | BlockingThresh 153 | CombineBlobs 154 | DailyAverage 155 | DensityCalculations 156 | DetrendHeights 157 | ExtractTimeStep 158 | Smooth61Day 159 | SplitFile 160 | Var4Dto3D 161 | RUNTIME DESTINATION bin 162 | ) -------------------------------------------------------------------------------- /src/blocking/CombineBlobs.cpp: -------------------------------------------------------------------------------- 1 | ///////////////////////////////////// 2 | /// \file CombineBlobs.cpp 3 | /// \author Kyle Stachowicz 4 | /// \version May 23, 2017 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "BlockingUtilities.h" 13 | #include "CommandLine.h" 14 | #include "DataArray3D.h" 15 | #include 
"netcdfcpp.h" 16 | 17 | int main(int argc, char** argv) { 18 | std::string inListFileName; 19 | std::string outFileName; 20 | 21 | std::string timeDimName, latDimName, lonDimName; 22 | 23 | std::string blobVarName; 24 | 25 | BeginCommandLine() { 26 | CommandLineString(inListFileName, "inlist", ""); 27 | CommandLineString(outFileName, "out", ""); 28 | 29 | CommandLineString(timeDimName, "timeDim", "time"); 30 | CommandLineString(latDimName, "latDim", "lat"); 31 | CommandLineString(lonDimName, "lonDim", "lon"); 32 | 33 | CommandLineString(blobVarName, "blobVar", ""); 34 | ParseCommandLine(argc, argv); 35 | } 36 | EndCommandLine(argv); 37 | AnnounceBanner(); 38 | 39 | if (blobVarName.length() == 0) { 40 | std::cerr << "Error: no blob variable name provided!" << std::endl; 41 | std::exit(-1); 42 | } 43 | 44 | if (inListFileName.length() == 0) { 45 | std::cerr << "Error: no input list provided!" << std::endl; 46 | std::exit(-1); 47 | } 48 | 49 | std::vector inFileNames; 50 | GetInputFileList(inListFileName,inFileNames); 51 | /* { 52 | std::ifstream inListFile{inListFileName}; 53 | std::string fileNameBuf; 54 | while (std::getline(inListFile, fileNameBuf)) { 55 | inFileNames.push_back(fileNameBuf); 56 | } 57 | } 58 | */ 59 | 60 | int nFiles = inFileNames.size(); 61 | 62 | /* 63 | std::vector> inFiles; 64 | std::string fileName; 65 | // for (auto& fileName : inFileNames) { 66 | for (int x=0; xsize(); 83 | size_t latDimSize = refFile.get_dim(latDimName.c_str())->size(); 84 | size_t lonDimSize = refFile.get_dim(lonDimName.c_str())->size(); 85 | 86 | NcFile outFile{outFileName.c_str(), NcFile::Replace}; 87 | NcDim* outTimeDim = outFile.add_dim(timeDimName.c_str(), timeDimSize); 88 | NcDim* outLatDim = outFile.add_dim(latDimName.c_str(), latDimSize); 89 | NcDim* outLonDim = outFile.add_dim(lonDimName.c_str(), lonDimSize); 90 | 91 | { 92 | NcVar* outTimeVar = 93 | outFile.add_var(timeDimName.c_str(), ncDouble, outTimeDim); 94 | NcVar* outLatVar = outFile.add_var(latDimName.c_str(), ncDouble, outLatDim); 95 | NcVar* outLonVar = outFile.add_var(lonDimName.c_str(), ncDouble, outLonDim); 96 | 97 | NcVar* inTimeVar = refFile.get_var(timeDimName.c_str()); 98 | NcVar* inLatVar = refFile.get_var(latDimName.c_str()); 99 | NcVar* inLonVar = refFile.get_var(lonDimName.c_str()); 100 | 101 | copy_dim_var(inTimeVar, outTimeVar); 102 | copy_dim_var(inLatVar, outLatVar); 103 | copy_dim_var(inLonVar, outLonVar); 104 | } 105 | 106 | refFile.close(); 107 | NcVar* outBlobVar = outFile.add_var(blobVarName.c_str(), ncInt, outTimeDim, 108 | outLatDim, outLonDim); 109 | 110 | DataArray3D outData(timeDimSize, latDimSize, lonDimSize); 111 | DataArray3D inData(timeDimSize, latDimSize, lonDimSize); 112 | 113 | // for (auto& inFile : inFiles) { 114 | for (int x=0; xsize()); 118 | 119 | NcDim* latDim = inFile.get_dim(latDimName.c_str()); 120 | assert(latDimSize == latDim->size()); 121 | 122 | NcDim* lonDim = inFile.get_dim(lonDimName.c_str()); 123 | assert(lonDimSize == lonDim->size()); 124 | 125 | NcVar* blobVar = inFile.get_var(blobVarName.c_str()); 126 | blobVar->set_cur(0, 0, 0); 127 | blobVar->get(&(inData[0][0][0]), timeDimSize, latDimSize, lonDimSize); 128 | 129 | for (size_t t = 0; t < timeDimSize; t++) { 130 | for (size_t lat = 0; lat < latDimSize; lat++) { 131 | for (size_t lon = 0; lon < lonDimSize; lon++) { 132 | outData[t][lat][lon] = outData[t][lat][lon] || inData[t][lat][lon]; 133 | } 134 | } 135 | } 136 | inFile.close(); 137 | } 138 | 139 | outBlobVar->put(&(outData[0][0][0]), timeDimSize, latDimSize, lonDimSize); 
140 | 141 | outFile.close(); 142 | } 143 | -------------------------------------------------------------------------------- /src/blocking/DFT.cpp: -------------------------------------------------------------------------------- 1 | ///////////////////////////////////////////////////////// 2 | /// 3 | /// DFT.cpp 4 | /// Author: Marielle Pinheiro 5 | /// Version 1.0 May 8, 2017 6 | /// 7 | ///////////////////////////////////////////////////////// 8 | 9 | /* 10 | This is a discrete Fourier transform function that will perform 11 | */ 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include "DFT.h" 18 | #include "Exception.h" 19 | 20 | std::vector > DFT(std::vector inputVals, 21 | int numCoefs 22 | ){ 23 | double pi = std::atan(1.0)*4.0; 24 | std::complex compi(0.,1.); 25 | 26 | int N = inputVals.size(); 27 | 28 | if (numCoefs > N){ 29 | _EXCEPTIONT("Number of specified coefficients exceeds length of input vector."); 30 | } 31 | 32 | //Declare the output array for the Fourier coefficients 33 | std::vector > FourierCoefs(N); 34 | 35 | //Couple of values for calculations 36 | double Ndiv = 1./N; 37 | std::complex expCoef(0.,0.); 38 | std::complex sumVals; 39 | int firstHalfNum = numCoefs/2; 40 | int secHalfNum = numCoefs-firstHalfNum; 41 | 42 | //Begin calculating the coefficients 43 | if (numCoefs(0.,0.); 48 | for (int n=0; n(0.,0.); 58 | for (int n=0; n(0.,0.); 70 | for (int n=0; n(0.,0.); 81 | } 82 | }*/ 83 | return(FourierCoefs); 84 | } 85 | 86 | std::vector IDFT(std::vector > FFTvals){ 87 | double pi = std::atan(1.)*4.; 88 | std::complex compi(0.,1.); 89 | 90 | int N = FFTvals.size(); 91 | //Create an output data vector (only the real part will be returned) 92 | std::vector > outputs(N); 93 | double Ndiv = 1./N; 94 | std::complex expCoef(0.,0.); 95 | std::complex sumVals; 96 | 97 | for (int k=0; k(0.,0.); 101 | for (int n=0; n realOutputs(N); 108 | for (int i=0; i 13 | #include 14 | #include 15 | #include 16 | #include "Exception.h" 17 | 18 | std::vector > DFT(std::vector inputVals, 19 | int numCoefs 20 | ); 21 | 22 | std::vector IDFT(std::vector > FFTvals); 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /src/blocking/Interp_z500.cpp: -------------------------------------------------------------------------------- 1 | ////////////////////////////////// 2 | /// 3 | /// \file interpolate.cpp 4 | /// \author Marielle Pinheiro 5 | /// \version March 24, 2015 6 | 7 | #include "Interp_z500.h" 8 | #include "BlockingUtilities.h" 9 | #include "NetCDFUtilities.h" 10 | #include "netcdfcpp.h" 11 | #include "DataArray1D.h" 12 | #include "DataArray3D.h" 13 | #include "DataArray4D.h" 14 | #include 15 | #include 16 | #include 17 | 18 | //////////////////////////////////////////////////////////////////////// 19 | 20 | void interp_1lev(NcVar *var, 21 | NcVar *hyam, 22 | NcVar *hybm, 23 | NcVar *ps, 24 | double plev, 25 | NcVar *NewVar 26 | ){ 27 | 28 | int nTime,nLev,nLat,nLon; 29 | double A1,A2,B1,B2,p1,p2,weight; 30 | 31 | //Array dimensions 32 | nTime = var->get_dim(0)->size(); 33 | nLev = var->get_dim(1)->size(); 34 | nLat = var->get_dim(2)->size(); 35 | nLon = var->get_dim(3)->size(); 36 | 37 | //Matrix to store PS 38 | DataArray3D matPS(nTime, nLat, nLon); 39 | ps->set_cur(0, 0, 0); 40 | ps->get(&(matPS[0][0][0]), nTime, nLat, nLon); 41 | 42 | //Matrix to store input variable data 43 | DataArray4D matVar(nTime, nLev, nLat, nLon); 44 | var->set_cur(0, 0, 0, 0); 45 | var->get(&(matVar[0][0][0][0]), nTime, nLev, nLat, nLon); 46 | 
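// The interpolation loop below converts hybrid model levels to pressure.
// The pressure of hybrid level l in column (t,a,b) is
//
//     p(l) = vecHyam[l] * P0 + vecHybm[l] * matPS[t][a][b]
//
// (hybrid sigma-pressure coordinates; P0 is the reference pressure, commonly
// 100000 Pa, an assumption here since the constant is not visible in this
// routine). Once two consecutive levels bracket the target, p1 <= plev <= p2,
// the output is formed by linear interpolation in pressure:
//
//     weight = (plev - p1) / (p2 - p1)
//     out    = weight * var[l+1] + (1 - weight) * var[l]
//
// For example, p1 = 45000 Pa, p2 = 55000 Pa and plev = 50000 Pa give
// weight = 0.5, the midpoint average of the two bracketing levels.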
47 | //Matrix to store output variable data 48 | DataArray3D matVarOut(nTime, nLat, nLon); 49 | 50 | //hybrid coefficient A 51 | DataArray1D vecHyam(nLev); 52 | hyam->set_cur((long) 0); 53 | hyam->get(&(vecHyam[0]), nLev); 54 | 55 | //hybrid coefficient B 56 | DataArray1D vecHybm(nLev); 57 | hybm->set_cur((long) 0); 58 | hybm->get(&(vecHybm[0]), nLev); 59 | 60 | //Loop over input data and interpolate to output var 61 | for (int t=0; t=plev){ 72 | weight = ((plev-p1)/(p2-p1)); 73 | matVarOut[t][a][b] = weight*matVar[t][l+1][a][b] 74 | + (1.0-weight)*matVar[t][l][a][b]; 75 | } 76 | } 77 | } 78 | } 79 | } 80 | std::cout<<"Finished interpolating variable.\n"; 81 | 82 | NewVar->set_cur(0, 0, 0); 83 | NewVar->put(&(matVarOut[0][0][0]), nTime, nLat, nLon); 84 | CopyNcVarAttributes(var, NewVar); 85 | } 86 | 87 | 88 | 89 | void interp_z500(NcFile & readin, 90 | const std::string & strname_2d, 91 | const std::string & varname, 92 | NcFile & ifile_out) { 93 | 94 | //open 2D PS file 95 | NcFile readin_2d(strname_2d.c_str()); 96 | if (!readin_2d.is_valid()) { 97 | _EXCEPTION1("Unable to open file \"%s\" for reading", 98 | strname_2d.c_str()); 99 | } 100 | 101 | //Dimensions and associated variables 102 | NcDim *time = readin.get_dim("time"); 103 | int time_len = time->size(); 104 | NcVar *timevar = readin.get_var("time"); 105 | 106 | NcDim *lev = readin.get_dim("lev"); 107 | int lev_len = lev->size(); 108 | 109 | NcDim *lat = readin.get_dim("lat"); 110 | int lat_len = lat->size(); 111 | NcVar *latvar = readin.get_var("lat"); 112 | 113 | NcDim *lon = readin.get_dim("lon"); 114 | int lon_len = lon->size(); 115 | NcVar *lonvar = readin.get_var("lon"); 116 | 117 | //Variables 118 | NcVar *zvar = readin.get_var(varname.c_str()); 119 | 120 | //2D variables 121 | NcVar *ps = readin_2d.get_var("PS"); 122 | NcVar *hyam = readin.get_var("hyam"); 123 | NcVar *hybm = readin.get_var("hybm"); 124 | 125 | //Write information to outfile 126 | NcDim *itime = ifile_out.add_dim("time", time_len); 127 | NcDim *ilat = ifile_out.add_dim("lat", lat_len); 128 | NcDim *ilon = ifile_out.add_dim("lon", lon_len); 129 | 130 | NcVar *itime_vals = ifile_out.add_var("time", ncDouble, itime); 131 | NcVar *ilat_vals = ifile_out.add_var("lat", ncDouble, ilat); 132 | NcVar *ilon_vals = ifile_out.add_var("lon", ncDouble, ilon); 133 | 134 | copy_dim_var(timevar, itime_vals); 135 | copy_dim_var(latvar, ilat_vals); 136 | copy_dim_var(lonvar, ilon_vals); 137 | 138 | //Add interpolated variables to interpolated outfile 139 | NcVar *iz = ifile_out.add_var("Z500", ncDouble, itime, ilat, ilon); 140 | interp_1lev(zvar, hyam, hybm, ps, 50000.0, iz); 141 | 142 | 143 | readin_2d.close(); 144 | std::cout<<"Finished interpolating Z to 500 mb level."< 19 | #include 20 | #include 21 | 22 | void interp_1lev(NcVar *var, 23 | NcVar *hyam, 24 | NcVar *hybm, 25 | NcVar *ps, 26 | double plev, 27 | NcVar *NewVar 28 | ); 29 | 30 | void interp_z500(NcFile & readin, 31 | const std::string & strname_2d, 32 | const std::string & varname, 33 | NcFile & ifile_out); 34 | 35 | #endif 36 | -------------------------------------------------------------------------------- /src/blocking/Interpolate.cpp: -------------------------------------------------------------------------------- 1 | ////////////////////////////////// 2 | /// 3 | /// \file interpolate.cpp 4 | /// \author Marielle Pinheiro 5 | /// \version March 24, 2015 6 | 7 | #include "Interpolate.h" 8 | #include "BlockingUtilities.h" 9 | #include "NetCDFUtilities.h" 10 | #include "netcdfcpp.h" 11 | #include 
"DataArray1D.h" 12 | #include "DataArray3D.h" 13 | #include "DataArray4D.h" 14 | #include 15 | #include 16 | #include 17 | 18 | //////////////////////////////////////////////////////////////////////// 19 | 20 | void interp_util(NcFile & readin, 21 | const std::string & strname_2d, 22 | const std::string & varlist, 23 | NcFile & ifile_out) { 24 | 25 | //open 2D PS file 26 | NcFile readin_2d(strname_2d.c_str()); 27 | if (!readin_2d.is_valid()) { 28 | _EXCEPTION1("Unable to open file \"%s\" for reading", 29 | strname_2d.c_str()); 30 | } 31 | 32 | //Dimensions and associated variables 33 | NcDim *time = readin.get_dim("time"); 34 | int time_len = time->size(); 35 | NcVar *timevar = readin.get_var("time"); 36 | 37 | NcDim *lev = readin.get_dim("lev"); 38 | int lev_len = lev->size(); 39 | 40 | NcDim *lat = readin.get_dim("lat"); 41 | int lat_len = lat->size(); 42 | NcVar *latvar = readin.get_var("lat"); 43 | 44 | NcDim *lon = readin.get_dim("lon"); 45 | int lon_len = lon->size(); 46 | NcVar *lonvar = readin.get_var("lon"); 47 | 48 | //Variables 49 | /* NcVar *temp = readin.get_var("T"); 50 | NcVar *uvar = readin.get_var("U"); 51 | NcVar *vvar = readin.get_var("V"); 52 | */ 53 | 54 | //2D variables 55 | NcVar *ps = readin_2d.get_var("PS"); 56 | NcVar *hyam = readin_2d.get_var("hyam"); 57 | NcVar *hybm = readin_2d.get_var("hybm"); 58 | 59 | //Create pressure level vector 60 | int plev_len = (100000.0-5000.0)/(5000.0); 61 | 62 | DataArray1D pVals(plev_len); 63 | 64 | for (int i=0; iset_cur((long) 0); 87 | ilev_vals->put(&(pVals[0]), plev_len); 88 | 89 | //Split var list 90 | std::string delim = ","; 91 | size_t pos = 0; 92 | std::string token; 93 | std::vector varVec; 94 | std::string listcopy = varlist; 95 | 96 | while ((pos = listcopy.find(delim)) != std::string::npos){ 97 | token = listcopy.substr(0,pos); 98 | varVec.push_back(token); 99 | listcopy.erase(0,pos + delim.length()); 100 | } 101 | varVec.push_back(listcopy); 102 | 103 | /* 104 | //reads the string (separated by the delimiter) into the vector 105 | while((pos = varlist.find(delim)) != std::string::npos){ 106 | // token = varlist.substr(0,pos); 107 | varVec.push_back(token); 108 | varlist.erase(0,pos + delim.length()); 109 | // varlist.erase((std::string::size_type)0,(std::string::size_type)pos + delim.length()); 110 | 111 | } 112 | varVec.push_back(varlist); 113 | */ 114 | 115 | for (int v=0; v 19 | #include 20 | #include 21 | 22 | void interp_util(NcFile & readin, 23 | const std::string & strname_2d, 24 | const std::string & varlist, 25 | NcFile & ifile_out); 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /src/blocking/calcLinReg.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | import xarray as xa 4 | import pandas as pd 5 | 6 | 7 | 8 | parser=argparse.ArgumentParser(description="Provide a text file list for the linear regression calculation.") 9 | parser.add_argument("-f","--filelist",required=True,action="store",dest="lsname") 10 | parser.add_argument("-o","--out",required=True,action="store") 11 | parser.add_argument("--timename",default="time") 12 | parser.add_argument("--latname",default="lat") 13 | parser.add_argument("--lonname",default="lon") 14 | parser.add_argument("--varname",default="SMOOTHED_Z500") 15 | results=parser.parse_args() 16 | print("Opening file list.") 17 | flist= open(results.lsname,"r") 18 | datafiles=flist.read().splitlines() 19 | nyears=len(datafiles) 20 | 
yrange=np.arange(0.,nyears) 21 | 22 | #This is just a pointer to the dataset 23 | dataset=xa.open_mfdataset(datafiles,decode_times=False,concat_dim=results.timename) 24 | #rename dims if necessary 25 | dataset=dataset.rename({results.timename:'time',results.latname:'lat',results.lonname:'lon'}) 26 | 27 | #Read in the data 28 | 29 | svar=dataset[results.varname][:] 30 | tvar=dataset['time'][:] 31 | latvar=dataset['lat'][:] 32 | lonvar=dataset['lon'][:] 33 | nlat=len(latvar) 34 | nlon=len(lonvar) 35 | ndays=dataset.chunks[results.timename][0] 36 | #Initialize the arrays that will store the values 37 | marray=np.zeros((ndays,nlat,nlon)) 38 | barray=np.zeros((ndays,nlat,nlon)) 39 | print("Initialized arrays. Calculating linear regression coefficients.") 40 | for t in range(0,ndays): 41 | #Generate the dates for the linear regression 42 | #nday=int(dataset.time.dt.day[t]) 43 | #nmonth=int(dataset.time.dt.month[t]) 44 | #nyear=int(dataset.time.dt.year[t]) 45 | #dstring="{:04d}/{:02d}/{:02d}".format(nyear,nmonth,nday) 46 | #print(dstring) 47 | #tsel=pd.date_range(start=dstring,periods=nyears,freq=pd.DateOffset(years=1)) 48 | #Subset the data by the time steps 49 | zpts=svar.sel(time=t) 50 | print("Subsetting for time {:d}".format(t)) 51 | #Fit the linear regression to each lat/lon 52 | for x in range(0,nlat): 53 | for y in range(0,nlon): 54 | zsub=zpts.isel(lat=x,lon=y).values 55 | print("subset for indices {:},{:}".format(x,y)) 56 | #Check for nan 57 | idx=np.isfinite(yrange) & np.isfinite(zsub) 58 | m,b= np.polyfit(yrange[idx],zsub[idx],1) 59 | marray[t,x,y]=m 60 | barray[t,x,y]=b 61 | 62 | #Write the output file 63 | print("Writing {} to file".format(results.out)) 64 | daysSince=np.arange(0.,ndays,dtype='d') 65 | dataset_out=xa.Dataset({'slope':(['time','lat','lon'],marray), 66 | 'intercept':(['time','lat','lon'],barray)}, 67 | coords={'time':daysSince, 68 | 'lat':latvar, 69 | 'lon':lonvar}) 70 | 71 | netcdf_calendar = "standard" 72 | if (ndays<365): 73 | netcdf_calendar="360_day" 74 | 75 | dataset_out[results.timename].attrs={"units":"days since 0001-01-01","calendar":netcdf_calendar} 76 | dataset_out.to_netcdf(results.out) 77 | -------------------------------------------------------------------------------- /src/netcdf-cxx-4.2/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. (See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND FILES 7 | ncvalues.cpp 8 | netcdf.cpp 9 | ) 10 | 11 | add_library( 12 | netcdf_c++ 13 | STATIC 14 | ${FILES} 15 | ) 16 | 17 | target_include_directories( 18 | netcdf_c++ 19 | PRIVATE 20 | ${NetCDF_C_INCLUDE_DIR} 21 | ${CMAKE_CURRENT_SOURCE_DIR} 22 | ) 23 | 24 | target_link_libraries( 25 | netcdf_c++ 26 | PUBLIC 27 | NetCDF::NetCDF_C 28 | ${MPI_CXX_LIBRARIES} 29 | ) 30 | 31 | # Install the static library to the "lib" folder 32 | install( 33 | TARGETS netcdf_c++ 34 | ARCHIVE DESTINATION lib 35 | ) 36 | -------------------------------------------------------------------------------- /src/netcdf-cxx-4.2/COPYRIGHT: -------------------------------------------------------------------------------- 1 | /*! \file 2 | The NetCDF Copyright. 
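calcLinReg.py above fits, at every grid point, a straight line z = m*year + b across years using np.polyfit with degree 1. For reference, the same degree-1 least-squares fit in closed form (a sketch, not code from this repository):

#include <cstddef>

// Ordinary least squares for z = m*t + b: the slope is
// m = (n*sum(t*z) - sum(t)*sum(z)) / (n*sum(t^2) - sum(t)^2) and the
// intercept is b = mean(z) - m*mean(t).
void FitLine(const double * t, const double * z, size_t n, double & m, double & b) {
	double dSumT = 0.0, dSumZ = 0.0, dSumTT = 0.0, dSumTZ = 0.0;
	for (size_t i = 0; i < n; i++) {
		dSumT += t[i];
		dSumZ += z[i];
		dSumTT += t[i] * t[i];
		dSumTZ += t[i] * z[i];
	}
	m = (double(n) * dSumTZ - dSumT * dSumZ) / (double(n) * dSumTT - dSumT * dSumT);
	b = (dSumZ - m * dSumT) / double(n);
}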
3 | 4 | \page copyright Copyright 5 | 6 | Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 7 | 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 University 8 | Corporation for Atmospheric Research/Unidata. 9 | 10 | Portions of this software were developed by the Unidata Program at the 11 | University Corporation for Atmospheric Research. 12 | 13 | Access and use of this software shall impose the following obligations 14 | and understandings on the user. The user is granted the right, without 15 | any fee or cost, to use, copy, modify, alter, enhance and distribute 16 | this software, and any derivative works thereof, and its supporting 17 | documentation for any purpose whatsoever, provided that this entire 18 | notice appears in all copies of the software, derivative works and 19 | supporting documentation. Further, UCAR requests that the user credit 20 | UCAR/Unidata in any publications that result from the use of this 21 | software or in any product that includes this software, although this 22 | is not an obligation. The names UCAR and/or Unidata, however, may not 23 | be used in any advertising or publicity to endorse or promote any 24 | products or commercial entity unless specific written permission is 25 | obtained from UCAR/Unidata. The user also understands that 26 | UCAR/Unidata is not obligated to provide the user with any support, 27 | consulting, training or assistance of any kind with regard to the use, 28 | operation and performance of this software nor to provide the user 29 | with any updates, revisions, new versions or "bug fixes." 30 | 31 | THIS SOFTWARE IS PROVIDED BY UCAR/UNIDATA "AS IS" AND ANY EXPRESS OR 32 | IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 33 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 34 | DISCLAIMED. IN NO EVENT SHALL UCAR/UNIDATA BE LIABLE FOR ANY SPECIAL, 35 | INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING 36 | FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, 37 | NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 38 | WITH THE ACCESS, USE OR PERFORMANCE OF THIS SOFTWARE. 39 | */ 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /src/netcdf-cxx-4.2/README: -------------------------------------------------------------------------------- 1 | netcdfcpp.h the C++ interface 2 | 3 | netcdf.cpp the implementation of the interface, on top of the current 4 | C library interface 5 | 6 | nctst.cpp a test program for the interface that creates a netCDF file 7 | and then dumps out its contents in ASCII form to stdout. 8 | This example may also be helpful in understanding how the 9 | interface is intended to be used. 10 | 11 | example.c example of C code needed to create a small netCDF file 12 | 13 | example.cpp analogous example of C++ code needed to do the same thing 14 | 15 | Makefile makefile for building nctst 16 | 17 | ncvalues.cpp interface for auxilliary classes of typed arrays needed by 18 | netCDF interface; fairly simple 19 | 20 | ncvalues.cpp implementation of auxilliary classes of typed arrays needed by 21 | netCDF interface 22 | 23 | README this file 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /src/netcdf-cxx-4.2/config.h: -------------------------------------------------------------------------------- 1 | /* config.h. Generated from config.h.in by configure. */ 2 | /* config.h.in. 
Generated from configure.ac by autoheader. */ 3 | 4 | /* if true, run extra tests which may not work yet */ 5 | /* #undef EXTRA_TESTS */ 6 | 7 | /* Define to 1 if you have the header file. */ 8 | #define HAVE_DLFCN_H 1 9 | 10 | /* Define to 1 if you have the header file. */ 11 | #define HAVE_INTTYPES_H 1 12 | 13 | /* Define to 1 if you have the header file. */ 14 | #define HAVE_MEMORY_H 1 15 | 16 | /* Define to 1 if you have the `nccreate' function. */ 17 | /* #undef HAVE_NCCREATE */ 18 | 19 | /* Define to 1 if you have the `nc_def_opaque' function. */ 20 | /* #undef HAVE_NC_DEF_OPAQUE */ 21 | 22 | /* Define to 1 if you have the `nc_set_log_level' function. */ 23 | /* #undef HAVE_NC_SET_LOG_LEVEL */ 24 | 25 | /* Define to 1 if you have the `nc_use_parallel_enabled' function. */ 26 | /* #undef HAVE_NC_USE_PARALLEL_ENABLED */ 27 | 28 | /* Define to 1 if you have the header file. */ 29 | #define HAVE_NETCDF_H 1 30 | 31 | /* Define to 1 if you have the header file. */ 32 | #define HAVE_STDINT_H 1 33 | 34 | /* Define to 1 if you have the header file. */ 35 | #define HAVE_STDLIB_H 1 36 | 37 | /* Define to 1 if you have the header file. */ 38 | #define HAVE_STRINGS_H 1 39 | 40 | /* Define to 1 if you have the header file. */ 41 | #define HAVE_STRING_H 1 42 | 43 | /* Define to 1 if you have the header file. */ 44 | #define HAVE_SYS_STAT_H 1 45 | 46 | /* Define to 1 if you have the header file. */ 47 | #define HAVE_SYS_TYPES_H 1 48 | 49 | /* Define to 1 if you have the header file. */ 50 | #define HAVE_UNISTD_H 1 51 | 52 | /* do large file tests */ 53 | /* #undef LARGE_FILE_TESTS */ 54 | 55 | /* Define to the sub-directory in which libtool stores uninstalled libraries. 56 | */ 57 | #define LT_OBJDIR ".libs/" 58 | 59 | /* Name of package */ 60 | #define PACKAGE "netcdf-cxx" 61 | 62 | /* Define to the address where bug reports for this package should be sent. */ 63 | #define PACKAGE_BUGREPORT "support-netcdf@unidata.ucar.edu" 64 | 65 | /* Define to the full name of this package. */ 66 | #define PACKAGE_NAME "netCDF-cxx" 67 | 68 | /* Define to the full name and version of this package. */ 69 | #define PACKAGE_STRING "netCDF-cxx 4.2" 70 | 71 | /* Define to the one symbol short name of this package. */ 72 | #define PACKAGE_TARNAME "netcdf-cxx" 73 | 74 | /* Define to the home page for this package. */ 75 | #define PACKAGE_URL "" 76 | 77 | /* Define to the version of this package. */ 78 | #define PACKAGE_VERSION "4.2" 79 | 80 | /* Define to 1 if you have the ANSI C header files. */ 81 | #define STDC_HEADERS 1 82 | 83 | /* Place to put very large netCDF test files. */ 84 | #define TEMP_LARGE "." 85 | 86 | /* Version number of package */ 87 | #define VERSION "4.2" 88 | 89 | /* Number of bits in a file offset, on hosts where this is settable. */ 90 | /* #undef _FILE_OFFSET_BITS */ 91 | 92 | /* Define for large files, on AIX-style hosts. */ 93 | /* #undef _LARGE_FILES */ 94 | -------------------------------------------------------------------------------- /src/netcdf-cxx-4.2/netcdf.hh: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /src/nodes/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. 
(See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND DETECT_NODES_FILES 7 | DetectNodes.cpp 8 | ) 9 | 10 | list(APPEND HISTOGRAM_NODES_FILES 11 | HistogramNodes.cpp 12 | ) 13 | 14 | list(APPEND NODE_FILE_COMPOSE_FILES 15 | NodeFileCompose.cpp 16 | ) 17 | 18 | list(APPEND NODE_FILE_EDITOR_FILES 19 | NodeFileEditor.cpp CalculationList.cpp 20 | ) 21 | 22 | list(APPEND NODE_FILE_FILTER_FILES 23 | NodeFileFilter.cpp 24 | ) 25 | 26 | list(APPEND STITCH_NODES_FILES 27 | StitchNodes.cpp 28 | ) 29 | 30 | include_directories( 31 | ${CMAKE_CURRENT_SOURCE_DIR} 32 | ${CMAKE_CURRENT_SOURCE_DIR}/../base 33 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 34 | ${NetCDF_C_INCLUDE_DIR} 35 | ${MPI_CXX_INCLUDE_DIRS} 36 | ) 37 | 38 | add_executable(DetectNodes ${DETECT_NODES_FILES}) 39 | target_link_libraries(DetectNodes PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 40 | 41 | add_executable(HistogramNodes ${HISTOGRAM_NODES_FILES}) 42 | target_link_libraries(HistogramNodes PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 43 | 44 | add_executable(NodeFileCompose ${NODE_FILE_COMPOSE_FILES}) 45 | target_link_libraries(NodeFileCompose PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 46 | 47 | add_executable(NodeFileEditor ${NODE_FILE_EDITOR_FILES}) 48 | target_link_libraries(NodeFileEditor PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 49 | 50 | add_executable(NodeFileFilter ${NODE_FILE_FILTER_FILES}) 51 | target_link_libraries(NodeFileFilter PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 52 | 53 | add_executable(StitchNodes ${STITCH_NODES_FILES}) 54 | target_link_libraries(StitchNodes PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 55 | 56 | install( 57 | TARGETS DetectNodes HistogramNodes NodeFileCompose NodeFileEditor NodeFileFilter StitchNodes 58 | RUNTIME DESTINATION bin 59 | ) 60 | -------------------------------------------------------------------------------- /src/nodes/CalculationList.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file CalculationList.h 4 | /// \author Paul Ullrich 5 | /// \version May 25, 2025 6 | 7 | #ifndef _CALCULATIONLIST_H_ 8 | #define _CALCULATIONLIST_H_ 9 | 10 | #include "Exception.h" 11 | 12 | #include 13 | #include 14 | 15 | /////////////////////////////////////////////////////////////////////////////// 16 | 17 | /// 18 | /// A single calculation command, including lhs, rhs and arguments. 19 | /// 20 | class CalculationCommand { 21 | 22 | public: 23 | /// 24 | /// Populate the CalculationCommand from a string. 25 | /// 26 | void Parse( 27 | const std::string & strCalculateCommand 28 | ); 29 | 30 | /// 31 | /// Get a string representation of this command. 32 | /// 33 | std::string ToString() const; 34 | 35 | public: 36 | /// 37 | /// Variable receiving the output. 38 | /// 39 | std::string lhs; 40 | 41 | /// 42 | /// Command to be executed. 43 | /// 44 | std::string rhs; 45 | 46 | /// 47 | /// Command arguments. 48 | /// 49 | std::vector arg; 50 | }; 51 | 52 | /////////////////////////////////////////////////////////////////////////////// 53 | 54 | /// 55 | /// A list of CalculationCommands. 56 | /// 57 | class CalculationList { 58 | 59 | public: 60 | /// 61 | /// Parse a list of calculate commands. 62 | /// 63 | void Parse( 64 | const std::string & strCalculateCommand 65 | ); 66 | 67 | /// 68 | /// Number of CalculationCommands. 
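CalculationCommand::Parse above splits one editor command into an output variable (lhs), an operation name (rhs) and its arguments; the concrete grammar lives in CalculationList.cpp. A minimal sketch under the assumption that commands take the form lhs = rhs(arg1,arg2):

#include "CalculationList.h"

// Sketch: parse a single NodeFileEditor calculation command (format assumed).
void ExampleParse() {
	CalculationCommand cmd;
	cmd.Parse("prmsl_min = min(prmsl,2.0)");
	// Under the assumed grammar: cmd.lhs == "prmsl_min", cmd.rhs == "min",
	// and cmd.arg holds {"prmsl", "2.0"}.
}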
69 | /// </summary> 70 | size_t size() const { 71 | return m_vecCommands.size(); 72 | } 73 | 74 | /// <summary> 75 | /// Get the specified CalculationCommand. 76 | /// </summary> 77 | const CalculationCommand & operator[](size_t s) const { 78 | if (s >= m_vecCommands.size()) { 79 | _EXCEPTION2("Index out of range (%lu >= %lu)", s, m_vecCommands.size()); 80 | } 81 | return m_vecCommands[s]; 82 | } 83 | 84 | private: 85 | /// <summary> 86 | /// Vector of CalculationCommands. 87 | /// </summary> 88 | std::vector<CalculationCommand> m_vecCommands; 89 | }; 90 | 91 | /////////////////////////////////////////////////////////////////////////////// 92 | 93 | #endif // _CALCULATIONLIST_H_ 94 | 95 | -------------------------------------------------------------------------------- /src/nodes/ClosedContourOp.h: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file ClosedContourOp.h 4 | /// \author Paul Ullrich 5 | /// \version February 6, 2020 6 | /// 7 | /// <remarks> 8 | /// Copyright 2020 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// </remarks> 16 | 17 | #ifndef _CLOSEDCONTOUROP_H_ 18 | #define _CLOSEDCONTOUROP_H_ 19 | 20 | #include "Variable.h" 21 | 22 | #include <string> 23 | 24 | /////////////////////////////////////////////////////////////////////////////// 25 | 26 | /// <summary> 27 | /// A class describing a general closed contour operation. 28 | /// </summary> 29 | class ClosedContourOp { 30 | 31 | public: 32 | /// <summary> 33 | /// Constructor. 34 | /// </summary> 35 | ClosedContourOp() : 36 | m_varix(InvalidVariableIndex), 37 | m_dDeltaAmount(0.0), 38 | m_dDistance(0.0), 39 | m_dMinMaxDist(0.0) 40 | { } 41 | 42 | public: 43 | /// <summary> 44 | /// Parse a closed contour operation string.
45 | /// </summary> 46 | void Parse( 47 | VariableRegistry & varreg, 48 | const std::string & strOp 49 | ) { 50 | // Read mode 51 | enum { 52 | ReadMode_Amount, 53 | ReadMode_Distance, 54 | ReadMode_MinMaxDist, 55 | ReadMode_Invalid 56 | } eReadMode = ReadMode_Amount; 57 | 58 | // Get variable information 59 | int iLast = varreg.FindOrRegisterSubStr(strOp, &m_varix) + 1; 60 | 61 | // Loop through string 62 | for (int i = iLast; i <= strOp.length(); i++) { 63 | 64 | // Comma-delineated 65 | if ((i == strOp.length()) || (strOp[i] == ',')) { 66 | 67 | std::string strSubStr = 68 | strOp.substr(iLast, i - iLast); 69 | 70 | // Read in amount 71 | if (eReadMode == ReadMode_Amount) { 72 | m_dDeltaAmount = atof(strSubStr.c_str()); 73 | 74 | iLast = i + 1; 75 | eReadMode = ReadMode_Distance; 76 | 77 | // Read in distance 78 | } else if (eReadMode == ReadMode_Distance) { 79 | m_dDistance = atof(strSubStr.c_str()); 80 | 81 | iLast = i + 1; 82 | eReadMode = ReadMode_MinMaxDist; 83 | 84 | // Read in min/max distance 85 | } else if (eReadMode == ReadMode_MinMaxDist) { 86 | m_dMinMaxDist = atof(strSubStr.c_str()); 87 | 88 | iLast = i + 1; 89 | eReadMode = ReadMode_Invalid; 90 | 91 | // Invalid 92 | } else if (eReadMode == ReadMode_Invalid) { 93 | _EXCEPTION1("\nToo many entries in closed contour op \"%s\"" 94 | "\nRequired: \"<name>,<amount>,<dist>," 95 | "<minmaxdist>\"", 96 | strOp.c_str()); 97 | } 98 | } 99 | } 100 | 101 | if (eReadMode != ReadMode_Invalid) { 102 | _EXCEPTION1("\nInsufficient entries in closed contour op \"%s\"" 103 | "\nRequired: \"<name>,<amount>,<dist>," 104 | "<minmaxdist>\"", 105 | strOp.c_str()); 106 | } 107 | 108 | // Check operation arguments, then announce the operation 109 | if (m_dDeltaAmount == 0.0) { 110 | _EXCEPTIONT("For closed contour op, delta amount must be non-zero"); 111 | } 112 | if (m_dDistance <= 0.0) { 113 | _EXCEPTIONT("For closed contour op, distance must be positive"); 114 | } 115 | if (m_dMinMaxDist < 0.0) { 116 | _EXCEPTIONT("For closed contour op, min/max dist must be nonnegative"); 117 | } 118 | 119 | if (m_dDeltaAmount < 0.0) { 120 | Announce("%s decreases by %f over %f degrees" 121 | " (max search %f deg)", 122 | varreg.GetVariableString(m_varix).c_str(), 123 | -m_dDeltaAmount, 124 | m_dDistance, 125 | m_dMinMaxDist); 126 | 127 | } else { 128 | Announce("%s increases by %f over %f degrees" 129 | " (min search %f deg)", 130 | varreg.GetVariableString(m_varix).c_str(), 131 | m_dDeltaAmount, 132 | m_dDistance, 133 | m_dMinMaxDist); 134 | } 135 | } 136 | 137 | public: 138 | /// <summary> 139 | /// Variable to use for closed contour op. 140 | /// </summary> 141 | VariableIndex m_varix; 142 | 143 | /// <summary> 144 | /// Threshold amount. If positive this represents a minimum 145 | /// increase. If negative this represents a minimum decrease. 146 | /// </summary> 147 | double m_dDeltaAmount; 148 | 149 | /// <summary> 150 | /// Threshold distance (in degrees). 151 | /// </summary> 152 | double m_dDistance; 153 | 154 | /// <summary> 155 | /// Distance to search for min or max (in degrees). 156 | /// </summary> 157 | double m_dMinMaxDist; 158 | }; 159 | 160 | /////////////////////////////////////////////////////////////////////////////// 161 | 162 | #endif // _CLOSEDCONTOUROP_H_ 163 | 164 | -------------------------------------------------------------------------------- /src/sandbox/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0.
(See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND SPINE_ARS_FILES 7 | SpineARs.cpp 8 | ) 9 | 10 | list(APPEND COMPRESS_BLOBS_FILES 11 | CompressBlobs.cpp 12 | ) 13 | 14 | include_directories( 15 | ${CMAKE_CURRENT_SOURCE_DIR} 16 | ${CMAKE_CURRENT_SOURCE_DIR}/../base 17 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 18 | ${NetCDF_C_INCLUDE_DIR} 19 | ${MPI_CXX_INCLUDE_DIRS} 20 | ) 21 | 22 | add_executable(SpineARs ${SPINE_ARS_FILES}) 23 | target_link_libraries(SpineARs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 24 | 25 | add_executable(CompressBlobs ${COMPRESS_BLOBS_FILES}) 26 | target_link_libraries(CompressBlobs PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 27 | 28 | install( 29 | TARGETS SpineARs CompressBlobs 30 | RUNTIME DESTINATION bin 31 | ) 32 | -------------------------------------------------------------------------------- /src/util/AutoCuratorTool.cpp: -------------------------------------------------------------------------------- 1 | /////////////////////////////////////////////////////////////////////////////// 2 | /// 3 | /// \file AutoCuratorTool.cpp 4 | /// \author Paul Ullrich 5 | /// \version January 9, 2024 6 | /// 7 | /// <remarks> 8 | /// Copyright 2024 Paul Ullrich 9 | /// 10 | /// This file is distributed as part of the Tempest source code package. 11 | /// Permission is granted to use, copy, modify and distribute this 12 | /// source code and its documentation under the terms of the GNU General 13 | /// Public License. This software is provided "as is" without express 14 | /// or implied warranty. 15 | /// </remarks> 16 | 17 | #include "CommandLine.h" 18 | #include "Exception.h" 19 | #include "Announce.h" 20 | #include "AutoCurator.h" 21 | 22 | #include <fstream> 23 | 24 | /////////////////////////////////////////////////////////////////////////////// 25 | 26 | int main(int argc, char** argv) { 27 | 28 | // Turn off fatal errors in NetCDF 29 | NcError error(NcError::silent_nonfatal); 30 | 31 | try { 32 | 33 | // Input data 34 | std::string strInputData; 35 | 36 | // Input data list 37 | std::string strInputDataList; 38 | 39 | // Add a calendar attribute 40 | std::string strCalendarName; 41 | 42 | // Output file 43 | std::string strOutputFile; 44 | 45 | // Parse the command line 46 | BeginCommandLine() 47 | CommandLineString(strInputData, "in_data", ""); 48 | CommandLineString(strInputDataList, "in_data_list", ""); 49 | CommandLineString(strCalendarName, "add_calendar", ""); 50 | CommandLineString(strOutputFile, "out_index", ""); 51 | 52 | ParseCommandLine(argc, argv); 53 | EndCommandLine(argv) 54 | 55 | AnnounceBanner(); 56 | 57 | // Check command line arguments 58 | if ((strInputData.length() == 0) && (strInputDataList.length() == 0)) { 59 | _EXCEPTIONT("No input data file (--in_data) or (--in_data_list)" 60 | " specified"); 61 | } 62 | if ((strInputData.length() != 0) && (strInputDataList.length() != 0)) { 63 | _EXCEPTIONT("Only one of (--in_data) or (--in_data_list)" 64 | " may be specified"); 65 | } 66 | if (strOutputFile.length() == 0) { 67 | _EXCEPTIONT("No output index file (--out_index) specified"); 68 | } 69 | 70 | // Create autocurator 71 | AutoCurator autocurator; 72 | 73 | // Set calendar manually 74 | if (strCalendarName.length() != 0) { 75 | autocurator.SetCalendar(Time::CalendarTypeFromString(strCalendarName)); 76 | } 77 | 78 | // Curate input data 79 | if (strInputData.length() != 0) { 80 | AnnounceStartBlock("Autocurating in_data"); 81 | autocurator.IndexFiles(strInputData); 82 | 83 | } else if
(strInputDataList.length() != 0) { 84 | AnnounceStartBlock("Autocurating in_data_list"); 85 | std::ifstream ifInputDataList(strInputDataList.c_str()); 86 | if (!ifInputDataList.is_open()) { 87 | _EXCEPTION1("Unable to open file \"%s\"", 88 | strInputDataList.c_str()); 89 | } 90 | std::string strFileLine; 91 | while (std::getline(ifInputDataList, strFileLine)) { 92 | if (strFileLine.length() == 0) { 93 | continue; 94 | } 95 | if (strFileLine[0] == '#') { 96 | continue; 97 | } 98 | Announce(strFileLine.c_str()); 99 | autocurator.IndexFiles(strFileLine); 100 | } 101 | } 102 | AnnounceEndBlock("Done"); 103 | 104 | // Write to file 105 | autocurator.ToYAMLFile(strOutputFile); 106 | /* 107 | // Read from file 108 | AutoCurator autocurator2; 109 | autocurator2.FromYAMLFile(strOutputFile); 110 | autocurator2.ToYAMLFile("test.txt"); 111 | */ 112 | AnnounceBanner(); 113 | 114 | } catch(Exception & e) { 115 | Announce(e.ToString().c_str()); 116 | } 117 | } 118 | 119 | /////////////////////////////////////////////////////////////////////////////// 120 | 121 | -------------------------------------------------------------------------------- /src/util/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Kian Huang, Hongyu Chen 2 | # 3 | # Distributed under the Boost Software License, Version 1.0. (See accompanying 4 | # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 5 | 6 | list(APPEND ACCUMULATE_DATA_FILES 7 | AccumulateData.cpp 8 | ) 9 | 10 | list(APPEND ACCUMULATE_ERA5_FORECAST_FILES 11 | AccumulateERA5Forecast.cpp 12 | ) 13 | 14 | list(APPEND AUTO_CURATOR_TOOL_FILES 15 | AutoCuratorTool.cpp 16 | ) 17 | 18 | list(APPEND CLIMATOLOGY_FILES 19 | Climatology.cpp 20 | ) 21 | 22 | list(APPEND FOURIER_FILTER_FILES 23 | FourierFilter.cpp 24 | ) 25 | 26 | list(APPEND GENERATE_CONNECTIVITY_FILES 27 | GenerateConnectivityFile.cpp 28 | ) 29 | 30 | list(APPEND GENERATE_NEAREST_NEIGHBOR_MAP_FILES 31 | GenerateNearestNeighborMap.cpp 32 | ) 33 | 34 | list(APPEND INTEGRATE_DIMENSION_FILES 35 | IntegrateDimension.cpp 36 | ) 37 | 38 | list(APPEND LAGRANGIAN_PARCEL_TRACKER_FILES 39 | LagrangianParcelTracker.cpp 40 | ) 41 | 42 | list(APPEND QUANTILE_CALCULATOR_FILES 43 | QuantileCalculator.cpp 44 | ) 45 | 46 | list(APPEND SHAPEFILE_MASK_FILES 47 | ShapefileMask.cpp 48 | ) 49 | 50 | list(APPEND VARIABLE_PROCESSOR_FILES 51 | VariableProcessor.cpp 52 | ) 53 | 54 | include_directories( 55 | ${CMAKE_CURRENT_SOURCE_DIR} 56 | ${CMAKE_CURRENT_SOURCE_DIR}/../base 57 | ${CMAKE_CURRENT_SOURCE_DIR}/../blocking 58 | ${CMAKE_CURRENT_SOURCE_DIR}/../netcdf-cxx-4.2 59 | ${NetCDF_C_INCLUDE_DIR} 60 | ${MPI_CXX_INCLUDE_DIRS} 61 | ) 62 | 63 | add_executable(AccumulateData ${ACCUMULATE_DATA_FILES}) 64 | target_link_libraries(AccumulateData PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 65 | 66 | add_executable(AccumulateERA5Forecast ${ACCUMULATE_ERA5_FORECAST_FILES}) 67 | target_link_libraries(AccumulateERA5Forecast PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 68 | 69 | add_executable(AutoCuratorTool ${AUTO_CURATOR_TOOL_FILES}) 70 | target_link_libraries(AutoCuratorTool PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 71 | 72 | add_executable(Climatology ${CLIMATOLOGY_FILES}) 73 | target_link_libraries(Climatology PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 74 | 75 | add_executable(FourierFilter ${FOURIER_FILTER_FILES}) 76 | target_link_libraries(FourierFilter PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 77 | 78 | 
add_executable(GenerateConnectivityFile ${GENERATE_CONNECTIVITY_FILES}) 79 | target_link_libraries(GenerateConnectivityFile PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 80 | 81 | add_executable(GenerateNearestNeighborMap ${GENERATE_NEAREST_NEIGHBOR_MAP_FILES}) 82 | target_link_libraries(GenerateNearestNeighborMap PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 83 | 84 | add_executable(IntegrateDimension ${INTEGRATE_DIMENSION_FILES}) 85 | target_link_libraries(IntegrateDimension PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 86 | 87 | add_executable(LagrangianParcelTracker ${LAGRANGIAN_PARCEL_TRACKER_FILES}) 88 | target_link_libraries(LagrangianParcelTracker PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 89 | 90 | add_executable(QuantileCalculator ${QUANTILE_CALCULATOR_FILES}) 91 | target_link_libraries(QuantileCalculator PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 92 | 93 | add_executable(ShapefileMask ${SHAPEFILE_MASK_FILES}) 94 | target_link_libraries(ShapefileMask PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 95 | 96 | add_executable(VariableProcessor ${VARIABLE_PROCESSOR_FILES}) 97 | target_link_libraries(VariableProcessor PUBLIC extremesbase netcdf_c++ ${MPI_CXX_LIBRARIES}) 98 | 99 | install( 100 | TARGETS 101 | AccumulateData 102 | AccumulateERA5Forecast 103 | AutoCuratorTool 104 | Climatology 105 | FourierFilter 106 | GenerateConnectivityFile 107 | GenerateNearestNeighborMap 108 | IntegrateDimension 109 | LagrangianParcelTracker 110 | QuantileCalculator 111 | ShapefileMask 112 | VariableProcessor 113 | RUNTIME DESTINATION bin 114 | ) 115 | 116 | -------------------------------------------------------------------------------- /test/control_shell/CLIVAR_BLOBS_GH_LOOP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Automate the calculation of the blobs files + stats 4 | 5 | SEASONS=("MAM" "JJA" "SON" "DJF") 6 | #DATA=("climo" "2xCO2" "SSTplus2" "SSTplus2_2xCO2") 7 | DATA=("climo" "2xCO2" "SSTplus2") 8 | mstart=(3 6 9 12) 9 | BLOBS="TRUE" 10 | #BLOBS="FALSE" 11 | #Addition of regional parameters 12 | SECTOR=("NA" "NC" "NP" "SA" "SI" "SP") 13 | LEFT_BOUND=(250 30 130 290 20 120) 14 | RIGHT_BOUND=(50 150 270 40 140 310) 15 | MIN_LAT=(25 25 25 -75 -75 -75) 16 | MAX_LAT=(75 75 75 -25 -25 -25) 17 | 18 | #SECTOR=("NC" "NP" "SI" "SP") 19 | #LEFT_BOUND=(30 130 20 120) 20 | #RIGHT_BOUND=(150 260 140 310) 21 | #MIN_LAT=(25 25 -75 -75) 22 | #MAX_LAT=(75 75 -25 -25) 23 | 24 | if [ "$BLOBS" == "TRUE" ]; then 25 | for d in ${DATA[@]}; do 26 | cd $SCRATCH/$d/data 27 | bdir="$SCRATCH/$d/blobs" 28 | if [ ! -e $bdir ]; then 29 | mkdir -p $bdir 30 | fi 31 | for y in {2..23}; do 32 | i=0 33 | for s in ${SEASONS[@]}; do 34 | yf=$(printf "%04d" $y) 35 | m=${mstart[i]} 36 | mf=$(printf "%02d" $m) 37 | 38 | if [ $m -eq 12 ]; then 39 | ls *$yf-$mf*z500_devs.nc > blobzlist 40 | yf=$(printf "%04d" $((y+1))) 41 | ls *$yf-0[12]*z500_devs.nc >> blobzlist 42 | else 43 | mstring=$(printf "*$yf-{%02d,%02d,%02d}*z500_devs.nc" $m $((m+1)) $((m+2))) 44 | echo "ls $mstring" | sh > blobzlist 45 | fi 46 | #run StitchBlobs 47 | #New addition!
Regional stitching 48 | for n in {0..5}; do 49 | #for n in {0..3}; do 50 | secname=${SECTOR[n]} 51 | 52 | echo "$s""_""$secname""_""$yf" 53 | #blobsname="$bdir/$s""_""$yf""_blobs_""$d.nc" 54 | #statsname="$bdir/$s""_""$yf""_stats_""$d.txt" 55 | #densname="$bdir/$s""_""$yf""_dens_""$d.nc" 56 | blobsname="$bdir/$s""_""$yf""_""$secname""_zblobs_""$d.nc" 57 | statsname="$bdir/$s""_""$yf""_""$secname""_zstats_""$d.txt" 58 | densname="$bdir/$s""_""$yf""_""$secname""_zdens_""$d.nc" 59 | ~/tempestextremes/bin/StitchBlobs --inlist blobzlist --out $blobsname --var INT_ADGH --outvar GH_BLOB --mintime 40 --minlat ${MIN_LAT[n]} --maxlat ${MAX_LAT[n]} --minlon ${LEFT_BOUND[n]} --maxlon ${RIGHT_BOUND[n]} 60 | ~/tempestextremes/bin/BlobStats --infile $blobsname --outfile $statsname --invar GH_BLOB --out minlat,maxlat,minlon,maxlon,centlat,centlon,area 61 | ~/tempestextremes/bin/DensityCalculations --in $blobsname --var GH_BLOB --out $densname 62 | n=$((n+1)) 63 | done 64 | i=$((i+1)) 65 | done 66 | done 67 | done 68 | 69 | #removing the files that contain missing dates! 70 | #climo dataset 71 | #rm $SCRATCH/climo/blobs/DJF_0004_* 72 | #rm $SCRATCH/climo/blobs/DJF_0010_* 73 | #rm $SCRATCH/climo/blobs/JJA_0012_* 74 | #rm $SCRATCH/climo/blobs/SON_0020_* 75 | 76 | #2xCO2 dataset 77 | #rm $SCRATCH/2xCO2/blobs/SON_0007_* 78 | 79 | #SSTplus2 dataset 80 | #rm $SCRATCH/SSTplus2/blobs/SON_0016_* 81 | 82 | #SSTplus2_2xCO2 dataset 83 | #rm $SCRATCH/SSTplus2_2xCO2/blobs/DJF_0007_* 84 | #rm $SCRATCH/SSTplus2_2xCO2/blobs/DJF_0016_* 85 | 86 | fi 87 | 88 | 89 | #Now run average density calculations 90 | #for d in ${DATA[@]}; do 91 | # cd $SCRATCH/$d/blobs 92 | # for s in ${SEASONS[@]}; do 93 | # lsname=$s"_blobs" 94 | # outname=$s"_avg_dens_"$d".nc" 95 | # ls $s*blobs*.nc > $lsname 96 | # numfiles=$(cat $lsname | wc -l) 97 | # ~/tempestextremes/bin/DensityCalculations --std --inlist $lsname --var PV_BLOB --out $outname 98 | # python ~/tempestextremes/test/plot_density.py $outname "$d $numfiles yr $s" "avg" "PV blocking" 99 | # done 100 | #done 101 | -------------------------------------------------------------------------------- /test/control_shell/CLIVAR_BLOBS_PV_LOOP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Automate the calculation of the blobs files + stats 4 | 5 | SEASONS=("MAM" "JJA" "SON" "DJF") 6 | DATA=("climo" "2xCO2" "SSTplus2" "SSTplus2_2xCO2") 7 | mstart=(3 6 9 12) 8 | BLOBS="TRUE" 9 | 10 | #Addition of regional parameters 11 | SECTOR=("NA" "NC" "NP" "SA" "SI" "SP") 12 | LEFT_BOUND=(250 30 130 290 20 120) 13 | RIGHT_BOUND=(50 150 270 40 140 310) 14 | MIN_LAT=(25 25 25 -75 -75 -75) 15 | MAX_LAT=(75 75 75 -25 -25 -25) 16 | 17 | #SECTOR=("NC" "NP" "SI" "SP") 18 | #LEFT_BOUND=(30 130 20 120) 19 | #RIGHT_BOUND=(150 260 140 310) 20 | #MIN_LAT=(25 25 -75 -75) 21 | #MAX_LAT=(75 75 -25 -25) 22 | 23 | if [ "$BLOBS" == "TRUE" ]; then 24 | for d in ${DATA[@]}; do 25 | cd $SCRATCH/$d/data 26 | bdir="$SCRATCH/$d/blobs" 27 | if [ ! -e $bdir ]; then 28 | mkdir -p $bdir 29 | fi 30 | for y in {2..23}; do 31 | i=0 32 | for s in ${SEASONS[@]}; do 33 | yf=$(printf "%04d" $y) 34 | m=${mstart[i]} 35 | mf=$(printf "%02d" $m) 36 | 37 | if [ $m -eq 12 ]; then 38 | ls *$yf-$mf*integ_devs.nc > bloblist 39 | yf=$(printf "%04d" $((y+1))) 40 | ls *$yf-0[12]*integ_devs.nc >> bloblist 41 | else 42 | mstring=$(printf "*$yf-{%02d,%02d,%02d}*integ_devs.nc" $m $((m+1)) $((m+2))) 43 | echo "ls $mstring" | sh > bloblist 44 | fi 45 | #run StitchBlobs 46 | #New addition! 
Regional stitching 47 | for n in {0..5}; do 48 | #for n in {0..3}; do 49 | secname=${SECTOR[n]} 50 | 51 | echo "$s""_""$secname""_""$yf" 52 | #blobsname="$bdir/$s""_""$yf""_blobs_""$d.nc" 53 | #statsname="$bdir/$s""_""$yf""_stats_""$d.txt" 54 | #densname="$bdir/$s""_""$yf""_dens_""$d.nc" 55 | blobsname="$bdir/$s""_""$yf""_""$secname""_blobs_""$d.nc" 56 | statsname="$bdir/$s""_""$yf""_""$secname""_stats_""$d.txt" 57 | densname="$bdir/$s""_""$yf""_""$secname""_dens_""$d.nc" 58 | ~/tempestextremes/bin/StitchBlobs --inlist bloblist --out $blobsname --var INT_ADIPV --outvar PV_BLOB --mintime 40 --minlat ${MIN_LAT[n]} --maxlat ${MAX_LAT[n]} --minlon ${LEFT_BOUND[n]} --maxlon ${RIGHT_BOUND[n]} 59 | ~/tempestextremes/bin/BlobStats --infile $blobsname --outfile $statsname --invar PV_BLOB --out minlat,maxlat,minlon,maxlon,centlat,centlon,area 60 | ~/tempestextremes/bin/DensityCalculations --in $blobsname --var PV_BLOB --out $densname 61 | n=$((n+1)) 62 | done 63 | i=$((i+1)) 64 | done 65 | done 66 | done 67 | 68 | #removing the files that contain missing dates! 69 | #climo dataset 70 | rm $SCRATCH/climo/blobs/DJF_0004_* 71 | rm $SCRATCH/climo/blobs/DJF_0010_* 72 | rm $SCRATCH/climo/blobs/JJA_0012_* 73 | rm $SCRATCH/climo/blobs/SON_0020_* 74 | 75 | #2xCO2 dataset 76 | rm $SCRATCH/2xCO2/blobs/SON_0007_* 77 | 78 | #SSTplus2 dataset 79 | rm $SCRATCH/SSTplus2/blobs/SON_0016_* 80 | 81 | #SSTplus2_2xCO2 dataset 82 | rm $SCRATCH/SSTplus2_2xCO2/blobs/DJF_0007_* 83 | rm $SCRATCH/SSTplus2_2xCO2/blobs/DJF_0016_* 84 | 85 | fi 86 | 87 | 88 | #Now run average density calculations 89 | #for d in ${DATA[@]}; do 90 | # cd $SCRATCH/$d/blobs 91 | # for s in ${SEASONS[@]}; do 92 | # lsname=$s"_blobs" 93 | # outname=$s"_avg_dens_"$d".nc" 94 | # ls $s*blobs*.nc > $lsname 95 | # numfiles=$(cat $lsname | wc -l) 96 | # ~/tempestextremes/bin/DensityCalculations --std --inlist $lsname --var PV_BLOB --out $outname 97 | # python ~/tempestextremes/test/plot_density.py $outname "$d $numfiles yr $s" "avg" "PV blocking" 98 | # done 99 | #done 100 | -------------------------------------------------------------------------------- /test/control_shell/ERA_BLOBS_CONST_LOOP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SEASONS=("MAM" "JJA" "SON" "DJF") 4 | mstart=(3 6 9 12) 5 | ystart=1980 6 | ycalc=1987 7 | yend=2000 8 | DDIR=/Volumes/ExFAT_drive/ERA_files 9 | BDIR=$DDIR/ERA_blobs 10 | #VAR="IPV" 11 | VARVEC=("IPV" "Z") 12 | 13 | #Addition of regional parameters 14 | SECTOR=("NA" "NC" "NP" "SA" "SI" "SP") 15 | LEFT_BOUND=(250 30 130 290 20 120) 16 | RIGHT_BOUND=(50 150 270 40 140 310) 17 | MIN_LAT=(25 25 25 -75 -75 -75) 18 | MAX_LAT=(75 75 75 -25 -25 -25) 19 | 20 | SUFF="" 21 | BLOB_SUFF="" 22 | STAT_SUFF="" 23 | DENS_SUFF="" 24 | INVAR="" 25 | BLOB_VAR="" 26 | PLOT_TITLE="" 27 | for VAR in ${VARVEC[@]}; do 28 | if [ "$VAR" == "IPV" ]; then 29 | SUFF="integ_devs_norm_const.nc" 30 | BLOB_SUFF="blobs_const.nc" 31 | STAT_SUFF="stats_const.txt" 32 | DENS_SUFF="dens_const.nc" 33 | INVAR="INT_ADIPV" 34 | BLOB_VAR="PV_BLOB" 35 | PLOT_TITLE="PV blocking" 36 | elif [ "$VAR" == "Z" ]; then 37 | SUFF="z500_devs_norm_const.nc" 38 | BLOB_SUFF="Zblobs_const.nc" 39 | STAT_SUFF="Zstats_const.txt" 40 | DENS_SUFF="Zdens_const.nc" 41 | INVAR="INT_ADGH" 42 | BLOB_VAR="Z_BLOB" 43 | PLOT_TITLE="Z blocking" 44 | elif [ "$VAR" == "GHGrad" ]; then 45 | SUFF="z500_GHG.nc" 46 | BLOB_SUFF="GHGblobs.nc" 47 | STAT_SUFF="GHGstats.txt" 48 | DENS_SUFF="GHGdens.nc" 49 | INVAR="GHGrad" 50 | BLOB_VAR="GHG_BLOB" 
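# The GHGrad settings are kept for parity with ERA_BLOBS_LOOP.sh, although VARVEC in this constant-threshold variant only iterates over IPV and Z.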
51 | PLOT_TITLE="GHG blocking" 52 | fi 53 | 54 | 55 | 56 | 57 | 58 | if [ ! -e $BDIR ]; then 59 | mkdir -p $BDIR 60 | fi 61 | 62 | for ((y=1980; y<=2005; y++)); do 63 | i=0 64 | SUBDIR=$DDIR/ERA_$y 65 | echo "Entering subdirectory $SUBDIR" 66 | if [ ! -e $SUBDIR ]; then 67 | echo "Missing directory. Check connection." 68 | exit 69 | fi 70 | for s in ${SEASONS[@]}; do 71 | 72 | if [ "$s" == "DJF" ]; then 73 | echo "DJF!" 74 | mstring="$SUBDIR/ERA*_12_vars_$SUFF" 75 | echo "ls $mstring" | sh > bloblist 76 | 77 | yn=$((y+1)) 78 | SUBDIR2=$DDIR/ERA_$yn 79 | 80 | mstring="$SUBDIR2/ERA*_0[12]_vars_$SUFF" 81 | echo "ls $mstring" | sh >>bloblist 82 | # if [ "$VAR" == "IPV" ]; then 83 | # ls $SUBDIR/*_12_vars_integ_devs.nc > bloblist 84 | # fi 85 | # else if [ "$VAR" == "GH" ]; then 86 | # ls $SUBDIR/*_12_vars_GH_devs.nc > bloblist 87 | # fi 88 | 89 | # if [ "$VAR" == "IPV" ]; then 90 | # ls $SUBDIR2/*_0[12]_vars_integ_devs.nc >> bloblist 91 | # fi 92 | # else if [ "$VAR" == "GH" ]; then 93 | # ls $SUBDIR2/*_0[12]_vars_GH_devs.nc >> bloblist 94 | # fi 95 | 96 | else 97 | m=${mstart[i]} 98 | # m1=${mstart[i]} 99 | # m2=$((m1+2)) 100 | # echo "m1 is $m1 and m2 is $m2" 101 | # echo "ls $SUBDIR/*_*{$m1..$m2}_*devs.nc" | sh > bloblist 102 | 103 | mstring=$(printf "$SUBDIR/ERA*_{%02d,%02d,%02d}_vars_$SUFF" $m $((m+1)) $((m+2))) 104 | echo $mstring 105 | echo "ls $mstring" | sh > bloblist 106 | fi 107 | cat bloblist 108 | for n in {0..5}; do 109 | secname=${SECTOR[n]} 110 | # blobsname="$BDIR/ERA_"$y"_"$s"_"$BLOB_SUFF 111 | # statsname="$BDIR/ERA_"$y"_"$s"_"$STAT_SUFF 112 | # densname="$BDIR/ERA_"$y"_"$s"_"$DENS_SUFF 113 | # vdensname="$BDIR/ERA_"$y"_"$s"_var_"$DENS_SUFF 114 | 115 | blobsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$BLOB_SUFF 116 | statsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$STAT_SUFF 117 | densname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$DENS_SUFF 118 | vdensname="$BDIR/ERA_"$y"_"$s"_var_""$secname""_"$DENS_SUFF 119 | 120 | ~/tempestextremes/bin/StitchBlobs --inlist bloblist --out $blobsname --var $INVAR --outvar $BLOB_VAR --mintime 20 --thresholdcmd "minarea,1000000000000" --minlat ${MIN_LAT[n]} --maxlat ${MAX_LAT[n]} --minlon ${LEFT_BOUND[n]} --maxlon ${RIGHT_BOUND[n]} 121 | ~/tempestextremes/bin/BlobStats --infile $blobsname --outfile $statsname --invar $BLOB_VAR --out minlat,maxlat,minlon,maxlon,centlat,centlon,area 122 | ~/tempestextremes/bin/DensityCalculations --in $blobsname --var $BLOB_VAR --out $densname 123 | # ~/tempestextremes/bin/DensityCalculations --inlist bloblist --var $INVAR --out $vdensname 124 | n=$((n+1)) 125 | done 126 | i=$((i+1)) 127 | done 128 | done 129 | done 130 | #cd $BDIR 131 | #for s in ${SEASONS[@]}; do 132 | # lsname="ERA_"$s"_blobs" 133 | # if [ -e $lsname ]; then 134 | # rm $lsname 135 | # fi 136 | # outname="$BDIR/ERA_"$s"_avg_"$DENS_SUFF 137 | # for ((y=ystart; y<=yend; y++)); do 138 | # echo "ls ERA_"$y"_"$s"_"$BLOB_SUFF" >> $lsname" | sh 139 | # done 140 | # cat $lsname 141 | 142 | # numfiles=$(cat $lsname | wc -l) 143 | # n=$((numfiles)) 144 | # echo "There are $n files" 145 | # ~/tempestextremes/bin/DensityCalculations --std --inlist $lsname --var $BLOB_VAR --out $outname 146 | # python ~/tempestextremes/test/plot_density.py $outname "ERA $n yr $s" "avg" "$PLOT_TITLE" 147 | #done 148 | 149 | #cp /Volumes/ExFAT_drive/ERA_files/ERA_blobs/ERA*plot.png ~/figs/ 150 | -------------------------------------------------------------------------------- /test/control_shell/ERA_BLOBS_LOOP.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SEASONS=("MAM" "JJA" "SON" "DJF") 4 | mstart=(3 6 9 12) 5 | ystart=1980 6 | ycalc=1987 7 | yend=2000 8 | DDIR=/Volumes/ExFAT_drive/ERA_files 9 | BDIR=$DDIR/ERA_blobs 10 | #VAR="IPV" 11 | VARVEC=("IPV" "Z" "GHGrad") 12 | 13 | #Addition of regional parameters 14 | SECTOR=("NA" "NC" "NP" "SA" "SI" "SP") 15 | LEFT_BOUND=(250 30 130 290 20 120) 16 | RIGHT_BOUND=(50 150 270 40 140 310) 17 | MIN_LAT=(25 25 25 -75 -75 -75) 18 | MAX_LAT=(75 75 75 -25 -25 -25) 19 | 20 | SUFF="" 21 | BLOB_SUFF="" 22 | STAT_SUFF="" 23 | DENS_SUFF="" 24 | INVAR="" 25 | BLOB_VAR="" 26 | PLOT_TITLE="" 27 | for VAR in ${VARVEC[@]}; do 28 | if [ "$VAR" == "IPV" ]; then 29 | SUFF="integ_devs_norm.nc" 30 | BLOB_SUFF="blobs.nc" 31 | STAT_SUFF="stats.txt" 32 | DENS_SUFF="dens.nc" 33 | INVAR="INT_ADIPV" 34 | BLOB_VAR="PV_BLOB" 35 | PLOT_TITLE="PV blocking" 36 | elif [ "$VAR" == "Z" ]; then 37 | SUFF="z500_devs_norm.nc" 38 | BLOB_SUFF="Zblobs.nc" 39 | STAT_SUFF="Zstats.txt" 40 | DENS_SUFF="Zdens.nc" 41 | INVAR="INT_ADGH" 42 | BLOB_VAR="Z_BLOB" 43 | PLOT_TITLE="Z blocking" 44 | elif [ "$VAR" == "GHGrad" ]; then 45 | SUFF="z500_GHG.nc" 46 | BLOB_SUFF="GHGblobs.nc" 47 | STAT_SUFF="GHGstats.txt" 48 | DENS_SUFF="GHGdens.nc" 49 | INVAR="GHGrad" 50 | BLOB_VAR="GHG_BLOB" 51 | PLOT_TITLE="GHG blocking" 52 | fi 53 | 54 | 55 | 56 | 57 | 58 | if [ ! -e $BDIR ]; then 59 | mkdir -p $BDIR 60 | fi 61 | 62 | for ((y=1980; y<=2005; y++)); do 63 | i=0 64 | SUBDIR=$DDIR/ERA_$y 65 | echo "Entering subdirectory $SUBDIR" 66 | if [ ! -e $SUBDIR ]; then 67 | echo "Missing directory. Check connection." 68 | exit 69 | fi 70 | for s in ${SEASONS[@]}; do 71 | 72 | if [ "$s" == "DJF" ]; then 73 | echo "DJF!" 74 | mstring="$SUBDIR/ERA*_12_vars_$SUFF" 75 | echo "ls $mstring" | sh > bloblist 76 | 77 | yn=$((y+1)) 78 | SUBDIR2=$DDIR/ERA_$yn 79 | 80 | mstring="$SUBDIR2/ERA*_0[12]_vars_$SUFF" 81 | echo "ls $mstring" | sh >>bloblist 82 | # if [ "$VAR" == "IPV" ]; then 83 | # ls $SUBDIR/*_12_vars_integ_devs.nc > bloblist 84 | # fi 85 | # else if [ "$VAR" == "GH" ]; then 86 | # ls $SUBDIR/*_12_vars_GH_devs.nc > bloblist 87 | # fi 88 | 89 | # if [ "$VAR" == "IPV" ]; then 90 | # ls $SUBDIR2/*_0[12]_vars_integ_devs.nc >> bloblist 91 | # fi 92 | # else if [ "$VAR" == "GH" ]; then 93 | # ls $SUBDIR2/*_0[12]_vars_GH_devs.nc >> bloblist 94 | # fi 95 | 96 | else 97 | m=${mstart[i]} 98 | # m1=${mstart[i]} 99 | # m2=$((m1+2)) 100 | # echo "m1 is $m1 and m2 is $m2" 101 | # echo "ls $SUBDIR/*_*{$m1..$m2}_*devs.nc" | sh > bloblist 102 | 103 | mstring=$(printf "$SUBDIR/ERA*_{%02d,%02d,%02d}_vars_$SUFF" $m $((m+1)) $((m+2))) 104 | echo $mstring 105 | echo "ls $mstring" | sh > bloblist 106 | fi 107 | cat bloblist 108 | for n in {0..5}; do 109 | secname=${SECTOR[n]} 110 | # blobsname="$BDIR/ERA_"$y"_"$s"_"$BLOB_SUFF 111 | # statsname="$BDIR/ERA_"$y"_"$s"_"$STAT_SUFF 112 | # densname="$BDIR/ERA_"$y"_"$s"_"$DENS_SUFF 113 | # vdensname="$BDIR/ERA_"$y"_"$s"_var_"$DENS_SUFF 114 | 115 | blobsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$BLOB_SUFF 116 | statsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$STAT_SUFF 117 | densname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$DENS_SUFF 118 | vdensname="$BDIR/ERA_"$y"_"$s"_var_""$secname""_"$DENS_SUFF 119 | 120 | ~/tempestextremes/bin/StitchBlobs --inlist bloblist --out $blobsname --var $INVAR --outvar $BLOB_VAR --mintime 20 --thresholdcmd "minarea,1000000000000" --minlat ${MIN_LAT[n]} --maxlat ${MAX_LAT[n]} --minlon ${LEFT_BOUND[n]} --maxlon ${RIGHT_BOUND[n]} 
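# For each sector, StitchBlobs tags connected anomaly features inside the regional bounds (at least 20 time steps, minimum-area threshold); BlobStats then writes per-blob extent, centroid and area, and DensityCalculations grids the blob frequency.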
121 | ~/tempestextremes/bin/BlobStats --infile $blobsname --outfile $statsname --invar $BLOB_VAR --out minlat,maxlat,minlon,maxlon,centlat,centlon,area 122 | ~/tempestextremes/bin/DensityCalculations --in $blobsname --var $BLOB_VAR --out $densname 123 | # ~/tempestextremes/bin/DensityCalculations --inlist bloblist --var $INVAR --out $vdensname 124 | n=$((n+1)) 125 | done 126 | i=$((i+1)) 127 | done 128 | done 129 | done 130 | #cd $BDIR 131 | #for s in ${SEASONS[@]}; do 132 | # lsname="ERA_"$s"_blobs" 133 | # if [ -e $lsname ]; then 134 | # rm $lsname 135 | # fi 136 | # outname="$BDIR/ERA_"$s"_avg_"$DENS_SUFF 137 | # for ((y=ystart; y<=yend; y++)); do 138 | # echo "ls ERA_"$y"_"$s"_"$BLOB_SUFF" >> $lsname" | sh 139 | # done 140 | # cat $lsname 141 | 142 | # numfiles=$(cat $lsname | wc -l) 143 | # n=$((numfiles)) 144 | # echo "There are $n files" 145 | # ~/tempestextremes/bin/DensityCalculations --std --inlist $lsname --var $BLOB_VAR --out $outname 146 | # python ~/tempestextremes/test/plot_density.py $outname "ERA $n yr $s" "avg" "$PLOT_TITLE" 147 | #done 148 | 149 | #cp /Volumes/ExFAT_drive/ERA_files/ERA_blobs/ERA*plot.png ~/figs/ 150 | -------------------------------------------------------------------------------- /test/control_shell/ERA_DETECT_LOOP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SEASONS=("MAM" "JJA" "SON" "DJF") 4 | mstart=(3 6 9 12) 5 | ystart=1980 6 | ycalc=2003 7 | yend=2000 8 | DDIR=/Volumes/ExFAT_drive/ERA_files 9 | BDIR=$DDIR/ERA_detect 10 | #VAR="GHGrad" 11 | 12 | VARVEC=("Z" "IPV") 13 | 14 | #Addition of regional parameters 15 | SECTOR=("NA" "NC" "NP" "SA" "SI" "SP") 16 | LEFT_BOUND=(250 30 130 290 20 120) 17 | RIGHT_BOUND=(50 150 270 40 140 310) 18 | MIN_LAT=(25 25 25 -75 -75 -75) 19 | MAX_LAT=(75 75 75 -25 -25 -25) 20 | 21 | SUFF="" 22 | BLOB_SUFF="" 23 | STAT_SUFF="" 24 | DENS_SUFF="" 25 | INVAR="" 26 | BLOB_VAR="" 27 | PLOT_TITLE="" 28 | 29 | for VAR in ${VARVEC[@]}; do 30 | 31 | if [ "$VAR" == "IPV" ]; then 32 | SUFF="integ_devs_norm_const.nc" 33 | BLOB_SUFF="blobs_nostitch_const.nc" 34 | STAT_SUFF="stats_nostitch_const.txt" 35 | DENS_SUFF="dens_nostitch_const.nc" 36 | INVAR="INT_ADIPV" 37 | BLOB_VAR="PV_BLOB" 38 | PLOT_TITLE="PV blocking" 39 | elif [ "$VAR" == "Z" ]; then 40 | SUFF="z500_devs_norm_const.nc" 41 | BLOB_SUFF="Zblobs_nostitch_const.nc" 42 | STAT_SUFF="Zstats_nostitch_const.txt" 43 | DENS_SUFF="Zdens_nostitch_const.nc" 44 | INVAR="INT_ADGH" 45 | BLOB_VAR="Z_BLOB" 46 | PLOT_TITLE="Z blocking" 47 | elif [ "$VAR" == "GHGrad" ]; then 48 | SUFF="z500_GHG.nc" 49 | BLOB_SUFF="GHGblobs_nostitch.nc" 50 | STAT_SUFF="GHGstats_nostitch.txt" 51 | DENS_SUFF="GHGdens_nostitch.nc" 52 | INVAR="GHGrad" 53 | BLOB_VAR="GHG_BLOB" 54 | PLOT_TITLE="GHG blocking" 55 | fi 56 | 57 | 58 | 59 | if [ ! -e $BDIR ]; then 60 | mkdir -p $BDIR 61 | fi 62 | 63 | for ((y=1980; y<=2005; y++)); do 64 | i=0 65 | SUBDIR=$DDIR/ERA_$y 66 | echo "Entering subdirectory $SUBDIR" 67 | if [ ! -e $SUBDIR ]; then 68 | echo "Missing directory. Check connection." 69 | exit 70 | fi 71 | for s in ${SEASONS[@]}; do 72 | 73 | if [ "$s" == "DJF" ]; then 74 | echo "DJF!" 
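# DJF straddles the calendar-year boundary: December is listed from year $y and January/February from year $((y+1)).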
75 | mstring="$SUBDIR/ERA*_12_vars_$SUFF" 76 | echo "ls $mstring" | sh > bloblist 77 | 78 | yn=$((y+1)) 79 | SUBDIR2=$DDIR/ERA_$yn 80 | 81 | mstring="$SUBDIR2/ERA*_0[12]_vars_$SUFF" 82 | echo "ls $mstring" | sh >>bloblist 83 | # if [ "$VAR" == "IPV" ]; then 84 | # ls $SUBDIR/*_12_vars_integ_devs.nc > bloblist 85 | # fi 86 | # else if [ "$VAR" == "GH" ]; then 87 | # ls $SUBDIR/*_12_vars_GH_devs.nc > bloblist 88 | # fi 89 | 90 | # if [ "$VAR" == "IPV" ]; then 91 | # ls $SUBDIR2/*_0[12]_vars_integ_devs.nc >> bloblist 92 | # fi 93 | # else if [ "$VAR" == "GH" ]; then 94 | # ls $SUBDIR2/*_0[12]_vars_GH_devs.nc >> bloblist 95 | # fi 96 | 97 | else 98 | m=${mstart[i]} 99 | # m1=${mstart[i]} 100 | # m2=$((m1+2)) 101 | # echo "m1 is $m1 and m2 is $m2" 102 | # echo "ls $SUBDIR/*_*{$m1..$m2}_*devs.nc" | sh > bloblist 103 | 104 | mstring=$(printf "$SUBDIR/ERA*_{%02d,%02d,%02d}_vars_$SUFF" $m $((m+1)) $((m+2))) 105 | echo $mstring 106 | echo "ls $mstring" | sh > bloblist 107 | fi 108 | cat bloblist 109 | for n in {0..5}; do 110 | secname=${SECTOR[n]} 111 | # blobsname="$BDIR/ERA_"$y"_"$s"_"$BLOB_SUFF 112 | # statsname="$BDIR/ERA_"$y"_"$s"_"$STAT_SUFF 113 | # densname="$BDIR/ERA_"$y"_"$s"_"$DENS_SUFF 114 | # vdensname="$BDIR/ERA_"$y"_"$s"_var_"$DENS_SUFF 115 | 116 | blobsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$BLOB_SUFF 117 | statsname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$STAT_SUFF 118 | densname="$BDIR/ERA_"$y"_"$s"_""$secname""_"$DENS_SUFF 119 | vdensname="$BDIR/ERA_"$y"_"$s"_var_""$secname""_"$DENS_SUFF 120 | 121 | ~/tempestextremes/bin/DetectBlobs --inlist bloblist --out $blobsname --var $INVAR --outvar $BLOB_VAR --minlat ${MIN_LAT[n]} --maxlat ${MAX_LAT[n]} --minlon ${LEFT_BOUND[n]} --maxlon ${RIGHT_BOUND[n]} --thresholdcmd "minarea,1000000000000" 122 | ~/tempestextremes/bin/BlobStats --infile $blobsname --outfile $statsname --invar $BLOB_VAR --out minlat,maxlat,minlon,maxlon,centlat,centlon,area 123 | # ~/tempestextremes/bin/DensityCalculations --in $blobsname --var $BLOB_VAR --out $densname 124 | # ~/tempestextremes/bin/DensityCalculations --inlist bloblist --var $INVAR --out $vdensname 125 | n=$((n+1)) 126 | done 127 | i=$((i+1)) 128 | done 129 | done 130 | 131 | #cd $BDIR 132 | #for s in ${SEASONS[@]}; do 133 | # lsname="ERA_"$s"_blobs" 134 | # if [ -e $lsname ]; then 135 | # rm $lsname 136 | # fi 137 | # outname="$BDIR/ERA_"$s"_avg_"$DENS_SUFF 138 | # for ((y=ystart; y<=yend; y++)); do 139 | # echo "ls ERA_"$y"_"$s"_"$BLOB_SUFF" >> $lsname" | sh 140 | # done 141 | # cat $lsname 142 | 143 | # numfiles=$(cat $lsname | wc -l) 144 | # n=$((numfiles)) 145 | # echo "There are $n files" 146 | # ~/tempestextremes/bin/DensityCalculations --std --inlist $lsname --var $BLOB_VAR --out $outname 147 | # python ~/tempestextremes/test/plot_density.py $outname "ERA $n yr $s" "avg" "$PLOT_TITLE" 148 | #done 149 | done 150 | #cp /Volumes/ExFAT_drive/ERA_files/ERA_blobs/ERA*plot.png ~/figs/ 151 | -------------------------------------------------------------------------------- /test/control_shell/ERA_GHAnom_controls.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #This script will automate the GH anomaly (Z500) blocking calculations for the input data list 4 | 5 | export ystart=$1 6 | export yend=$2 7 | export yint=$3 8 | export DATA_DIR="/Volumes/ExFAT_drive/ERA_files/" 9 | export AVG_INPUT="ERA_avg_GH_list.txt" 10 | export DEV_INPUT="ERA_dev_GH_list.txt" 11 | export BLOB_INPUT="ERA_blob_GH_list.txt" 12 | export SEASONS=( "DJF" "MAM" "JJA" "SON" ) 13 | export NDAYS=5
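# Usage: ERA_GHAnom_controls.sh <ystart> <yend> <yint> (e.g. ERA_GHAnom_controls.sh 1980 2000 1987): the long-term Z500 average is built from years ystart-yend and deviations are computed for year yint.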
14 | 15 | 16 | if [ $# -lt 3 ]; then 17 | echo "Forgot to provide year numbers!" 18 | exit 19 | fi 20 | 21 | if [ ! -d $DATA_DIR ]; then 22 | echo "Error! Check your file path." 23 | exit 24 | fi 25 | 26 | cd $DATA_DIR 27 | 28 | if [ -e $AVG_INPUT ]; then 29 | rm $AVG_INPUT 30 | fi 31 | 32 | if [ -e $DEV_INPUT ]; then 33 | rm $DEV_INPUT 34 | fi 35 | 36 | if [ -e $BLOB_INPUT ]; then 37 | rm $BLOB_INPUT 38 | fi 39 | 40 | #Search for files over specified range 41 | ys=$((ystart)) 42 | ye=$((yend)) 43 | for ((y=$ys; y<=$ye; y++)); do 44 | for m in $(seq -f "%02g" 1 12); do 45 | infile="ERA_"$y"/ERA_"$y"_"$m"_vars.nc" 46 | outfile="ERA_"$y"/ERA_"$y"_"$m"_vars_z500.nc" 47 | #run integration code if file doesn't already exist 48 | if [ ! -e $outfile ]; then 49 | ~/tempestextremes/bin/Var4Dto3D --in $infile --varlist Z --gh --out $outfile --hpa 50 | fi 51 | echo $outfile >> $AVG_INPUT 52 | done 53 | done 54 | 55 | #List of files for the year that Z500 deviations are calculated 56 | yi=$((yint)) 57 | for m in $(seq -f "%02g" 1 12); do 58 | infile="ERA_"$yi"/ERA_"$yi"_"$m"_vars_z500.nc" 59 | echo $infile >> $DEV_INPUT 60 | done 61 | 62 | #Calculate block average 63 | avg_outfile="ERA_avg/ERA_"$ys"_"$ye"_Z500_avg.nc" 64 | if [ ! -e $avg_outfile ]; then 65 | ~/tempestextremes/bin/BlockingAvg --inlist $AVG_INPUT --out $avg_outfile --varname Z --avgname AVGZ 66 | fi 67 | 68 | c=0 69 | #Make list to input for StitchBlobs 70 | for m in $(seq -f "%02g" 1 12); do 71 | devfile="ERA_"$yi"/ERA_"$yi"_"$m"_vars_z500_devs.nc" 72 | if [ ! -e $devfile ]; then 73 | c=$((c+1)) 74 | fi 75 | echo $devfile >> $BLOB_INPUT 76 | done 77 | 78 | #echo $c " files missing" 79 | #calculate deviations 80 | #if [ c -gt 0 ]; then 81 | ~/tempestextremes/bin/BlockingDevs --inlist $DEV_INPUT --avg $avg_outfile --varname Z --avgname AVGZ --gh 82 | #fi 83 | 84 | #nsteps=$((NDAYS*4)) 85 | #Run StitchBlobs for whole year 86 | #blobsfile="ERA_"$yi"_"$NDAYS"day_GHAnom_blobs.nc" 87 | #~/tempestextremes/StitchBlobs --inlist $BLOB_INPUT --out $blobsfile --var INT_ADGH --minsize 5 --mintime $nsteps 88 | 89 | #Calculate blocking density for year 90 | #densfile="ERA_"$yi"_GHAnom_density.nc" 91 | #~/tempestextremes/blockingDensity --in $blobsfile --var INT_ADGHtag --out $densfile 92 | 93 | #Make plot of density for year 94 | #python ~/tempestextremes/plot_density.py $densfile year $yi "GH Anomaly" 95 | 96 | #echo "Finished year plot." 97 | #Run StitchBlobs for seasonal (DJF,MAM,JJA,SON) 98 | #DJF 99 | #yprev=$((yi-1)) 100 | #check that December (previous year!) file exists 101 | #dfile="ERA_"$yprev"/ERA_"$yprev"_12_vars_GH_devs.nc" 102 | #if [ !
-e $dfile ]; then 103 | # echo "Error: deviations not calculated for December of "$yprev 104 | # exit 105 | #else 106 | #array of season list file names 107 | # x=0 108 | # declare -a SFILES 109 | # for s in ${SEASONS[@]}; do 110 | # echo $x 111 | # SFILES[x]="ERA_"$s"_GHAnom_blobs_list.txt" 112 | # x=$((x+1)) 113 | # done 114 | #initialize lists 115 | # echo $dfile > ${SFILES[0]} 116 | # echo "ERA_"$yi"/ERA_"$yi"_03_vars_GH_devs.nc" > ${SFILES[1]} 117 | # echo "ERA_"$yi"/ERA_"$yi"_06_vars_GH_devs.nc" > ${SFILES[2]} 118 | # echo "ERA_"$yi"/ERA_"$yi"_09_vars_GH_devs.nc" > ${SFILES[3]} 119 | # #other files added to lists 120 | # for n in $(seq 1 2); do 121 | # for x in $(seq 0 3); do 122 | # n1=$((3*$x+$n)) 123 | # echo $n1 124 | # n2=$(printf "%02d" $n1) 125 | # echo $n2 126 | # echo "ERA_"$yi"/ERA_"$yi"_"$n2"_vars_GH_devs.nc" >> ${SFILES[$x]} 127 | # done 128 | # done 129 | #fi 130 | 131 | # Run StitchBlobs and density calcs for seasonal 132 | #for x in $(seq 0 3); do 133 | # blobsfile="ERA_"$yi"_"${SEASONS[$x]}"_"$NDAYS"day_GHAnom_blobs.nc" 134 | # densfile="ERA_"$yi"_"${SEASONS[$x]}"_GHAnom_density.nc" 135 | # 136 | # ~/tempestextremes/StitchBlobs --inlist ${SFILES[$x]} --out $blobsfile --var INT_ADGH --minsize 5 --mintime $nsteps 137 | # ~/tempestextremes/blockingDensity --in $blobsfile --var INT_ADGHtag --out $densfile 138 | # python ~/tempestextremes/plot_density.py $densfile ${SEASONS[$x]} $yi "GH Anomaly" 139 | #done 140 | 141 | -------------------------------------------------------------------------------- /test/control_shell/interp_clivar_to_climo.ncl: -------------------------------------------------------------------------------- 1 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl" 2 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl" 3 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl" 4 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl" 5 | 6 | begin 7 | in=addfile(fin,"r") 8 | lon = in->lon(:) 9 | lat = in->lat(:) 10 | 11 | newlon=fspan(0.,359.,360) 12 | newlon@units=lon@units 13 | newlat=fspan(-90.,90.,181) 14 | newlat@units=lat@units 15 | 16 | ;;Read in the variables 17 | newipv=linint2(lon,in->IPV&lat,in->IPV,True,newlon,newlat,0) 18 | newipv!0="lat" 19 | newipv!1="lon" 20 | newipv&lat=newlat 21 | newipv&lon=newlon 22 | 23 | newT=linint2(lon,in->AVGT&lat,in->AVGT,True,newlon,newlat,0) 24 | newT!0="lat" 25 | newT!1="lon" 26 | newT&lat=newlat 27 | newT&lon=newlon 28 | 29 | newU=linint2(lon,in->AVGU&lat,in->AVGU,True,newlon,newlat,0) 30 | newU!0="lat" 31 | newU!1="lon" 32 | newU&lat=newlat 33 | newU&lon=newlon 34 | 35 | newV=linint2(lon,in->AVGV&lat,in->AVGV,True,newlon,newlat,0) 36 | newV!0="lat" 37 | newV!1="lon" 38 | newV&lat=newlat 39 | newV&lon=newlon 40 | 41 | system("/bin/rm " + fout) 42 | out=addfile(fout,"c") 43 | 44 | out->IPV = newipv 45 | out->AVGT = newT 46 | out->AVGU = newU 47 | out->AVGV = newV 48 | 49 | end -------------------------------------------------------------------------------- /test/control_shell/new_splits.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #YEAR="" 4 | #SEASON="" 5 | #DATA="" 6 | #DIR="" 7 | #for i in "$@"; do 8 | # case $i in 9 | # --year=*) 10 | # YEAR="${i#*=}" 11 | # ;; 12 | # --season=*) 13 | # SEASON="${i#*=}" 14 | # ;; 15 | # --data=*) 16 | # DATA="${i#*=}" 17 | # ;; 18 | # --dir=*) 19 | # DIR="${i#*=}" 20 | # ;; 21 | # *) 22 | # 23 | # ;; 24 | # esac 25 | #done 26 | DATAS=("climo" "2xCO2" "SSTplus2") 27 | for d in ${DATAS[@]}; do 28 
| DIR=$SCRATCH/$d 29 | SEASONS=("DJF" "MAM" "JJA" "SON") 30 | for s in ${SEASONS[@]}; do 31 | SEASON=$s 32 | for YEAR in {2..24}; do 33 | YINIT=$((YEAR)) 34 | MONTH=0 35 | if [[ "$SEASON" == "DJF" ]]; then 36 | YINIT=$((YEAR-1)) 37 | MONTH=12 38 | elif [[ "$SEASON" == "MAM" ]]; then 39 | MONTH=3 40 | elif [[ "$SEASON" == "JJA" ]]; then 41 | MONTH=6 42 | elif [[ "$SEASON" == "SON" ]]; then 43 | MONTH=9 44 | fi 45 | 46 | 47 | 48 | MINIT=$((MONTH-1)) 49 | YF=$(printf "%04d" $YINIT) 50 | M=$(printf "%02d" $MONTH) 51 | 52 | #Check that file hasn't already been split 53 | FILE_CHECK=$(ls $DIR/data/*$YF-$M-01*z500_devs.nc 2>/dev/null | wc -l) 54 | 55 | if [[ $FILE_CHECK -gt 0 ]]; then 56 | echo "File has already been split." 57 | else 58 | 59 | 60 | MF=$(printf "%02d" $MINIT) 61 | SPLIT_FILE=$(ls $DIR/data/*$YF-$MF*z500_devs.nc | tail -1) 62 | if [[ "$SPLIT_FILE" == "" ]]; then 63 | echo "File doesn't exist." 64 | else 65 | echo $SPLIT_FILE 66 | ~/tempestextremes/bin/split_file --in $SPLIT_FILE --rename --vars DGH,ADGH,INT_ADGH 67 | fi 68 | fi 69 | done 70 | done 71 | done 72 | -------------------------------------------------------------------------------- /test/import_scripts/batch_anom_import.py: -------------------------------------------------------------------------------- 1 | from ecmwfapi import ECMWFDataServer 2 | import sys 3 | 4 | server = ECMWFDataServer() 5 | 6 | server.retrieve({ 7 | "dataset" : "interim", 8 | "date" : sys.argv[1], #dates 9 | "stream" : "oper", 10 | # "format" : "netcdf", 11 | "levtype" : "sfc", 12 | "param" : "131003", #single-parameter retrieval (ECMWF param ID 131003) 13 | "step" : "0", 14 | "time" : "00:00:00/06:00:00/12:00:00/18:00:00", 15 | "type" : "an", 16 | "area" : "90/0/-90/359", 17 | "grid" : "1.0/1.0", 18 | "target" : sys.argv[2], 19 | }) 20 | sys.exit(0) 21 | -------------------------------------------------------------------------------- /test/import_scripts/batch_import.py: -------------------------------------------------------------------------------- 1 | from ecmwfapi import ECMWFDataServer 2 | import sys 3 | 4 | server = ECMWFDataServer() 5 | 6 | server.retrieve({ 7 | "dataset" : "interim", 8 | "date" : sys.argv[1], #dates 9 | # "format" : "netcdf", 10 | "levelist" : "150/200/250/300/350/400/450/500/600/700/850/1000", #pressure levels (hPa) to retrieve 11 | "stream" : "oper", 12 | "levtype" : "pl", 13 | "param" : "129.128/130.128/131.128/132.128/138.128", #Z (geopotential), T, U, V, relative vorticity 14 | "step" : "0", 15 | "time" : "00:00:00/06:00:00/12:00:00/18:00:00", 16 | "type" : "an", 17 | "area" : "90/0/-90/359", 18 | "grid" : "1.0/1.0", 19 | "target" : sys.argv[2], 20 | }) 21 | sys.exit(0) 22 | -------------------------------------------------------------------------------- /test/import_scripts/batch_sfc_import.py: -------------------------------------------------------------------------------- 1 | from ecmwfapi import ECMWFDataServer 2 | import sys 3 | 4 | server = ECMWFDataServer() 5 | 6 | server.retrieve({ 7 | "dataset" : "interim", 8 | "date" : sys.argv[1], #dates 9 | "stream" : "oper", 10 | "format" : "netcdf", 11 | "levtype" : "sfc", 12 | "param" : "165.128/166.128/167.128", #10m U, 10m V, 2m T 13 | "step" : "0", 14 | "time" : "00:00:00/06:00:00/12:00:00/18:00:00", 15 | "type" : "an", 16 | "area" : "90/0/-90/359", 17 | "grid" : "1.0/1.0", 18 | "target" : sys.argv[2], 19 | }) 20 | sys.exit(0) 21 | -------------------------------------------------------------------------------- /test/import_scripts/format_nc_ERA.py:
-------------------------------------------------------------------------------- 1 | #This script re-formats the ERA-Interim data so it can be properly read by VisIt 2 | import sys 3 | import MV2 4 | import cdms2 5 | from array import array 6 | 7 | cdms2.setNetcdfShuffleFlag(0) 8 | cdms2.setNetcdfDeflateFlag(0) 9 | cdms2.setNetcdfDeflateLevelFlag(0) 10 | 11 | file_in = cdms2.open(sys.argv[1], 'r') 12 | 13 | time_axis = file_in.axes['initial_time0_hours'] 14 | time_axis.id = 'time' 15 | time_units = time_axis.units 16 | DefaultCalendar=time_axis.getCalendar() 17 | time_axis.designateTime() 18 | 19 | plev_axis = file_in.axes['lv_ISBL1'] 20 | plev_axis.id = 'lev' 21 | plev_axis.designateLevel() 22 | 23 | lat_axis = file_in.axes['g0_lat_2'] 24 | lat_axis.id = 'lat' 25 | lat_axis.designateLatitude() 26 | 27 | lon_axis = file_in.axes['g0_lon_3'] 28 | lon_axis.id = 'lon' 29 | lon_axis.designateLongitude() 30 | 31 | outname = sys.argv[1].replace(".nc", "_mod.nc") 32 | file_out = cdms2.open(outname, 'w') 33 | 34 | for attributes in file_in.listglobal(): 35 | setattr(file_out, attributes, getattr(file_in, attributes)) 36 | 37 | t_var = file_in.variables['T_GDS0_ISBL'] 38 | t_var.id = 'T' 39 | 40 | u_var = file_in.variables['U_GDS0_ISBL'] 41 | u_var.id = 'U' 42 | 43 | v_var = file_in.variables['V_GDS0_ISBL'] 44 | v_var.id = 'V' 45 | 46 | #vort_var = file_in.variables['VO_GDS0_ISBL'] 47 | #vort_var.id = 'VO' 48 | 49 | #pv_var = file_in.variables['PV_GDS0_ISBL'] 50 | #pv_var.id = 'PV' 51 | 52 | #z_var = file_in.variables['Z_GDS0_ISBL'] 53 | #z_var.id = 'Z' 54 | 55 | #var_dict = {'T':t_var, 'U':u_var, 'V':v_var, 'VO':vort_var, 'PV':pv_var, 'Z':z_var} 56 | var_dict = {'T':t_var,'U':u_var,'V':v_var} 57 | var_keys = list(var_dict.keys()) #list() so the indexing below also works under Python 3 58 | var_len = len(var_keys) 59 | 60 | var_shape = (time_axis.shape[0], plev_axis.shape[0], lat_axis.shape[0], lon_axis.shape[0]) 61 | for i in range(0, var_len): 62 | var_name = var_keys[i] 63 | var_call = MV2.array(var_dict[var_name]) 64 | if var_call.shape == var_shape: 65 | var_call.setAxis(0, time_axis) 66 | var_call.setAxis(1, plev_axis) 67 | var_call.setAxis(2, lat_axis) 68 | var_call.setAxis(3, lon_axis) 69 | file_out.write(var_call) 70 | 71 | file_out.close() 72 | file_in.close() 73 | sys.exit(0) 74 | --------------------------------------------------------------------------------