├── .gitmodules ├── pmgrid.F90 ├── dycore.F90 ├── src_override ├── atmos_cmip_interp.inc ├── atmos_cmip_diag.F90 └── fv_tracer2d.F90 ├── spmd_dyn.F90 ├── dycore_budget.F90 ├── dimensions_mod.F90 ├── interp_mod.F90 ├── Makefile.in.fv3 ├── stepon.F90 ├── restart_dynamics.F90 ├── dyn_grid.F90 └── dp_coupling.F90 /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "atmos_cubed_sphere"] 2 | path = atmos_cubed_sphere 3 | url = https://github.com/NOAA-GFDL/GFDL_atmos_cubed_sphere.git 4 | fxtag = FV3-202204-public 5 | fxrequired = AlwaysRequired 6 | fxDONOTUSEurl = https://github.com/NOAA-GFDL/GFDL_atmos_cubed_sphere.git 7 | -------------------------------------------------------------------------------- /pmgrid.F90: -------------------------------------------------------------------------------- 1 | module pmgrid 2 | 3 | ! PLON and PLAT do not correspond to the number of latitudes and longitudes in 4 | ! this version of dynamics. 5 | 6 | implicit none 7 | save 8 | 9 | integer, parameter :: plev = PLEV ! number of vertical levels 10 | integer, parameter :: plevp = plev + 1 11 | 12 | integer, parameter :: plon = 1 13 | integer, parameter :: plat = 1 14 | 15 | end module pmgrid 16 | -------------------------------------------------------------------------------- /dycore.F90: -------------------------------------------------------------------------------- 1 | module dycore 2 | 3 | implicit none 4 | private 5 | 6 | public :: dycore_is 7 | 8 | !======================================================================= 9 | contains 10 | !======================================================================= 11 | 12 | logical function dycore_is(name) 13 | 14 | character(len=*) :: name 15 | 16 | dycore_is = .false. 17 | if (name == 'unstructured' .or. name == 'UNSTRUCTURED' .or. name == 'fv3' .or. name == 'FV3') then 18 | dycore_is = .true. 19 | end if 20 | 21 | return 22 | end function dycore_is 23 | 24 | end module dycore 25 | -------------------------------------------------------------------------------- /src_override/atmos_cmip_interp.inc: -------------------------------------------------------------------------------- 1 | 2 | if (logp <= pm(1)) then 3 | if (extrap) then 4 | ap(i,j,kp) = a(i,j,1) + (a(i,j,3)-a(i,j,1)) * (logp-pm(1))/(pm(3)-pm(1)) 5 | else 6 | ap(i,j,kp) = a(i,j,1) 7 | endif 8 | else if (logp >= pm(km)) then 9 | if (extrap) then 10 | ap(i,j,kp) = a(i,j,km) + (a(i,j,km)-a(i,j,km-2)) * (logp-pm(km))/(pm(km)-pm(km-2)) 11 | else 12 | ap(i,j,kp) = a(i,j,km) 13 | endif 14 | else 15 | do k = 1, km-1 16 | if (logp <= pm(k+1) .and. logp >= pm(k)) then 17 | ap(i,j,kp) = a(i,j,k) + (a(i,j,k+1)-a(i,j,k)) * (logp-pm(k))/(pm(k+1)-pm(k)) 18 | go to 1000 19 | endif 20 | enddo 21 | endif 22 | 1000 continue 23 | 24 | -------------------------------------------------------------------------------- /spmd_dyn.F90: -------------------------------------------------------------------------------- 1 | module spmd_dyn 2 | 3 | ! Purpose: SPMD implementation of CAM FV3 dynamics. 4 | 5 | implicit none 6 | private 7 | 8 | ! These variables are not used locally, but are set and used in phys_grid. 9 | ! They probably should be moved there. 10 | logical, public :: local_dp_map=.true. ! flag indicates that mapping between dynamics 11 | ! and physics decompositions does not require 12 | ! interprocess communication 13 | integer, public :: block_buf_nrecs ! number of local grid points (lon,lat,lev) 14 | ! 
in dynamics decomposition (including level 0) 15 | integer, public :: chunk_buf_nrecs ! number of local grid points (lon,lat,lev) 16 | ! in physics decomposition (including level 0) 17 | ! assigned in phys_grid.F90 18 | end module spmd_dyn 19 | -------------------------------------------------------------------------------- /dycore_budget.F90: -------------------------------------------------------------------------------- 1 | module dycore_budget 2 | 3 | implicit none 4 | 5 | public :: print_budget 6 | 7 | !========================================================================================= 8 | contains 9 | !========================================================================================= 10 | 11 | subroutine print_budget(hstwr) 12 | 13 | use spmd_utils, only: masterproc 14 | use cam_abortutils, only: endrun 15 | use cam_budget, only: thermo_budget_histfile_num, thermo_budget_history 16 | 17 | ! arguments 18 | logical, intent(in) :: hstwr(:) 19 | character(len=*), parameter :: subname = 'dycore_budget:print_budgets:' 20 | 21 | !-------------------------------------------------------------------------------------- 22 | 23 | if (masterproc .and. thermo_budget_history .and. hstwr(thermo_budget_histfile_num)) then 24 | call endrun(subname//' is not implemented for the FV3 dycore') 25 | end if 26 | end subroutine print_budget 27 | end module dycore_budget 28 | -------------------------------------------------------------------------------- /dimensions_mod.F90: -------------------------------------------------------------------------------- 1 | module dimensions_mod 2 | use shr_kind_mod, only: r8=>shr_kind_r8 3 | 4 | implicit none 5 | private 6 | 7 | 8 | !These are convenience variables for local use only, and are set to values in Atm% 9 | integer, public :: npx, npy, ntiles 10 | 11 | integer, parameter, public :: nlev=PLEV 12 | integer, parameter, public :: nlevp=nlev+1 13 | 14 | ! 15 | ! The variables below hold indices of water vapor and condensate loading tracers as well as 16 | ! associated heat capacities (initialized in dyn_init): 17 | ! 18 | ! qsize_condensate_loading_idx = FV3 index of water tracers included in condensate loading according to FV3 dynamics 19 | ! qsize_condensate_loading_idx_gll = CAM index of water tracers included in condensate loading terms given FV3 index 20 | ! 21 | integer, allocatable, public :: qsize_tracer_idx_cam2dyn(:) 22 | character(len=16), allocatable, public :: cnst_name_ffsl(:) ! constituent names for FV3 tracers 23 | character(len=128), allocatable, public :: cnst_longname_ffsl(:) ! long name of FV3 tracers 24 | ! 25 | !moist cp in energy conversion term 26 | ! 27 | ! .false.: force dycore to use cpd (cp dry) instead of moist cp 28 | ! .true. : use moist cp in dycore 29 | ! 30 | logical , public :: fv3_lcp_moist = .false. 31 | logical , public :: fv3_lcv_moist = .false. 32 | logical , public :: fv3_scale_ttend = .false. 33 | 34 | end module dimensions_mod 35 | 36 | -------------------------------------------------------------------------------- /interp_mod.F90: -------------------------------------------------------------------------------- 1 | module interp_mod 2 | ! 
inline interpolation routines not implemented yet 3 | use shr_kind_mod, only : r8=>shr_kind_r8 4 | use cam_abortutils, only : endrun 5 | 6 | implicit none 7 | private 8 | save 9 | 10 | public :: setup_history_interpolation 11 | public :: set_interp_hfile 12 | public :: write_interpolated 13 | 14 | interface write_interpolated 15 | module procedure write_interpolated_scalar 16 | module procedure write_interpolated_vector 17 | end interface 18 | integer, parameter :: nlat=0, nlon=0 19 | contains 20 | 21 | subroutine setup_history_interpolation(interp_ok, mtapes, interp_output, & 22 | interp_info) 23 | use cam_history_support, only: interp_info_t 24 | 25 | ! Dummy arguments 26 | logical, intent(inout) :: interp_ok 27 | integer, intent(in) :: mtapes 28 | logical, intent(in) :: interp_output(:) 29 | type(interp_info_t), intent(inout) :: interp_info(:) 30 | 31 | interp_ok = .false. 32 | 33 | end subroutine setup_history_interpolation 34 | 35 | subroutine set_interp_hfile(hfilenum, interp_info) 36 | use cam_history_support, only: interp_info_t 37 | 38 | ! Dummy arguments 39 | integer, intent(in) :: hfilenum 40 | type(interp_info_t), intent(inout) :: interp_info(:) 41 | call endrun('ERROR:set_interp_hfile - This routine is a stub, you shouldnt get here') 42 | end subroutine set_interp_hfile 43 | 44 | subroutine write_interpolated_scalar(File, varid, fld, numlev, data_type, decomp_type) 45 | use pio, only : file_desc_t, var_desc_t 46 | use shr_kind_mod, only : r8=>shr_kind_r8 47 | 48 | type(file_desc_t), intent(inout) :: File 49 | type(var_desc_t), intent(inout) :: varid 50 | real(r8), intent(in) :: fld(:,:,:) 51 | integer, intent(in) :: numlev, data_type, decomp_type 52 | call endrun('ERROR:write_interpolated_scalar - This routine is a stub, you shouldnt get here') 53 | 54 | end subroutine write_interpolated_scalar 55 | 56 | subroutine write_interpolated_vector(File, varidu, varidv, fldu, fldv, numlev, data_type, decomp_type) 57 | use pio, only : file_desc_t, var_desc_t 58 | 59 | type(file_desc_t), intent(inout) :: File 60 | type(var_desc_t), intent(inout) :: varidu, varidv 61 | real(r8), intent(in) :: fldu(:,:,:), fldv(:,:,:) 62 | integer, intent(in) :: numlev, data_type, decomp_type 63 | call endrun('ERROR:write_interpolated_vector - This routine is a stub, you shouldnt get here') 64 | 65 | end subroutine write_interpolated_vector 66 | 67 | end module interp_mod 68 | -------------------------------------------------------------------------------- /Makefile.in.fv3: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | # 3 | # The Makefile for building the FV3 library is created by CAM's configure 4 | # using this template and prepending the following macros: 5 | # 6 | # The macro CAM_BLD is also prepended. It is the build directory of the CAM 7 | # code and it contains the abortutils.mod file. The abortutils module is 8 | # referenced by FV3 code in order to perform an abort which is appropriate 9 | # for the CESM system. 10 | # 11 | # The main customization required for the library to link with CAM is to 12 | # use autopromotion of the default real type to real*8. This is required 13 | # in most, though not all, of the FV3 files. Also, some compilers require 14 | # special flags to specify fixed or free format source (rather than depend 15 | # on filename extensions). 
Thus, the explicit rules at the end of this 16 | # template for compiling FV3 files have been modified to allow different 17 | # sets of flags for 1) files that cannot be compiled with autopromotion, 18 | # and 2) files that use fixed format source. 19 | # 20 | # The generated Makefile will be used by a sub-Make issued from CAM's Make. 21 | # The sub-Make will inherit the macros: 22 | # 23 | # FC name of Fortran90 compiler 24 | # FC_FLAGS Fortran compiler flags 25 | # 26 | ######################################################################## 27 | AR := ar 28 | RM := rm 29 | CP := cp 30 | 31 | # Load dependency search path. 32 | cpp_dirs := . 33 | cpp_dirs += $(shell cat Filepath) 34 | 35 | # Create VPATH from Filepath file created by CAM configure 36 | # Expand any tildes in directory names. Change spaces to colons. 37 | VPATH := $(foreach dir,$(cpp_dirs),$(wildcard $(dir))) 38 | VPATH := $(subst $(space),:,$(VPATH)) 39 | 40 | F90 := $(FC) 41 | C90 := $(CC) 42 | F90FLAGS := $(FREEFLAGS) $(FFLAGS) 43 | 44 | # Additional GNU flags needed for FV3 45 | ifeq ($(strip $(COMPILER)),gnu) 46 | F90FLAGS += -fcray-pointer 47 | endif 48 | 49 | complib: $(COMPLIB) Depends 50 | 51 | #------------------------------------------------------------------------------- 52 | # Build & include dependency files 53 | #------------------------------------------------------------------------------- 54 | 55 | touch_filepath: 56 | touch Filepath 57 | 58 | # Get list of files and build dependency file for all .o files 59 | # using perl scripts mkSrcfiles and mkDepends 60 | # if a source is of form .F90.in strip the .in before creating the list of objects 61 | 62 | SKIPFILES := fv_iau_mod.F90 63 | SOURCES := $(filter-out $(SKIPFILES), $(shell cat Srcfiles)) 64 | BASENAMES := $(basename $(basename $(SOURCES))) 65 | OBJS := $(addsuffix .o, $(BASENAMES)) 66 | INCS := $(foreach dir,$(cpp_dirs),-I$(dir)) 67 | 68 | CURDIR := $(shell pwd) 69 | 70 | Depends: Srcfiles Deppath 71 | $(CASETOOLS)/mkDepends $(USER_MKDEPENDS_OPTS) Deppath Srcfiles > $@ 72 | 73 | Deppath: Filepath 74 | $(CP) -f Filepath $@ 75 | @echo "$(MINCROOT)" >> $@ 76 | 77 | Srcfiles: Filepath 78 | $(CASETOOLS)/mkSrcfiles 79 | 80 | #------------------------------------------------------------------------------- 81 | 82 | db_files: 83 | @echo " " 84 | @echo "* VPATH := $(VPATH)" 85 | db_flags: 86 | @echo " " 87 | @echo "* cc := $(CC) $(CFLAGS) $(INCLDIR) $(INCS)" 88 | @echo "* .F.o := $(FC) $(F90FLAGS) $(INCLDIR) $(INCS)" 89 | 90 | #------------------------------------------------------------------------------- 91 | # build rules: 92 | #------------------------------------------------------------------------------- 93 | .SUFFIXES: 94 | .SUFFIXES: .F90 .F .f90 .f .c .cpp .o .in 95 | 96 | ifdef INCLUDE_DIR 97 | $(COMPLIB): $(OBJS) 98 | @echo "* OBJS := $(OBJS)" 99 | $(AR) -r $(COMPLIB) $(OBJS) 100 | $(CP) *.$(MOD_SUFFIX) *.h $(INCLUDE_DIR) 101 | else 102 | $(COMPLIB): $(OBJS) 103 | @echo "* OBJS := $(OBJS)" 104 | $(AR) -r $(COMPLIB) $(OBJS) 105 | endif 106 | 107 | #------------------------------------------------------------------------------- 108 | # Rules for gnu specific compiler directives for FV3 library code 109 | #------------------------------------------------------------------------------- 110 | 111 | ifeq ($(FC_TYPE), gnu) 112 | fv_arrays.o: fv_arrays.F90 113 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) -fno-range-check $< 114 | 115 | fv_regional_bc.o: fv_regional_bc.F90 116 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) -fno-range-check $< 117 | 118 | 
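# The per-file rules in this block pass extra gfortran flags: -fno-range-check
# accepts numeric constants that would otherwise fail the compiler's range
# checks, and -fdec enables legacy DEC Fortran extensions.  Which specific
# language features each file relies on is not documented here; the flags are
# simply assumed to be required by the corresponding FV3 sources.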
gfdl_cloud_microphys.o: gfdl_cloud_microphys.F90 119 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) -fdec $< 120 | 121 | module_mp_radar.o: module_mp_radar.F90 122 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) -fdec $< 123 | endif 124 | 125 | .c.o: 126 | $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) $(CPPDEFS) $< 127 | .F.o: 128 | $(F90) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< 129 | .f.o: 130 | $(F90) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< 131 | .f90.o: 132 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) $< 133 | .F90.o: 134 | $(F90) -c $(INCLDIR) $(INCS) $(F90FLAGS) $(CONTIGUOUS_FLAG) $< 135 | .cpp.o: 136 | $(CXX) -c $(INCLDIR) $(INCS) $(CXXFLAGS) $< 137 | 138 | %.F90: %.F90.in 139 | $(CIMEROOT)/src/externals/genf90/genf90.pl $< > $@ 140 | 141 | clean_objs: 142 | rm -f $(OBJS) *.mod *.o 143 | 144 | clean: 145 | rm -f $(COMPLIB) $(OBJS) *.mod *.o 146 | 147 | # the if-tests prevent DEPS files from being created when they're not needed 148 | ifneq ($(MAKECMDGOALS), db_files) 149 | ifneq ($(MAKECMDGOALS), db_flags) 150 | ifeq (,$(findstring clean,$(MAKECMDGOALS))) 151 | -include Depends $(CASEROOT)/Depends.$(COMPILER) $(CASEROOT)/Depends.$(MACH) $(CASEROOT)/Depends.$(MACH).$(COMPILER) 152 | endif 153 | endif 154 | endif 155 | -------------------------------------------------------------------------------- /stepon.F90: -------------------------------------------------------------------------------- 1 | module stepon 2 | 3 | ! MODULE: stepon -- FV3 Dynamics specific time-stepping 4 | 5 | use shr_kind_mod, only: r8 => shr_kind_r8 6 | use physics_types, only: physics_state, physics_tend 7 | use ppgrid, only: begchunk, endchunk 8 | use perf_mod, only: t_startf, t_stopf, t_barrierf 9 | use spmd_utils, only: iam, masterproc, mpicom 10 | use dyn_comp, only: dyn_import_t, dyn_export_t 11 | use dyn_grid, only: mytile 12 | use time_manager, only: get_step_size 13 | use dimensions_mod, only: qsize_tracer_idx_cam2dyn 14 | 15 | use aerosol_properties_mod, only: aerosol_properties 16 | use aerosol_state_mod, only: aerosol_state 17 | use microp_aero, only: aerosol_state_object, aerosol_properties_object 18 | 19 | implicit none 20 | private 21 | 22 | public stepon_init ! Initialization 23 | public stepon_run1 ! run method phase 1 24 | public stepon_run2 ! run method phase 2 25 | public stepon_run3 ! run method phase 3 26 | public stepon_final ! Finalization 27 | 28 | class(aerosol_properties), pointer :: aero_props_obj => null() 29 | logical :: aerosols_transported = .false. 30 | 31 | !======================================================================= 32 | contains 33 | !======================================================================= 34 | 35 | subroutine stepon_init(dyn_in, dyn_out) 36 | 37 | ! ROUTINE: stepon_init -- Time stepping initialization 38 | 39 | use cam_history, only: addfld, add_default, horiz_only 40 | use constituents, only: pcnst, cnst_name, cnst_longname 41 | 42 | type (dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container 43 | type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container 44 | 45 | ! local variables 46 | integer :: m_cnst,m_cnst_ffsl 47 | !---------------------------------------------------------------------------- 48 | ! These fields on dynamics grid are output before the call to d_p_coupling. 
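    ! For every CAM constituent two fields are registered on the FFSL history
    ! grid: <name>_ffsl, the tracer mixing ratio, and <name>_mass_ffsl, the
    ! mixing ratio weighted by the layer pressure thickness (q*dp).  The CAM
    ! constituent index is mapped to the matching FV3 tracer index through
    ! qsize_tracer_idx_cam2dyn; the fields themselves are written later in
    ! diag_dyn_out.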
49 | do m_cnst = 1, pcnst 50 | m_cnst_ffsl=qsize_tracer_idx_cam2dyn(m_cnst) 51 | call addfld(trim(cnst_name(m_cnst))//'_ffsl', (/ 'lev' /), 'I', 'kg/kg', & 52 | trim(cnst_longname(m_cnst)), gridname='FFSLHIST') 53 | call addfld(trim(cnst_name(m_cnst))//'_mass_ffsl', (/ 'lev' /), 'I', 'kg/kg', & 54 | trim(cnst_longname(m_cnst))//'*dp', gridname='FFSLHIST') 55 | end do 56 | call addfld('U_ffsl' ,(/ 'lev' /), 'I', 'm/s ','U wind on A grid after dynamics',gridname='FFSLHIST') 57 | call addfld('V_ffsl' ,(/ 'lev' /), 'I', 'm/s ','V wind on A grid after dynamics',gridname='FFSLHIST') 58 | call addfld('U_ffsl_ns' ,(/ 'lev' /), 'I', 'm/s ','U wind on NS grid after dynamics',gridname='FFSLHIST_NS') 59 | call addfld('V_ffsl_ew' ,(/ 'lev' /), 'I', 'm/s ','V wind on EW grid after dynamics',gridname='FFSLHIST_EW') 60 | call addfld('T_ffsl' ,(/ 'lev' /), 'I', 'K ' ,'T on A grid grid after dynamics' ,gridname='FFSLHIST') 61 | call addfld('PS_ffsl', horiz_only, 'I', 'Pa', 'Surface pressure on A grid after dynamics',gridname='FFSLHIST') 62 | call addfld('PHIS_ffsl', horiz_only, 'I', 'Pa', 'Geopotential height on A grid after dynamics',gridname='FFSLHIST') 63 | 64 | 65 | ! Fields for initial condition files 66 | call addfld('U&IC', (/ 'lev' /), 'I', 'm/s', 'Zonal wind', gridname='FFSLHIST' ) 67 | call addfld('V&IC', (/ 'lev' /), 'I', 'm/s', 'Meridional wind',gridname='FFSLHIST' ) 68 | ! Don't need to register U&IC V&IC as vector components since we don't interpolate IC files 69 | call add_default('U&IC',0, 'I') 70 | call add_default('V&IC',0, 'I') 71 | 72 | call addfld('PS&IC', horiz_only, 'I', 'Pa', 'Surface pressure',gridname='FFSLHIST') 73 | call addfld('PHIS&IC', horiz_only, 'I', 'Pa', 'PHIS on ffsl grid',gridname='FFSLHIST') 74 | call addfld('T&IC', (/ 'lev' /), 'I', 'K', 'Temperature', gridname='FFSLHIST') 75 | call add_default('PS&IC',0, 'I') 76 | call add_default('PHIS&IC',0, 'I') 77 | call add_default('T&IC ',0, 'I') 78 | 79 | do m_cnst = 1,pcnst 80 | call addfld(trim(cnst_name(m_cnst))//'&IC', (/ 'lev' /), 'I', 'kg/kg', & 81 | trim(cnst_longname(m_cnst)), gridname='FFSLHIST') 82 | call add_default(trim(cnst_name(m_cnst))//'&IC', 0, 'I') 83 | end do 84 | 85 | ! get aerosol properties 86 | aero_props_obj => aerosol_properties_object() 87 | 88 | if (associated(aero_props_obj)) then 89 | ! determine if there are transported aerosol contistuents 90 | aerosols_transported = aero_props_obj%number_transported()>0 91 | end if 92 | 93 | end subroutine stepon_init 94 | 95 | !======================================================================= 96 | 97 | subroutine stepon_run1(dtime_out, phys_state, phys_tend, pbuf2d, dyn_in, dyn_out) 98 | 99 | ! ROUTINE: stepon_run1 -- Phase 1 of dynamics run method. 100 | 101 | use physics_buffer, only: physics_buffer_desc 102 | use dp_coupling, only: d_p_coupling 103 | 104 | real(r8), intent(out) :: dtime_out ! Time-step 105 | type (physics_state), intent(inout) :: phys_state(begchunk:endchunk) 106 | type (physics_tend), intent(inout) :: phys_tend(begchunk:endchunk) 107 | type (physics_buffer_desc), pointer :: pbuf2d(:,:) 108 | type (dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container 109 | type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container 110 | 111 | integer :: c 112 | class(aerosol_state), pointer :: aero_state_obj 113 | nullify(aero_state_obj) 114 | 115 | dtime_out = get_step_size() 116 | 117 | call diag_dyn_out(dyn_out,'') 118 | 119 | !---------------------------------------------------------- 120 | ! 
Move data into phys_state structure. 121 | !---------------------------------------------------------- 122 | 123 | call t_barrierf('sync_d_p_coupling', mpicom) 124 | call t_startf('d_p_coupling') 125 | call d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) 126 | call t_stopf('d_p_coupling') 127 | 128 | !---------------------------------------------------------- 129 | ! update aerosol state object from CAM physics state constituents 130 | !---------------------------------------------------------- 131 | if (aerosols_transported) then 132 | 133 | do c = begchunk,endchunk 134 | aero_state_obj => aerosol_state_object(c) 135 | ! pass number mass or number mixing ratios of aerosol constituents 136 | ! to aerosol state object 137 | call aero_state_obj%set_transported(phys_state(c)%q) 138 | end do 139 | 140 | end if 141 | 142 | end subroutine stepon_run1 143 | 144 | !======================================================================= 145 | 146 | subroutine stepon_run2(phys_state, phys_tend, dyn_in, dyn_out) 147 | 148 | ! ROUTINE: stepon_run2 -- second phase run method 149 | 150 | use dp_coupling, only: p_d_coupling 151 | use dyn_comp, only: calc_tot_energy_dynamics 152 | 153 | type (physics_state), intent(inout) :: phys_state(begchunk:endchunk) 154 | type (physics_tend), intent(inout) :: phys_tend(begchunk:endchunk) 155 | type (dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container 156 | type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container 157 | 158 | integer :: c 159 | class(aerosol_state), pointer :: aero_state_obj 160 | 161 | ! copy from phys structures -> dynamics structures 162 | 163 | !---------------------------------------------------------- 164 | ! update physics state with aerosol constituents 165 | !---------------------------------------------------------- 166 | nullify(aero_state_obj) 167 | 168 | if (aerosols_transported) then 169 | do c = begchunk,endchunk 170 | aero_state_obj => aerosol_state_object(c) 171 | ! get mass or number mixing ratios of aerosol constituents 172 | call aero_state_obj%get_transported(phys_state(c)%q) 173 | end do 174 | end if 175 | 176 | call t_barrierf('sync_p_d_coupling', mpicom) 177 | #if ( defined CALC_ENERGY ) 178 | call calc_tot_energy_dynamics(dyn_in%atm, 'dED') 179 | #endif 180 | call t_startf('p_d_coupling') 181 | call p_d_coupling(phys_state, phys_tend, dyn_in) 182 | call t_stopf('p_d_coupling') 183 | 184 | #if ( defined CALC_ENERGY ) 185 | call calc_tot_energy_dynamics(dyn_in%atm, 'dBD') 186 | #endif 187 | end subroutine stepon_run2 188 | 189 | !======================================================================= 190 | 191 | subroutine stepon_run3(dtime, cam_out, phys_state, dyn_in, dyn_out) 192 | 193 | use camsrfexch, only: cam_out_t 194 | use dyn_comp, only: dyn_run 195 | 196 | real(r8), intent(in) :: dtime ! Time-step 197 | type (physics_state), intent(in):: phys_state(begchunk:endchunk) 198 | type (dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container 199 | type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container 200 | type (cam_out_t), intent(inout) :: cam_out(begchunk:endchunk) 201 | 202 | call t_barrierf('sync_dyn_run', mpicom) 203 | call t_startf('dyn_run') 204 | call dyn_run(dyn_out) 205 | call t_stopf('dyn_run') 206 | 207 | end subroutine stepon_run3 208 | 209 | !======================================================================= 210 | 211 | subroutine stepon_final(dyn_in, dyn_out) 212 | 213 | ! 
ROUTINE: stepon_final -- Dynamics finalization 214 | 215 | use dyn_comp, only: dyn_final 216 | 217 | type (dyn_import_t), intent(inout) :: dyn_in ! Dynamics import container 218 | type (dyn_export_t), intent(inout) :: dyn_out ! Dynamics export container 219 | 220 | call t_startf('dyn_final') 221 | call dyn_final(dyn_in, dyn_out) 222 | call t_stopf('dyn_final') 223 | 224 | end subroutine stepon_final 225 | 226 | !======================================================================= 227 | 228 | subroutine diag_dyn_out(dyn_in,suffx) 229 | 230 | use cam_history, only: write_inithist, outfld, hist_fld_active, fieldname_len 231 | use constituents, only: cnst_name, pcnst 232 | use dyn_grid, only: mytile 233 | use fv_arrays_mod, only: fv_atmos_type 234 | use dimensions_mod, only: nlev 235 | 236 | type (dyn_export_t), intent(in) :: dyn_in 237 | character*(*) , intent(in) :: suffx ! suffix for "outfld" names 238 | 239 | 240 | ! local variables 241 | integer :: is,ie,js,je, j, m_cnst,m_cnst_ffsl 242 | integer :: idim 243 | character(len=fieldname_len) :: tfname 244 | 245 | type (fv_atmos_type), pointer :: Atm(:) 246 | 247 | !---------------------------------------------------------------------------- 248 | 249 | Atm=>dyn_in%atm 250 | 251 | is = Atm(mytile)%bd%is 252 | ie = Atm(mytile)%bd%ie 253 | js = Atm(mytile)%bd%js 254 | je = Atm(mytile)%bd%je 255 | 256 | idim=ie-is+1 257 | ! Output tracer fields for analysis of advection schemes 258 | do m_cnst = 1, pcnst 259 | m_cnst_ffsl=qsize_tracer_idx_cam2dyn(m_cnst) 260 | tfname = trim(cnst_name(m_cnst))//'_ffsl'//trim(suffx) 261 | if (hist_fld_active(tfname)) then 262 | do j = js, je 263 | call outfld(tfname, RESHAPE(Atm(mytile)%q(is:ie, j, :, m_cnst_ffsl),(/idim,nlev/)), idim, j) 264 | end do 265 | end if 266 | end do 267 | 268 | ! Output tracer fields for analysis of advection schemes 269 | do m_cnst = 1, pcnst 270 | m_cnst_ffsl=qsize_tracer_idx_cam2dyn(m_cnst) 271 | tfname = trim(cnst_name(m_cnst))//'_mass_ffsl'//trim(suffx) 272 | if (hist_fld_active(tfname)) then 273 | do j = js, je 274 | call outfld(tfname,RESHAPE((Atm(mytile)%q(is:ie,j,:,m_cnst_ffsl)*Atm(mytile)%delp(is:ie,j,:)),(/idim,nlev/)),idim, j) 275 | end do 276 | end if 277 | end do 278 | 279 | if (hist_fld_active('U_ffsl'//trim(suffx)) .or. 
hist_fld_active('V_ffsl'//trim(suffx))) then 280 | do j = js, je 281 | call outfld('U_ffsl'//trim(suffx), RESHAPE(Atm(mytile)%ua(is:ie, j, :),(/idim,nlev/)), idim, j) 282 | call outfld('V_ffsl'//trim(suffx), RESHAPE(Atm(mytile)%va(is:ie, j, :),(/idim,nlev/)), idim, j) 283 | end do 284 | end if 285 | 286 | if (hist_fld_active('U_ffsl_ns'//trim(suffx))) then 287 | do j = js, je+1 288 | call outfld('U_ffsl_ns'//trim(suffx), RESHAPE(Atm(mytile)%u(is:ie, j, :),(/idim,nlev/)), idim, j) 289 | end do 290 | end if 291 | 292 | if (hist_fld_active('V_ffsl_ew'//trim(suffx))) then 293 | do j = js, je 294 | call outfld('V_ffsl_ew'//trim(suffx), RESHAPE(Atm(mytile)%v(is:ie+1, j, :),(/idim+1,nlev/)), idim+1, j) 295 | end do 296 | end if 297 | 298 | if (hist_fld_active('T_ffsl'//trim(suffx))) then 299 | do j = js, je 300 | call outfld('T_ffsl'//trim(suffx), RESHAPE(Atm(mytile)%pt(is:ie, j, :),(/idim,nlev/)), idim, j) 301 | end do 302 | end if 303 | 304 | if (hist_fld_active('PS_ffsl'//trim(suffx))) then 305 | do j = js, je 306 | call outfld('PS_ffsl'//trim(suffx), Atm(mytile)%ps(is:ie, j), idim, j) 307 | end do 308 | end if 309 | 310 | if (hist_fld_active('PHIS_ffsl'//trim(suffx))) then 311 | do j = js, je 312 | call outfld('PHIS_ffsl'//trim(suffx), Atm(mytile)%phis(is:ie, j), idim, j) 313 | end do 314 | end if 315 | 316 | if (write_inithist()) then 317 | 318 | do j = js, je 319 | call outfld('T&IC', RESHAPE(Atm(mytile)%pt(is:ie, j, :),(/idim,nlev/)), idim, j) 320 | call outfld('U&IC', RESHAPE(Atm(mytile)%ua(is:ie, j, :),(/idim,nlev/)), idim, j) 321 | call outfld('V&IC', RESHAPE(Atm(mytile)%va(is:ie, j, :),(/idim,nlev/)), idim, j) 322 | call outfld('PS&IC', Atm(mytile)%ps(is:ie, j), idim, j) 323 | call outfld('PHIS&IC', Atm(mytile)%phis(is:ie, j), idim, j) 324 | 325 | do m_cnst = 1, pcnst 326 | m_cnst_ffsl=qsize_tracer_idx_cam2dyn(m_cnst) 327 | call outfld(trim(cnst_name(m_cnst))//'&IC', RESHAPE(Atm(mytile)%q(is:ie, j, :, m_cnst_ffsl),(/idim,nlev/)), idim, j) 328 | end do 329 | end do 330 | end if ! if (write_inithist) 331 | 332 | end subroutine diag_dyn_out 333 | 334 | end module stepon 335 | -------------------------------------------------------------------------------- /restart_dynamics.F90: -------------------------------------------------------------------------------- 1 | module restart_dynamics 2 | 3 | ! Write and read dynamics fields from the restart file. For exact restart 4 | ! it is necessary to write all element data, including duplicate columns, 5 | ! to the file. 
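! A-grid prognostic fields (PS, PHIS, U, V, T, OMEGA, DELP and the constituent
! tracers) are written on the FFSL grid, while the D-grid winds US and VS are
! written on the staggered FFSL_NS and FFSL_EW grids.  On read, the duplicated
! north/east edge points of the D-grid winds are refilled with
! mpp_get_boundary and the halo regions are updated with mpp_update_domains.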
6 | 7 | use cam_abortutils, only: endrun 8 | use cam_grid_support, only: cam_grid_header_info_t, cam_grid_id, cam_grid_write_attr, & 9 | cam_grid_write_var, cam_grid_get_decomp, cam_grid_dimensions, max_hcoordname_len 10 | use cam_logfile, only: iulog 11 | use cam_pio_utils, only: cam_pio_handle_error 12 | use dyn_comp, only: dyn_import_t, dyn_export_t 13 | use dyn_grid, only: mytile 14 | use fv_arrays_mod, only: fv_atmos_type 15 | use pio, only: file_desc_t, var_desc_t 16 | use shr_kind_mod, only: r8 => shr_kind_r8, i8 => shr_kind_i8 17 | use spmd_utils, only: masterproc 18 | 19 | implicit none 20 | private 21 | 22 | public :: init_restart_dynamics, write_restart_dynamics, read_restart_dynamics 23 | 24 | type(var_desc_t) :: udesc, vdesc, tdesc, psdesc, phisdesc, usdesc,vsdesc,delpdesc,omegadesc 25 | 26 | integer :: ncol_d_dimid, ncol_d_ew_dimid, ncol_d_ns_dimid, nlev_dimid, nlevp_dimid 27 | type(var_desc_t), allocatable :: qdesc(:) 28 | integer :: is,ie,js,je 29 | 30 | 31 | !======================================================================= 32 | contains 33 | !======================================================================= 34 | 35 | subroutine init_restart_dynamics(File, dyn_out) 36 | 37 | use constituents, only: cnst_name, pcnst 38 | use hycoef, only: init_restart_hycoef 39 | use pio, only: pio_unlimited, pio_double, pio_def_dim, & 40 | pio_seterrorhandling, pio_bcast_error, & 41 | pio_def_var, & 42 | pio_inq_dimid 43 | 44 | ! arguments 45 | type(file_desc_t), intent(inout) :: file 46 | type(dyn_export_t), intent(in) :: dyn_out 47 | 48 | ! local variables 49 | integer :: vdimids(2) 50 | integer :: ierr, i, err_handling 51 | integer :: time_dimid 52 | integer :: is,ie,js,je 53 | type (fv_atmos_type), pointer :: Atm(:) 54 | 55 | integer :: grid_id,grid_id_ns,grid_id_ew 56 | type(cam_grid_header_info_t) :: info,info_ew,info_ns 57 | 58 | !--------------------------------------------------------------------------- 59 | 60 | Atm=>dyn_out%atm 61 | 62 | is = Atm(mytile)%bd%is 63 | ie = Atm(mytile)%bd%ie 64 | js = Atm(mytile)%bd%js 65 | je = Atm(mytile)%bd%je 66 | 67 | call init_restart_hycoef(File, vdimids) 68 | 69 | call pio_seterrorhandling(File, pio_bcast_error, err_handling) 70 | 71 | ierr = PIO_Def_Dim(File, 'time', PIO_UNLIMITED, time_dimid) 72 | 73 | grid_id = cam_grid_id('FFSL') 74 | call cam_grid_write_attr(File, grid_id, info) 75 | ncol_d_dimid = info%get_hdimid(1) 76 | 77 | grid_id_ew = cam_grid_id('FFSL_EW') 78 | call cam_grid_write_attr(File, grid_id_ew, info_ew) 79 | ncol_d_ew_dimid = info_ew%get_hdimid(1) 80 | 81 | grid_id_ns = cam_grid_id('FFSL_NS') 82 | call cam_grid_write_attr(File, grid_id_ns, info_ns) 83 | ncol_d_ns_dimid = info_ns%get_hdimid(1) 84 | 85 | nlev_dimid = vdimids(1) 86 | 87 | ierr = PIO_Def_Var(File, 'U', pio_double, (/ncol_d_dimid, nlev_dimid/), Udesc) 88 | ierr = PIO_Def_Var(File, 'V', pio_double, (/ncol_d_dimid, nlev_dimid/), Vdesc) 89 | ierr = PIO_Def_Var(File, 'US', pio_double, (/ncol_d_ns_dimid, nlev_dimid/), USdesc) 90 | ierr = PIO_Def_Var(File, 'VS', pio_double, (/ncol_d_ew_dimid, nlev_dimid/), VSdesc) 91 | ierr = PIO_Def_Var(File, 'T', pio_double, (/ncol_d_dimid, nlev_dimid/), Tdesc) 92 | ierr = PIO_Def_Var(File, 'OMEGA', pio_double, (/ncol_d_dimid, nlev_dimid/), omegadesc) 93 | ierr = PIO_Def_Var(File, 'DELP', pio_double, (/ncol_d_dimid, nlev_dimid/), delpdesc) 94 | ierr = PIO_Def_Var(File, 'PS', pio_double, (/ncol_d_dimid/), PSdesc) 95 | ierr = PIO_Def_Var(File, 'PHIS', pio_double, (/ncol_d_dimid/), phisdesc) 96 | 97 | 
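    ! One restart variable is defined for each constituent, dimensioned
    ! (ncol_d, lev) like the other A-grid fields above.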
allocate(Qdesc(pcnst)) 98 | 99 | do i = 1, pcnst 100 | ierr = PIO_Def_Var(File, cnst_name(i), pio_double, (/ncol_d_dimid, nlev_dimid/), Qdesc(i)) 101 | end do 102 | 103 | call pio_seterrorhandling(File, err_handling) 104 | 105 | end subroutine init_restart_dynamics 106 | 107 | !======================================================================= 108 | 109 | subroutine write_restart_dynamics(File, dyn_out) 110 | 111 | use hycoef, only: write_restart_hycoef 112 | use constituents, only: pcnst 113 | use dimensions_mod, only: nlev 114 | use pio, only: pio_offset_kind, io_desc_t, pio_double, pio_write_darray 115 | use time_manager, only: get_curr_time, get_curr_date 116 | 117 | ! arguments 118 | type(file_desc_t), intent(inout) :: File 119 | type(dyn_export_t), intent(in) :: dyn_out 120 | 121 | ! local variables 122 | integer(pio_offset_kind), parameter :: t_idx = 1 123 | type (fv_atmos_type), pointer :: Atm(:) 124 | 125 | type(io_desc_t),pointer :: iodesc3d,iodesc3d_ns,iodesc3d_ew,iodesc 126 | integer :: m, ierr 127 | integer :: array_lens_3d(3), array_lens_2d(2) 128 | integer :: file_lens_2d(2), file_lens_1d(1) 129 | integer :: grid_id,grid_id_ns,grid_id_ew 130 | integer :: grid_dimlens(2),grid_dimlens_ew(2),grid_dimlens_ns(2) 131 | integer :: ilen,jlen 132 | 133 | !--------------------------------------------------------------------------- 134 | 135 | call write_restart_hycoef(File) 136 | 137 | Atm=>dyn_out%atm 138 | is = Atm(mytile)%bd%is 139 | ie = Atm(mytile)%bd%ie 140 | js = Atm(mytile)%bd%js 141 | je = Atm(mytile)%bd%je 142 | 143 | grid_id = cam_grid_id('FFSL') 144 | grid_id_ew = cam_grid_id('FFSL_EW') 145 | grid_id_ns = cam_grid_id('FFSL_NS') 146 | 147 | ! write coordinate variables for unstructured FFSL, NS and EW restart grid 148 | ! (restart grids have tile based global indicies with duplicate edge points 149 | ! being given uniq indicies. All duplicate point written out to restart file) 150 | ! - io overhead = 6 tile edges are duplicated and read from the file 151 | ! instead of mpi gathers to fill in duplicates. 152 | 153 | call cam_grid_write_var(File, grid_id) 154 | call cam_grid_write_var(File, grid_id_ew) 155 | call cam_grid_write_var(File, grid_id_ns) 156 | 157 | ! create map for distributed write 158 | call cam_grid_dimensions(grid_id, grid_dimlens) 159 | call cam_grid_dimensions(grid_id_ew, grid_dimlens_ew) 160 | call cam_grid_dimensions(grid_id_ns, grid_dimlens_ns) 161 | 162 | ilen=ie-is+1 163 | jlen=je-js+1 164 | 165 | ! create map for distributed write of 2D fields 166 | array_lens_2d = (/ilen,jlen/) 167 | file_lens_1d = (/grid_dimlens(1)/) 168 | call cam_grid_get_decomp(grid_id, array_lens_2d, file_lens_1d, pio_double, iodesc) 169 | ! Write PHIS 170 | call PIO_Write_Darray(File, phisdesc, iodesc, Atm(mytile)%phis(is:ie,js:je), ierr) 171 | ! Write PS 172 | call PIO_Write_Darray(File, psdesc, iodesc, Atm(mytile)%ps(is:ie,js:je), ierr) 173 | 174 | array_lens_3d = (/ilen,jlen,nlev/) 175 | file_lens_2d = (/grid_dimlens(1), nlev/) 176 | call cam_grid_get_decomp(grid_id, array_lens_3d, file_lens_2d, pio_double, iodesc3d) 177 | ! Write U a-grid 178 | call PIO_Write_Darray(File, Udesc, iodesc3d, Atm(mytile)%ua(is:ie,js:je,1:nlev), ierr) 179 | ! Write V a-grid 180 | call PIO_Write_Darray(File, Vdesc, iodesc3d, Atm(mytile)%va(is:ie,js:je,1:nlev) , ierr) 181 | ! Write OMEGA a-grid 182 | call PIO_Write_Darray(File, Omegadesc, iodesc3d, Atm(mytile)%omga(is:ie,js:je,1:nlev), ierr) 183 | ! 
Write DELP a-grid 184 | call PIO_Write_Darray(File, delpdesc, iodesc3d, Atm(mytile)%delp(is:ie,js:je,1:nlev), ierr) 185 | ! Write PT a-grid 186 | call PIO_Write_Darray(File, Tdesc, iodesc3d, Atm(mytile)%pt(is:ie,js:je,1:nlev), ierr) 187 | ! Write Tracers a-grid 188 | do m = 1, pcnst 189 | call PIO_Write_Darray(File, Qdesc(m), iodesc3d, Atm(mytile)%q(is:ie,js:je,1:nlev,m), ierr) 190 | end do 191 | 192 | deallocate(qdesc) 193 | 194 | ! create map for distributed write of 3D NS fields 195 | array_lens_3d = (/ilen ,(jlen+1), nlev/) 196 | file_lens_2d = (/grid_dimlens_ns(1), nlev/) 197 | call cam_grid_get_decomp(grid_id_ns, array_lens_3d, file_lens_2d, pio_double, iodesc3d_ns) 198 | 199 | !WRITE US 200 | call PIO_Write_Darray(File, USdesc, iodesc3d_ns, Atm(mytile)%u(is:ie,js:je+1,1:nlev), ierr) 201 | 202 | ! create map for distributed write of 3D EW fields 203 | array_lens_3d = (/(ilen+1), jlen, nlev /) 204 | file_lens_2d = (/grid_dimlens_ew(1), nlev/) 205 | call cam_grid_get_decomp(grid_id_ew, array_lens_3d, file_lens_2d, pio_double, iodesc3d_ew) 206 | 207 | !WRITE VS 208 | call PIO_Write_Darray(File, VSdesc, iodesc3d_ew, Atm(mytile)%v(is:ie+1,js:je,1:nlev), ierr) 209 | 210 | end subroutine write_restart_dynamics 211 | 212 | !======================================================================= 213 | 214 | subroutine read_restart_dynamics(File, dyn_in, dyn_out) 215 | 216 | use cam_history_support, only: max_fieldname_len 217 | use constituents, only: cnst_name, pcnst 218 | use dimensions_mod,only: npy,npx,nlev 219 | use dyn_comp, only: dyn_init 220 | use dyn_grid, only: Atm 221 | use mpp_domains_mod, only: mpp_update_domains, DGRID_NE, mpp_get_boundary 222 | use pio, only: file_desc_t, pio_double, & 223 | pio_inq_dimid, pio_inq_dimlen, pio_inq_varid, & 224 | pio_read_darray, file_desc_t, io_desc_t, pio_double,pio_offset_kind,& 225 | pio_seterrorhandling, pio_bcast_error 226 | 227 | ! arguments 228 | type(File_desc_t), intent(inout) :: File 229 | type(dyn_import_t), intent(out) :: dyn_in 230 | type(dyn_export_t), intent(out) :: dyn_out 231 | 232 | ! local variables 233 | integer(pio_offset_kind), parameter :: t_idx = 1 234 | 235 | integer :: tl 236 | integer :: i, k, m, j 237 | integer :: ierr, err_handling 238 | integer :: fnlev 239 | integer :: ncols_d_ns, ncols_d_ew, ncols_d 240 | 241 | integer :: ncol_d_dimid 242 | integer :: ncol_d_ns_dimid 243 | integer :: ncol_d_ew_dimid 244 | 245 | type(var_desc_t) :: omegadesc 246 | type(var_desc_t) :: delpdesc 247 | type(var_desc_t) :: udesc 248 | type(var_desc_t) :: vdesc 249 | type(var_desc_t) :: usdesc 250 | type(var_desc_t) :: vsdesc 251 | type(var_desc_t) :: tdesc 252 | type(var_desc_t) :: psdesc 253 | type(var_desc_t) :: phisdesc 254 | type(var_desc_t), allocatable :: qdesc(:) 255 | type(io_desc_t),pointer :: iodesc2d, iodesc3d,iodesc3d_ns,iodesc3d_ew 256 | integer :: array_lens_3d(3), array_lens_2d(2) 257 | integer :: file_lens_2d(2), file_lens_1d(1) 258 | integer :: grid_id,grid_id_ns,grid_id_ew,ilen,jlen 259 | integer :: grid_dimlens(2),grid_dimlens_ns(2),grid_dimlens_ew(2) 260 | 261 | real(r8), allocatable :: ebuffer(:,:) 262 | real(r8), allocatable :: nbuffer(:,:) 263 | 264 | character(len=*), parameter :: sub = 'read_restart_dynamics' 265 | character(len=256) :: errormsg 266 | !---------------------------------------------------------------------------- 267 | 268 | ! Note1: the hybrid coefficients are read from the same location as for an 269 | ! initial run (e.g., dyn_grid_init). 270 | 271 | ! 
Note2: the dyn_in and dyn_out objects are not associated with the Atm dynamics 272 | ! object until dyn_init is called. Until the restart is better integrated 273 | ! into dyn_init we just access Atm directly from the dyn_grid 274 | ! module. FV3 dyn_init calls an fv3 diagnostic init routine that tries to access 275 | ! surface pressure in the Atm structure and at the top of read_restart PS hasn't 276 | ! been read in yet. 277 | 278 | tl = 1 279 | 280 | is = Atm(mytile)%bd%is 281 | ie = Atm(mytile)%bd%ie 282 | js = Atm(mytile)%bd%js 283 | je = Atm(mytile)%bd%je 284 | 285 | call pio_seterrorhandling(File, pio_bcast_error, err_handling) 286 | 287 | ierr = PIO_Inq_DimID(File, 'lev', nlev_dimid) 288 | ierr = PIO_Inq_dimlen(File, nlev_dimid, fnlev) 289 | if (nlev /= fnlev) then 290 | write(errormsg, *) ': Restart file nlev dimension does not match model levels:',& 291 | 'file nlev=',fnlev,', model nlev=',nlev 292 | call endrun(sub//trim(errormsg)) 293 | end if 294 | 295 | ! variable descriptors of required dynamics fields 296 | ierr = PIO_Inq_varid(File, 'DELP', delpdesc) 297 | call cam_pio_handle_error(ierr, sub//': cannot find DELP') 298 | ierr = PIO_Inq_varid(File, 'OMEGA', omegadesc) 299 | call cam_pio_handle_error(ierr, sub//': cannot find OMEGA') 300 | ierr = PIO_Inq_varid(File, 'U', udesc) 301 | call cam_pio_handle_error(ierr, sub//': cannot find UA') 302 | ierr = PIO_Inq_varid(File, 'V', Vdesc) 303 | call cam_pio_handle_error(ierr, sub//': cannot find VA') 304 | ierr = PIO_Inq_varid(File, 'US', usdesc) 305 | call cam_pio_handle_error(ierr, sub//': cannot find US') 306 | ierr = PIO_Inq_varid(File, 'VS', Vsdesc) 307 | call cam_pio_handle_error(ierr, sub//': cannot find VS') 308 | ierr = PIO_Inq_varid(File, 'T', tdesc) 309 | call cam_pio_handle_error(ierr, sub//': cannot find T') 310 | ierr = PIO_Inq_varid(File, 'PS', psdesc) 311 | call cam_pio_handle_error(ierr, sub//': cannot find PS') 312 | ierr = PIO_Inq_varid(File, 'PHIS', phisdesc) 313 | call cam_pio_handle_error(ierr, sub//': cannot find PHIS') 314 | allocate(qdesc(pcnst)) 315 | do m = 1, pcnst 316 | ierr = PIO_Inq_varid(File, trim(cnst_name(m)), Qdesc(m)) 317 | call cam_pio_handle_error(ierr, sub//': cannot find '//trim(cnst_name(m))) 318 | end do 319 | 320 | ! check whether the restart fields on the GLL grid contain unique columns 321 | ! or the fv3 task structure (ncol_d_ns = (ie-is+1)*(je-js+2)+npes columns) 322 | ! 
or the fv3 task structure (ncol_d_ew = (ie-is+2)*(je-js+1)+npes columns) 323 | 324 | ierr = PIO_Inq_DimID(File, 'ncol_d', ncol_d_dimid) 325 | call cam_pio_handle_error(ierr, sub//': cannot find ncol_d') 326 | ierr = PIO_Inq_dimlen(File, ncol_d_dimid, ncols_d) 327 | 328 | ierr = PIO_Inq_DimID(File, 'ncol_d_ns', ncol_d_ns_dimid) 329 | call cam_pio_handle_error(ierr, sub//': cannot find ncol_d_ns') 330 | ierr = PIO_Inq_dimlen(File, ncol_d_ns_dimid, ncols_d_ns) 331 | 332 | ierr = PIO_Inq_DimID(File, 'ncol_d_ew', ncol_d_ew_dimid) 333 | call cam_pio_handle_error(ierr, sub//': cannot find ncol_d_ew') 334 | ierr = PIO_Inq_dimlen(File, ncol_d_ew_dimid, ncols_d_ew) 335 | 336 | grid_id = cam_grid_id('FFSL') 337 | grid_id_ns = cam_grid_id('FFSL_NS') 338 | grid_id_ew = cam_grid_id('FFSL_EW') 339 | call cam_grid_dimensions(grid_id, grid_dimlens) 340 | call cam_grid_dimensions(grid_id_ew, grid_dimlens_ew) 341 | call cam_grid_dimensions(grid_id_ns, grid_dimlens_ns) 342 | 343 | if (ncols_d /= grid_dimlens(1)) then 344 | write(errormsg, *) ':Restart file ncol_d dimension does not match number of model A-Grid columns',& 345 | 'Restart ncols_d=',ncols_d,', A-Grid ncols=',grid_dimlens(1) 346 | call endrun(sub//trim(errormsg)) 347 | end if 348 | 349 | if (ncols_d_ns /= grid_dimlens_ns(1)) then 350 | write(errormsg, *) ':Restart file ncol_d dimension does not match number of model D-Grid ns columns',& 351 | 'Restart ncols_d_ns=',ncols_d_ns,', D-Grid ns ncols=',grid_dimlens_ns(1) 352 | call endrun(sub//trim(errormsg)) 353 | end if 354 | 355 | if (ncols_d_ew /= grid_dimlens_ew(1)) then 356 | write(errormsg, *) ':Restart file ncol_d dimension does not match number of model D-Grid ew columns',& 357 | 'Restart ncols_d_ew=',ncols_d_ew,', D-Grid ew ncols=',grid_dimlens_ew(1) 358 | call endrun(sub//trim(errormsg)) 359 | end if 360 | 361 | ilen = ie-is+1 362 | jlen = je-js+1 363 | ! create map for distributed write of 2D fields 364 | array_lens_2d = (/ilen,jlen/) 365 | file_lens_1d = (/grid_dimlens(1)/) 366 | call cam_grid_get_decomp(grid_id, array_lens_2d, file_lens_1d, pio_double, iodesc2d) 367 | 368 | ! create map for distributed write of 3D fields 369 | array_lens_3d = (/ilen, jlen,nlev/) 370 | file_lens_2d = (/grid_dimlens(1), nlev/) 371 | call cam_grid_get_decomp(grid_id, array_lens_3d, file_lens_2d, pio_double, iodesc3d) 372 | 373 | ! create map for distributed write of 3D NS fields 374 | array_lens_3d = (/ilen, jlen+1, nlev/) 375 | file_lens_2d = (/grid_dimlens_ns(1), nlev/) 376 | call cam_grid_get_decomp(grid_id_ns, array_lens_3d, file_lens_2d, pio_double, iodesc3d_ns) 377 | 378 | ! create map for distributed write of 3D EW fields 379 | array_lens_3d = (/ilen+1, jlen, nlev/) 380 | file_lens_2d = (/grid_dimlens_ew(1), nlev/) 381 | call cam_grid_get_decomp(grid_id_ew, array_lens_3d, file_lens_2d, pio_double, iodesc3d_ew) 382 | 383 | ! PS 384 | call PIO_Read_Darray(File, psdesc, iodesc2d,atm(mytile)%ps(is:ie,js:je), ierr) 385 | ! PHIS 386 | call PIO_Read_Darray(File, phisdesc, iodesc2d, atm(mytile)%phis(is:ie,js:je), ierr) 387 | ! OMEGA 388 | call PIO_Read_Darray(File, omegadesc, iodesc3d,Atm(mytile)%omga(is:ie,js:je,1:nlev), ierr) 389 | ! DELP 390 | call PIO_Read_Darray(File, delpdesc, iodesc3d, atm(mytile)%delp(is:ie,js:je,1:nlev), ierr) 391 | ! T 392 | call PIO_Read_Darray(File, Tdesc, iodesc3d,atm(mytile)%pt(is:ie,js:je,1:nlev) , ierr) 393 | ! V 394 | call PIO_Read_Darray(File, Vdesc, iodesc3d, atm(mytile)%va(is:ie,js:je,1:nlev), ierr) 395 | ! 
U 396 | call PIO_Read_Darray(File, Udesc, iodesc3d, atm(mytile)%ua(is:ie,js:je,1:nlev), ierr) 397 | ! tracers 398 | do m = 1, pcnst 399 | call PIO_Read_Darray(File, Qdesc(m), iodesc3d, atm(mytile)%q(is:ie,js:je,1:nlev,m), ierr) 400 | end do 401 | 402 | deallocate(qdesc) 403 | 404 | ! US and VS After reading unique points on D grid call get_boundary routine to fill 405 | ! missing points on the north and east block boundaries which are duplicated between 406 | ! adjacent blocks. 407 | 408 | allocate(ebuffer(npy+2,nlev)) 409 | allocate(nbuffer(npx+2,nlev)) 410 | nbuffer = 0._r8 411 | ebuffer = 0._r8 412 | ! US 413 | call PIO_Read_Darray(File, USdesc, iodesc3d_ns, atm(mytile)%u(is:ie,js:je+1,1:nlev), ierr) 414 | ! VS 415 | call PIO_Read_Darray(File, VSdesc, iodesc3d_ew, atm(mytile)%v(is:ie+1,js:je,1:nlev), ierr) 416 | ! US/VS duplicates 417 | call mpp_get_boundary(atm(mytile)%u, atm(mytile)%v, atm(mytile)%domain, ebuffery=ebuffer, & 418 | nbufferx=nbuffer, gridtype=DGRID_NE ) 419 | do k=1,nlev 420 | do i=is,ie 421 | atm(mytile)%u(i,je+1,k) = nbuffer(i-is+1,k) 422 | enddo 423 | do j=js,je 424 | atm(mytile)%v(ie+1,j,k) = ebuffer(j-js+1,k) 425 | enddo 426 | enddo 427 | deallocate(ebuffer) 428 | deallocate(nbuffer) 429 | 430 | ! Update halo points on each processor 431 | 432 | call mpp_update_domains( Atm(mytile)%phis, Atm(mytile)%domain ) 433 | call mpp_update_domains( atm(mytile)%ps, Atm(mytile)%domain ) 434 | call mpp_update_domains( atm(mytile)%u,atm(mytile)%v, Atm(mytile)%domain, gridtype=DGRID_NE, complete=.true. ) 435 | call mpp_update_domains( atm(mytile)%pt, Atm(mytile)%domain ) 436 | call mpp_update_domains( atm(mytile)%delp, Atm(mytile)%domain ) 437 | call mpp_update_domains( atm(mytile)%omga, Atm(mytile)%domain ) 438 | call mpp_update_domains( atm(mytile)%q, Atm(mytile)%domain ) 439 | 440 | call dyn_init(dyn_in, dyn_out) 441 | 442 | call pio_seterrorhandling(File, err_handling) 443 | 444 | 445 | end subroutine read_restart_dynamics 446 | 447 | end module restart_dynamics 448 | -------------------------------------------------------------------------------- /src_override/atmos_cmip_diag.F90: -------------------------------------------------------------------------------- 1 | module atmos_cmip_diag_mod 2 | 3 | !---------------------------------------------------------------------- 4 | ! Module for registering and sending (writing) 3D CMIP diagnostic 5 | ! data on model levels and pressure levels. New vertical axes are 6 | ! defined from lowest to uppermost level (opposite the model). 7 | ! Prefined pressure axes corresponding to CMIP axes are used. 8 | ! The vertical axis used is specified via the 'module_name' field 9 | ! in the diag_table. 
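!
! Typical use, sketched from the public interfaces declared below (the field
! name 'ta' and the variables Time_init/Time are illustrative only):
!
!    type(cmip_diag_id_type) :: id_ta
!    id_ta = register_cmip_diag_field_3d('cmip', 'ta', Time_init, &
!                                        long_name='Air Temperature', units='K')
!    if (query_cmip_diag_id(id_ta)) then
!       ! ... hand the model-level and/or pressure-level data to send_cmip_data_3d ...
!    endif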
10 | !---------------------------------------------------------------------- 11 | 12 | use mpp_mod, only: input_nml_file 13 | use fms_mod, only: open_namelist_file, check_nml_error, & 14 | close_file, stdlog, mpp_pe, mpp_root_pe, & 15 | write_version_number, file_exist, & 16 | error_mesg, FATAL, WARNING, NOTE, & 17 | lowercase, string 18 | use time_manager_mod, only: time_type 19 | use diag_manager_mod, only: diag_axis_init, register_diag_field, & 20 | send_data, get_diag_field_id, & 21 | register_static_field, & 22 | diag_axis_add_attribute, & 23 | diag_field_add_attribute, & 24 | DIAG_FIELD_NOT_FOUND 25 | use diag_data_mod, only: CMOR_MISSING_VALUE 26 | 27 | !---------------------------------------------------------------------- 28 | 29 | implicit none 30 | private 31 | 32 | !---------------------------------------------------------------------- 33 | 34 | public :: atmos_cmip_diag_init, atmos_cmip_diag_end, & 35 | register_cmip_diag_field_2d, & 36 | register_cmip_diag_field_3d, & 37 | send_cmip_data_3d, & 38 | query_cmip_diag_id 39 | 40 | !---------------------------------------------------------------------- 41 | 42 | ! vertical pressure grids 43 | ! plev = same as plev17, unless use_extra_levels = .true. 44 | ! plev19 = Table Amon = standard 17 levels + 5, 1 hPa 45 | ! plev8 = daily data 46 | ! plev7h = HighResMIP (6hr time mean, 3hr synoptic) 47 | ! plev3 = used in CMIP5 for 6hrPlev 48 | ! plev23 = plev19 + (7,3,2,0.4 hPa) 49 | 50 | real, dimension(23) :: plev23 = & 51 | (/ 100000., 92500., 85000., 70000., 60000., 50000., & 52 | 40000., 30000., 25000., 20000., 15000., 10000., & 53 | 7000., 5000., 3000., 2000., 1000., 700., & 54 | 500., 300., 200., 100., 40. /) 55 | real, dimension(19) :: plev19 = & 56 | (/ 100000., 92500., 85000., 70000., 60000., 50000., & 57 | 40000., 30000., 25000., 20000., 15000., 10000., & 58 | 7000., 5000., 3000., 2000., 1000., 500., & 59 | 100. /) 60 | real, dimension(8) :: plev8 = & 61 | (/ 100000., 85000., 70000., 50000., & 62 | 25000., 10000., 5000., 1000. /) 63 | real, dimension(7) :: plev7h = & 64 | (/ 92500., 85000., 70000., 60000., 50000., 25000., 5000. /) 65 | real, dimension(3) :: plev3 = & 66 | (/ 85000., 50000., 25000. /) 67 | 68 | !----------------------------------------------------------------------- 69 | !--- namelist --- 70 | 71 | logical :: use_extra_levels = .true. ! use more than the standard 72 | ! 17 pressure levels when possible 73 | 74 | logical :: flip_cmip_levels = .true. ! flip vertical model level output 75 | ! from bottom(surface) to top. 76 | 77 | logical :: output_modeling_realm = .false. ! add modeling_realm attribute 78 | ! to all variables 79 | 80 | character(len=64) :: modeling_realm_default = 'atmos' ! default modeling_realm attribute 81 | ! can be overriden in 82 | ! register_cmip_diag 83 | 84 | integer :: verbose = 1 ! verbose level = 0,1,2 85 | 86 | namelist /atmos_cmip_diag_nml/ use_extra_levels, flip_cmip_levels, & 87 | output_modeling_realm, modeling_realm_default, & 88 | verbose 89 | 90 | !----------------------------------------------------------------------- 91 | 92 | integer, parameter :: MAXPLEVS = 6 ! max plev sets 93 | integer, dimension(MAXPLEVS) :: num_pres_levs 94 | real, dimension(MAXPLEVS,50) :: pressure_levels ! max 50 levels per set 95 | 96 | character(len=16) :: mod_name = 'cmip' 97 | 98 | ! cmip vertical axis names 99 | ! index -1 = 'levhalf' half model levels 100 | ! index 0 = 'lev' full model levels 101 | ! 
index >0 = 'plev*' pressure levels 102 | character(len=128), dimension(-1:MAXPLEVS) :: cmip_axis_names 103 | integer, dimension(3,-1:MAXPLEVS) :: cmip_axis_data 104 | 105 | integer :: area_id 106 | 107 | !---------------------------------------------------------------------- 108 | 109 | !--- store field id for all possible axes 110 | ! index 0 = on model level (either full or half) 111 | ! index >0 = on pressure levels 112 | public :: cmip_diag_id_type 113 | type cmip_diag_id_type 114 | integer, dimension(0:MAXPLEVS) :: field_id = 0 115 | end type cmip_diag_id_type 116 | 117 | !---------------------------------------------------------------------- 118 | 119 | character(len=128) :: version = '$Id$' 120 | character(len=128) :: tagname = '$Name$' 121 | 122 | logical :: module_is_initialized=.false. 123 | 124 | CONTAINS 125 | 126 | !####################################################################### 127 | 128 | subroutine atmos_cmip_diag_init ( ak, bk, ptop, axes, Time ) 129 | real, intent(in), dimension(:) :: ak, bk ! ap,b at model layer interfaces 130 | real, intent(in) :: ptop ! pressure at top level 131 | integer, intent(in) :: axes(2) ! x/y axes identifiers 132 | type(time_type), intent(in) :: Time 133 | 134 | !----------------------------------------------------------------------- 135 | ! local data 136 | 137 | integer :: axes3d(3), k, kk, ind, np, id_plev, num_std_plevs 138 | integer :: nlev 139 | integer :: iunit, ierr, io 140 | logical :: used 141 | character(len=16) :: axis_name 142 | integer :: flip 143 | 144 | real :: p0 145 | real, dimension(size(ak,1)-1) :: ap, b, lev 146 | real, dimension(2,size(ak,1)-1) :: ap_bnds, b_bnds, lev_bnds 147 | real, dimension(size(ak,1)) :: levhalf 148 | 149 | integer :: id_lev, id_levhalf, id_nv, id_ap, id_b, & 150 | id_ap_bnds, id_b_bnds, id_lev_bnds 151 | 152 | !----------------------------------------------------------------------- 153 | 154 | if (module_is_initialized) then 155 | call error_mesg ('atmos_cmip_diag_mod', & 156 | 'module has already been initialized', WARNING) 157 | return 158 | endif 159 | 160 | !----------------------------------------------------------------------- 161 | !----- read namelist ----- 162 | #ifdef INTERNAL_FILE_NML 163 | read (input_nml_file, nml=atmos_cmip_diag_nml, iostat=io) 164 | ierr = check_nml_error (io, 'atmos_cmip_diag_nml') 165 | #else 166 | if (file_exist('input.nml') ) then 167 | iunit = open_namelist_file() 168 | ierr=1 169 | do while (ierr /= 0) 170 | read (iunit, nml=atmos_cmip_diag_nml, iostat=io, end=10) 171 | ierr = check_nml_error (io, 'atmos_cmip_diag_nml') 172 | enddo 173 | 10 call close_file (iunit) 174 | endif 175 | #endif 176 | 177 | !----- write version and namelist to log file ----- 178 | 179 | iunit = stdlog() 180 | call write_version_number ( version, tagname ) 181 | if (mpp_pe() == mpp_root_pe()) write (iunit, nml=atmos_cmip_diag_nml) 182 | 183 | 184 | !----------------------------------------------------------------------- 185 | ! axis and area identifiers 186 | axes3d(1:2) = axes(1:2) 187 | area_id = get_diag_field_id ('dynamics', 'area') 188 | if (area_id .eq. DIAG_FIELD_NOT_FOUND) call error_mesg & 189 | ('atmos_cmip_diag_init', 'diagnostic field "dynamics", '// & 190 | '"area" is not in the diag_table', NOTE) 191 | 192 | !----------------------------------------------------------------------- 193 | ! determine the maximum number of standard pressure levels 194 | ! 
first get the pressure (based on ps=1000hPa) at top model level 195 | 196 | if (use_extra_levels) then 197 | do k = 23, 1, -1 198 | if (plev23(k) .gt. ptop) then 199 | num_std_plevs = k 200 | exit 201 | endif 202 | enddo 203 | else 204 | num_std_plevs = 17 ! standard ncep 17 levels 205 | endif 206 | 207 | !----------------------------------------------------------------------- 208 | ! vertical coordinate variables 209 | ! cmip levels are defined from the surface to top (flip model values) 210 | 211 | flip = 1 212 | if (flip_cmip_levels) flip = -1 213 | nlev = size(ak,1)-1 214 | 215 | p0 = 100000. 216 | do k = 1, nlev 217 | if (flip_cmip_levels) then 218 | kk = nlev - k + 2 219 | else 220 | kk = k 221 | end if 222 | ap_bnds(1,k) = ak(kk) 223 | ap_bnds(2,k) = ak(kk+flip) 224 | b_bnds(1,k) = bk(kk) 225 | b_bnds(2,k) = bk(kk+flip) 226 | ap(k) = (ap_bnds(1,k)+ap_bnds(2,k))*0.5 227 | b(k) = (b_bnds(1,k)+b_bnds(2,k))*0.5 228 | enddo 229 | lev = ap/p0 + b ! definition for CMIP purposes 230 | lev_bnds = ap_bnds/p0 + b_bnds 231 | levhalf(1:nlev) = ap_bnds(1,:)/p0 + b_bnds(1,:) ! definition at half levels for CMIP purposes 232 | levhalf(nlev+1) = ap_bnds(2,nlev)/p0 + b_bnds(2,nlev) 233 | 234 | !---- register new axes ---- 235 | 236 | ! at full levels (with bounds attribute) 237 | id_lev = diag_axis_init('lev', lev, '1.0', 'Z', & 238 | 'hybrid sigma pressure coordinate', & 239 | direction=-1, set_name='cmip', req='lev_bnds') 240 | call diag_axis_add_attribute (id_lev, 'formula', 'p(n,k,j,i) = ap(k) + b(k)*ps(n,j,i)') 241 | call diag_axis_add_attribute (id_lev, 'formula_terms', 'ap: ap b: b ps: ps') 242 | call diag_axis_add_attribute (id_lev, 'bounds', 'lev_bnds') 243 | call diag_axis_add_attribute (id_lev, 'standard_name', & 244 | 'atmosphere_hybrid_sigma_pressure_coordinate') 245 | 246 | ! at half levels (bounds unknown at top and bottom) 247 | id_levhalf = diag_axis_init('levhalf', levhalf, '1.0', 'Z', & 248 | 'hybrid sigma pressure coordinate', & 249 | direction=-1, set_name='cmip') 250 | call diag_axis_add_attribute (id_levhalf, 'standard_name', & 251 | 'atmosphere_hybrid_sigma_pressure_coordinate') 252 | call diag_axis_add_attribute ( id_levhalf, 'formula', 'p(n,k+1/2,j,i) = ap(k+1/2) + b(k+1/2)*ps(n,j,i)') 253 | call diag_axis_add_attribute ( id_levhalf, 'formula_terms', 'ap: ap_bnds b: b_bnds ps: ps') 254 | 255 | ! vertex number for bounds dimension 256 | id_nv = diag_axis_init('nv', (/1.,2./), 'none', 'N', 'vertex number', set_name='nv') 257 | 258 | ! register new static variables 259 | 260 | id_ap = register_static_field (mod_name, 'ap', (/id_lev/), & 261 | 'vertical coordinate formula term: ap(k)', 'Pa') 262 | 263 | id_b = register_static_field (mod_name, 'b', (/id_lev/), & 264 | 'vertical coordinate formula term: b(k)', '1.0') 265 | 266 | id_ap_bnds = register_static_field (mod_name, 'ap_bnds', (/id_nv,id_lev/), & 267 | 'vertical coordinate formula term: ap(k+1/2)', 'Pa') 268 | 269 | id_b_bnds = register_static_field (mod_name, 'b_bnds', (/id_nv,id_lev/), & 270 | 'vertical coordinate formula term: b(k+1/2)', '1.0') 271 | 272 | id_lev_bnds = register_static_field (mod_name, 'lev_bnds', (/id_nv,id_lev/), & 273 | 'hybrid sigma pressure coordinate', '1.0', & 274 | standard_name='atmosphere_hybrid_sigma_pressure_coordinate') 275 | if (id_lev_bnds > 0) then 276 | call diag_field_add_attribute ( id_lev_bnds, 'formula', 'p(n,k+1/2,j,i) = ap(k+1/2) + b(k+1/2)*ps(n,j,i)') 277 | call diag_field_add_attribute ( id_lev_bnds, 'formula_terms', 'ap: ap_bnds b: b_bnds ps: ps') 278 | endif 279 | 280 | ! 
save static data 281 | if (id_ap > 0) used = send_data ( id_ap, ap, Time ) 282 | if (id_b > 0) used = send_data ( id_b , b , Time ) 283 | if (id_ap_bnds > 0) used = send_data ( id_ap_bnds, ap_bnds, Time ) 284 | if (id_b_bnds > 0) used = send_data ( id_b_bnds, b_bnds, Time ) 285 | if (id_lev_bnds > 0) used = send_data ( id_lev_bnds, lev_bnds, Time ) 286 | 287 | axes3d(3) = id_lev 288 | cmip_axis_names(0) = 'lev' !mod_name 289 | cmip_axis_data(:,0) = axes3d 290 | 291 | axes3d(3) = id_levhalf 292 | cmip_axis_names(-1) = 'levhalf' !mod_name 293 | cmip_axis_data(:,-1) = axes3d 294 | !----------------------------------------------------------------------- 295 | ! loop through all possible pressure level sets 296 | ! initialize the pressure axis 297 | ! define new 3d grid 298 | ! define all 3d state variable on this 3d grid 299 | 300 | do ind = 1, MAXPLEVS 301 | if (ind .eq. 1) then 302 | np = num_std_plevs 303 | pressure_levels(ind,1:np) = plev23(1:np) 304 | axis_name = 'plev_std' 305 | else if (ind .eq. 2) then 306 | np = size(plev19,1) 307 | pressure_levels(ind,1:np) = plev19 308 | axis_name = 'plev19' 309 | else if (ind .eq. 3) then 310 | np = size(plev8,1) 311 | pressure_levels(ind,1:np) = plev8 312 | axis_name = 'plev8' 313 | else if (ind .eq. 4) then 314 | np = size(plev3,1) 315 | pressure_levels(ind,1:np) = plev3 316 | axis_name = 'plev3' 317 | else if (ind .eq. 5) then 318 | np = size(plev7h,1) 319 | pressure_levels(ind,1:np) = plev7h 320 | axis_name = 'plev7h' 321 | else if (ind .eq. 6) then 322 | np = size(plev23,1) 323 | pressure_levels(ind,1:np) = plev23 324 | axis_name = 'plev23' 325 | endif 326 | 327 | num_pres_levs(ind) = np 328 | id_plev = diag_axis_init(axis_name, pressure_levels(ind,1:np), & 329 | 'Pa', 'z', 'pressure', direction=-1, set_name="dynamics") 330 | 331 | axes3d(3) = id_plev 332 | cmip_axis_names(ind) = trim(axis_name) !trim(mod_name)//'_'//trim(axis_name) 333 | cmip_axis_data(:,ind) = axes3d 334 | 335 | enddo 336 | 337 | if (verbose > 0) then 338 | call error_mesg('atmos_cmip_diag_mod', & 339 | 'cmip_axis_names(-1) = "'//trim(cmip_axis_names(-1))//'"',NOTE) 340 | do ind = 0, MAXPLEVS 341 | call error_mesg('atmos_cmip_diag_mod', & 342 | 'cmip_axis_names('//trim(string(ind))//') = "'//trim(cmip_axis_names(ind))//'"',NOTE) 343 | enddo 344 | endif 345 | 346 | !--- done --- 347 | module_is_initialized=.true. 348 | 349 | !----------------------------------------------------------------------- 350 | 351 | end subroutine atmos_cmip_diag_init 352 | 353 | !####################################################################### 354 | 355 | logical function query_cmip_diag_id (cmip_id, pres) 356 | type(cmip_diag_id_type), intent(in) :: cmip_id 357 | logical, optional, intent(in) :: pres 358 | integer :: is 359 | 360 | is = 0 361 | if (present(pres)) then 362 | if (pres) is = 1 363 | end if 364 | 365 | query_cmip_diag_id = count(cmip_id%field_id(is:) > 0) .gt. 
0 366 | 367 | end function query_cmip_diag_id 368 | 369 | !####################################################################### 370 | 371 | integer function register_cmip_diag_field_2d (module_name, field_name, & 372 | Time_init, long_name, units, standard_name, & 373 | missing_value, interp_method, mask_variant, realm) 374 | 375 | character(len=*), intent(in) :: module_name, field_name 376 | type(time_type), intent(in) :: Time_init 377 | character(len=*), intent(in), optional :: long_name, units, standard_name 378 | real, intent(in), optional :: missing_value 379 | character(len=*), intent(in), optional :: interp_method, realm 380 | logical , intent(in), optional :: mask_variant 381 | 382 | real :: mvalue 383 | character(len=64) :: modeling_realm 384 | !----------------------------------------------------------------------- 385 | mvalue = CMOR_MISSING_VALUE; if (present(missing_value)) mvalue = missing_value 386 | modeling_realm = modeling_realm_default 387 | if (present(realm)) modeling_realm = realm 388 | 389 | if (output_modeling_realm) then 390 | register_cmip_diag_field_2d = register_diag_field (module_name, field_name, & 391 | cmip_axis_data(1:2,0), Time_init, long_name=long_name, & 392 | units=units, standard_name=standard_name, area=area_id, & 393 | mask_variant=mask_variant, missing_value=mvalue, & 394 | interp_method=interp_method, realm=modeling_realm ) 395 | else 396 | register_cmip_diag_field_2d = register_diag_field (module_name, field_name, & 397 | cmip_axis_data(1:2,0), Time_init, long_name=long_name, & 398 | units=units, standard_name=standard_name, area=area_id, & 399 | mask_variant=mask_variant, missing_value=mvalue, & 400 | interp_method=interp_method) 401 | endif 402 | 403 | !----------------------------------------------------------------------- 404 | 405 | end function register_cmip_diag_field_2d 406 | 407 | !####################################################################### 408 | 409 | function register_cmip_diag_field_3d (module_name, field_name, & 410 | Time_init, long_name, units, standard_name, & 411 | axis, missing_value, interp_method, mask_variant, & 412 | realm) 413 | 414 | character(len=*), intent(in) :: module_name, field_name 415 | type(time_type), intent(in) :: Time_init 416 | character(len=*), intent(in), optional :: long_name, units, standard_name 417 | real, intent(in), optional :: missing_value 418 | character(len=*), intent(in), optional :: axis ! 'full' or 'half' levels 419 | character(len=*), intent(in), optional :: interp_method ! for fregrid 420 | logical , intent(in), optional :: mask_variant 421 | character(len=*), intent(in), optional :: realm ! modeling realm 422 | 423 | type(cmip_diag_id_type) :: register_cmip_diag_field_3d 424 | integer :: ind, indx, kount 425 | real :: mvalue 426 | character(len=128) :: module_name_table 427 | character(len=4) :: vert_axis 428 | character(len=64) :: modeling_realm 429 | !----------------------------------------------------------------------- 430 | 431 | mvalue = CMOR_MISSING_VALUE; if (present(missing_value)) mvalue = missing_value 432 | vert_axis = 'full'; if (present(axis)) vert_axis = lowercase(trim(axis)) 433 | 434 | modeling_realm = modeling_realm_default 435 | if (present(realm)) modeling_realm = realm 436 | 437 | register_cmip_diag_field_3d%field_id = 0 438 | 439 | ! loop thru all axes 440 | do ind = 0, MAXPLEVS 441 | indx = ind 442 | if (ind .eq. 0 .and. vert_axis .eq. 'half') indx = -1 443 | 444 | module_name_table = trim(module_name) 445 | if (ind .gt. 
0) then 446 | module_name_table = trim(module_name_table)//'_'//trim(cmip_axis_names(ind)) 447 | end if 448 | 449 | ! only register fields that are in the diag_table 450 | if ( get_diag_field_id(module_name_table, field_name) .ne. DIAG_FIELD_NOT_FOUND ) then 451 | 452 | if (output_modeling_realm) then 453 | register_cmip_diag_field_3d%field_id(ind) = register_diag_field(module_name_table, field_name, & 454 | cmip_axis_data(:,indx), Time_init, long_name=long_name, units=units, & 455 | standard_name=standard_name, area=area_id, mask_variant=mask_variant, & 456 | missing_value=mvalue, interp_method=interp_method, realm=modeling_realm) 457 | else 458 | register_cmip_diag_field_3d%field_id(ind) = register_diag_field(module_name_table, field_name, & 459 | cmip_axis_data(:,indx), Time_init, long_name=long_name, units=units, & 460 | standard_name=standard_name, area=area_id, mask_variant=mask_variant, & 461 | missing_value=mvalue, interp_method=interp_method) 462 | endif 463 | 464 | if (verbose > 0) call error_mesg('atmos_cmip_diag_mod', & 465 | 'register cmip diag: module='//trim(module_name_table)//', field='//trim(field_name)// & 466 | ', field_id='//trim(string(register_cmip_diag_field_3d%field_id(ind))),NOTE) 467 | 468 | else if (verbose > 1) then 469 | ! for debugging purposes 470 | call error_mesg('atmos_cmip_diag_mod','NOT registering cmip diag: module='// & 471 | trim(module_name_table)//', field='//trim(field_name),NOTE) 472 | endif 473 | enddo 474 | 475 | if (verbose > 1) then 476 | kount = count(register_cmip_diag_field_3d%field_id > 0) 477 | if (query_cmip_diag_id(register_cmip_diag_field_3d)) then 478 | call error_mesg('atmos_cmip_diag_mod','query_cmip_diag_id=TRUE, module='// & 479 | trim(module_name_table)//', field='//trim(field_name)//', kount='//trim(string(kount)),NOTE) 480 | else 481 | call error_mesg('atmos_cmip_diag_mod','query_cmip_diag_id=FALSE, module='// & 482 | trim(module_name_table)//', field='//trim(field_name)//', kount='//trim(string(kount)),NOTE) 483 | endif 484 | endif 485 | 486 | !----------------------------------------------------------------------- 487 | 488 | end function register_cmip_diag_field_3d 489 | 490 | !####################################################################### 491 | 492 | logical function send_cmip_data_3d (cmip_id, field, Time, is_in, js_in, ks_in, phalf, mask, rmask, opt, ext) 493 | 494 | type(cmip_diag_id_type), intent(in) :: cmip_id 495 | real, dimension(:,:,:), intent(in) :: field 496 | type(time_type), intent(in), optional :: Time 497 | integer, intent(in), optional :: is_in, js_in, ks_in 498 | real, dimension(:,:,:), intent(in), optional :: phalf, rmask 499 | logical, dimension(:,:,:), intent(in), optional :: mask 500 | integer, intent(in), optional :: opt ! if opt /= 0 then phalf(i,k,j) 501 | logical, intent(in), optional :: ext 502 | 503 | integer :: ind, id, np, ke 504 | real, allocatable :: pdat(:,:,:) 505 | 506 | !----------------------------------------------------------------------- 507 | 508 | if (.not.module_is_initialized) call error_mesg ('atmos_cmip_diag_mod', & 509 | 'module has not been initialized', FATAL) 510 | 511 | if (present(ks_in)) then 512 | if (ks_in .ne. 1) call error_mesg ('atmos_cmip_diag_mod', & 513 | 'subroutine send_cmip_data_3d does not support optional arg "ks_in"', FATAL) 514 | endif 515 | 516 | if (present(rmask) .and. present(mask)) call error_mesg('atmos_cmip_diag_mod', & 517 | 'rmask and mask can not both be present',FATAL) 518 | 519 | send_cmip_data_3d = .false. 520 | 521 | ! 
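!
! Typical calling pattern for the register/send interfaces in this module
! (a fragment from a hypothetical caller: 'atmos' and 'ta' are placeholder
! names, and Time_init, Time, temp and p_half are caller-side variables
! that are not defined here):
!
!   type(cmip_diag_id_type) :: id_ta
!   logical :: used
!   ...
!   id_ta = register_cmip_diag_field_3d ('atmos', 'ta', Time_init, &
!                   'Air Temperature', 'K', standard_name='air_temperature')
!   ...
!   if (query_cmip_diag_id(id_ta)) then
!      ! phalf carries log(pressure) at model half levels (see the peln
!      ! argument of interpolate_vertical_3d below); opt=1 selects the
!      ! FV-core (i,k,j) index order
!      used = send_cmip_data_3d (id_ta, temp, Time, phalf=log(p_half), opt=1)
!   endif
!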
loop thru all axes 522 | do ind = 0, MAXPLEVS 523 | 524 | if (cmip_id%field_id(ind) > 0) then 525 | id = cmip_id%field_id(ind) 526 | 527 | ! pressure level interpolation if "phalf" is present 528 | 529 | ! pressure level interpolation when ind > 0 530 | if (ind > 0) then 531 | if (.not.present(phalf)) then 532 | cycle ! silently skip? 533 | endif 534 | if (present(rmask) .or. present(mask)) call error_mesg('atmos_cmip_diag_mod', & 535 | 'rmask or mask not allowed with pressure interpolation',FATAL) 536 | np = num_pres_levs(ind) 537 | allocate(pdat(size(field,1),size(field,2),np)) 538 | call interpolate_vertical_3d (pressure_levels(ind,1:np), phalf, field, pdat, opt=opt, ext=ext) 539 | send_cmip_data_3d = send_data(id, pdat, Time, is_in=is_in, js_in=js_in, ks_in=ks_in) 540 | deallocate(pdat) 541 | 542 | else 543 | ! save data on model levels (flip data) 544 | if (flip_cmip_levels) then 545 | ke = size(field,3) 546 | if (.not.present(mask) .and. .not.present(rmask)) then 547 | send_cmip_data_3d = send_data(id, field(:,:,ke:1:-1), Time, & 548 | is_in=is_in, js_in=js_in, ks_in=ks_in) 549 | else if (present(mask) .and. .not.present(rmask)) then 550 | send_cmip_data_3d = send_data(id, field(:,:,ke:1:-1), Time, & 551 | is_in=is_in, js_in=js_in, ks_in=ks_in, mask=mask(:,:,ke:1:-1)) 552 | else if (.not.present(mask) .and. present(rmask)) then 553 | send_cmip_data_3d = send_data(id, field(:,:,ke:1:-1), Time, & 554 | is_in=is_in, js_in=js_in, ks_in=ks_in, rmask=rmask(:,:,ke:1:-1)) 555 | endif 556 | else 557 | send_cmip_data_3d = send_data(id, field(:,:,:), Time, & 558 | is_in=is_in, js_in=js_in, ks_in=ks_in, mask=mask, rmask=rmask) 559 | endif 560 | endif 561 | else 562 | send_cmip_data_3d = .false. 563 | endif 564 | enddo 565 | 566 | !----------------------------------------------------------------------- 567 | 568 | end function send_cmip_data_3d 569 | 570 | !####################################################################### 571 | 572 | subroutine atmos_cmip_diag_end 573 | 574 | ! do nothing, no way to unregister diag fields 575 | 576 | end subroutine atmos_cmip_diag_end 577 | 578 | !####################################################################### 579 | 580 | subroutine dealloc_cmip_diag_id_type (cmip_id) 581 | class(cmip_diag_id_type), intent(inout) :: cmip_id 582 | 583 | !deallocate(cmip_id%field_id) 584 | 585 | end subroutine dealloc_cmip_diag_id_type 586 | 587 | !####################################################################### 588 | ! wrapper for different vertical interpolation routines 589 | ! opt = 0 for standard indexing of peln(i,j,k) -- this is the default 590 | ! opt /= 0 for FV-core indexing of peln(i,k,j) 591 | ! ext = flag to extrapolate data below (and above) input data (default: false) 592 | 593 | subroutine interpolate_vertical_3d (plev, peln, a, ap, opt, ext) 594 | real, intent(in), dimension(:) :: plev ! target p-levels 595 | real, intent(in), dimension(:,:,:) :: peln ! log(phalf), model half levels 596 | real, intent(in), dimension(:,:,:) :: a ! input data 597 | real, intent(out), dimension(:,:,:) :: ap ! output data on p-levels 598 | integer, intent(in), optional :: opt ! peln indexing 599 | logical, intent(in), optional :: ext ! extrapolate? 600 | 601 | integer :: im, jm, km, kp 602 | integer :: iopt 603 | 604 | iopt = 0; if (present(opt)) iopt = opt 605 | im = size(a,1) 606 | jm = size(a,2) 607 | km = size(a,3) 608 | kp = size(plev,1) 609 | 610 | if (iopt .eq. 0) then 611 | if (size(peln,2).eq.jm .and. 
size(peln,3).eq.km+1) then 612 | call interpolate_vertical (im, jm, km, kp, plev, peln, a, ap) ! peln(im,jm,km+1) 613 | !else if (size(peln,2).eq.jm .and. size(peln,3).eq.km) then 614 | ! call interpolate_vertical_half (im, jm, km, kp, plev, peln, a, ap, ext) ! peln(im,jm,km) 615 | else 616 | call error_mesg('atmos_cmip_diag_mod','invalid indexing option and/or array sizes',FATAL) 617 | endif 618 | 619 | else 620 | if (size(peln,3).eq.jm .and. size(peln,2).eq.km+1) then 621 | call interpolate_vertical_fv (im, jm, km, kp, plev, peln, a, ap) ! peln(im,km+1,jm) 622 | else if (size(peln,3).eq.jm .and. size(peln,2).eq.km) then 623 | call interpolate_vertical_half_fv (im, jm, km, kp, plev, peln, a, ap, ext) ! peln(im,km,jm) 624 | else 625 | call error_mesg('atmos_cmip_diag_mod','invalid indexing option and/or array sizes',FATAL) 626 | endif 627 | 628 | endif 629 | 630 | end subroutine interpolate_vertical_3d 631 | 632 | !####################################################################### 633 | ! a (im, jm, km ) <-- input data on FULL model levels 634 | ! peln (im, jm, km+1) <-- standard indexing (i,j,k) 635 | ! km = number of FULL levels 636 | 637 | subroutine interpolate_vertical (im, jm, km, np, plev, peln, a, ap, ext) 638 | integer, intent(in) :: im, jm, km, np 639 | real, intent(in), dimension(np) :: plev ! target p-levels 640 | real, intent(in), dimension(im,jm,km+1) :: peln ! log(phaf), model half levels 641 | real, intent(in), dimension(im,jm,km) :: a ! input data on model levels 642 | real, intent(out), dimension(im,jm,np) :: ap ! output data on p-levels 643 | logical, intent(in), optional :: ext 644 | 645 | real :: pm(km), logp 646 | integer :: i, j, k, kp 647 | logical :: extrap 648 | 649 | extrap = .false.; if (present(ext)) extrap = ext 650 | 651 | do kp = 1, np 652 | logp = log(plev(kp)) 653 | 654 | do j = 1, jm 655 | do i = 1, im 656 | pm = 0.5*(peln(i,j,1:km)+peln(i,j,2:km+1)) 657 | include "atmos_cmip_interp.inc" 658 | enddo 659 | enddo 660 | enddo 661 | 662 | end subroutine interpolate_vertical 663 | 664 | !####################################################################### 665 | ! a (im, jm, km) <-- input data on FULL model levels 666 | ! peln (im, km+1, jm) <-- FV core indexing 667 | ! km = number of FULL levels 668 | 669 | subroutine interpolate_vertical_fv (im, jm, km, np, plev, peln, a, ap, ext) 670 | integer, intent(in) :: im, jm, km, np 671 | real, intent(in), dimension(np) :: plev ! target p-levels 672 | real, intent(in), dimension(im,km+1,jm) :: peln ! log(phaf), model half levels 673 | real, intent(in), dimension(im,jm,km) :: a ! input data on model levels 674 | real, intent(out), dimension(im,jm,np) :: ap ! output data on p-levels 675 | logical, intent(in), optional :: ext 676 | 677 | real :: pm(km), logp 678 | integer :: i, j, k, kp 679 | logical :: extrap 680 | 681 | extrap = .false.; if (present(ext)) extrap = ext 682 | 683 | do kp = 1, np 684 | logp = log(plev(kp)) 685 | 686 | do j = 1, jm 687 | do i = 1, im 688 | pm = 0.5*(peln(i,1:km,j)+peln(i,2:km+1,j)) 689 | include "atmos_cmip_interp.inc" 690 | enddo 691 | enddo 692 | enddo 693 | 694 | end subroutine interpolate_vertical_fv 695 | 696 | !####################################################################### 697 | ! a (im, jm, km) <-- input data on HALF model levels 698 | ! peln (im, km, jm) <-- FV core indexing 699 | ! 
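!
! The interpolate_vertical* variants differ mainly in the peln index order
! and in whether the input sits on full or half levels; in each case pm
! holds log(p) at the data levels, targets are compared as log(plev(kp)),
! and extrapolation beyond the data range is enabled only when ext=.true.
! A hypothetical call on standard-index arrays (im, jm, km, p_half and
! t_model are caller-side placeholders):
!
!   real :: plev(3) = (/ 85000., 50000., 25000. /)   ! target levels [Pa]
!   real :: peln(im,jm,km+1), t_model(im,jm,km), t_plev(im,jm,3)
!   peln = log(p_half)
!   call interpolate_vertical (im, jm, km, 3, plev, peln, t_model, t_plev)
!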
km = number of HALF levels 700 | 701 | subroutine interpolate_vertical_half_fv (im, jm, km, np, plev, peln, a, ap, ext) 702 | 703 | integer, intent(in) :: im, jm, km, np 704 | real, intent(in), dimension(np) :: plev ! target p-levels 705 | real, intent(in), dimension(im,km,jm) :: peln ! log(phaf), model half levels 706 | real, intent(in), dimension(im,jm,km) :: a ! input data on model HALF levels 707 | real, intent(out), dimension(im,jm,np) :: ap ! output data on p-levels 708 | logical, intent(in), optional :: ext 709 | 710 | real :: pm(km), logp 711 | integer :: i, j, k, kp 712 | logical :: extrap 713 | 714 | extrap = .false.; if (present(ext)) extrap = ext 715 | 716 | do kp = 1, np 717 | logp = log(plev(kp)) 718 | 719 | do j = 1, jm 720 | do i = 1, im 721 | pm = peln(i,:,j) 722 | include "atmos_cmip_interp.inc" 723 | enddo 724 | enddo 725 | enddo 726 | 727 | end subroutine interpolate_vertical_half_fv 728 | 729 | !####################################################################### 730 | 731 | end module atmos_cmip_diag_mod 732 | 733 | -------------------------------------------------------------------------------- /src_override/fv_tracer2d.F90: -------------------------------------------------------------------------------- 1 | !*********************************************************************** 2 | !* GNU Lesser General Public License 3 | !* 4 | !* This file is part of the FV3 dynamical core. 5 | !* 6 | !* The FV3 dynamical core is free software: you can redistribute it 7 | !* and/or modify it under the terms of the 8 | !* GNU Lesser General Public License as published by the 9 | !* Free Software Foundation, either version 3 of the License, or 10 | !* (at your option) any later version. 11 | !* 12 | !* The FV3 dynamical core is distributed in the hope that it will be 13 | !* useful, but WITHOUT ANY WARRANTY; without even the implied warranty 14 | !* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 15 | !* See the GNU General Public License for more details. 16 | !* 17 | !* You should have received a copy of the GNU Lesser General Public 18 | !* License along with the FV3 dynamical core. 19 | !* If not, see . 20 | !*********************************************************************** 21 | 22 | module fv_tracer2d_mod 23 | use tp_core_mod, only: fv_tp_2d, copy_corners 24 | use fv_mp_mod, only: mp_reduce_max 25 | use fv_mp_mod, only: mp_gather, is_master 26 | use fv_mp_mod, only: group_halo_update_type 27 | use fv_mp_mod, only: start_group_halo_update, complete_group_halo_update 28 | use mpp_domains_mod, only: mpp_update_domains, CGRID_NE, domain2d 29 | use fv_timing_mod, only: timing_on, timing_off 30 | use boundary_mod, only: nested_grid_BC_apply_intT 31 | use fv_regional_mod, only: regional_boundary_update 32 | use fv_regional_mod, only: current_time_in_seconds 33 | use fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_atmos_type, fv_grid_bounds_type 34 | use mpp_mod, only: mpp_error, FATAL, mpp_broadcast, mpp_send, mpp_recv, mpp_sum, mpp_max 35 | 36 | implicit none 37 | private 38 | 39 | public :: tracer_2d, tracer_2d_nested, tracer_2d_1L 40 | 41 | real, allocatable, dimension(:,:,:) :: nest_fx_west_accum, nest_fx_east_accum, nest_fx_south_accum, nest_fx_north_accum 42 | 43 | contains 44 | 45 | !----------------------------------------------------------------------- 46 | ! 
!ROUTINE: Perform 2D horizontal-to-lagrangian transport 47 | !----------------------------------------------------------------------- 48 | 49 | 50 | 51 | subroutine tracer_2d_1L(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, & 52 | nq, hord, q_split, dt, id_divg, q_pack, dp1_pack, nord_tr, trdm, lim_fac) 53 | 54 | type(fv_grid_bounds_type), intent(IN) :: bd 55 | integer, intent(IN) :: npx 56 | integer, intent(IN) :: npy 57 | integer, intent(IN) :: npz 58 | integer, intent(IN) :: nq ! number of tracers to be advected 59 | integer, intent(IN) :: hord, nord_tr 60 | integer, intent(IN) :: q_split 61 | integer, intent(IN) :: id_divg 62 | real , intent(IN) :: dt, trdm 63 | real , intent(IN) :: lim_fac 64 | type(group_halo_update_type), intent(inout) :: q_pack, dp1_pack 65 | real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq) ! Tracers 66 | real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! DELP before dyn_core 67 | real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz) ! Mass Flux X-Dir 68 | real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz) ! Mass Flux Y-Dir 69 | real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) ! Courant Number X-Dir 70 | real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz) ! Courant Number Y-Dir 71 | type(fv_grid_type), intent(IN), target :: gridstruct 72 | type(domain2d), intent(INOUT) :: domain 73 | 74 | ! Local Arrays 75 | real :: qn2(bd%isd:bd%ied,bd%jsd:bd%jed,nq) ! 3D tracers 76 | real :: dp2(bd%is:bd%ie,bd%js:bd%je) 77 | real :: fx(bd%is:bd%ie+1,bd%js:bd%je ) 78 | real :: fy(bd%is:bd%ie , bd%js:bd%je+1) 79 | real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed) 80 | real :: ra_y(bd%isd:bd%ied,bd%js:bd%je) 81 | real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) 82 | real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz) 83 | real :: cmax(npz) 84 | real :: frac 85 | integer :: nsplt 86 | integer :: i,j,k,it,iq 87 | 88 | real, pointer, dimension(:,:) :: area, rarea 89 | real, pointer, dimension(:,:,:) :: sin_sg 90 | real, pointer, dimension(:,:) :: dxa, dya, dx, dy 91 | 92 | integer :: is, ie, js, je 93 | integer :: isd, ied, jsd, jed 94 | 95 | is = bd%is 96 | ie = bd%ie 97 | js = bd%js 98 | je = bd%je 99 | isd = bd%isd 100 | ied = bd%ied 101 | jsd = bd%jsd 102 | jed = bd%jed 103 | 104 | area => gridstruct%area 105 | rarea => gridstruct%rarea 106 | 107 | sin_sg => gridstruct%sin_sg 108 | dxa => gridstruct%dxa 109 | dya => gridstruct%dya 110 | dx => gridstruct%dx 111 | dy => gridstruct%dy 112 | 113 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & 114 | !$OMP sin_sg,cy,yfx,dya,dx,cmax) 115 | do k=1,npz 116 | do j=jsd,jed 117 | do i=is,ie+1 118 | if (cx(i,j,k) > 0.) then 119 | xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3) 120 | else 121 | xfx(i,j,k) = cx(i,j,k)*dxa(i, j)*dy(i,j)*sin_sg(i, j,1) 122 | endif 123 | enddo 124 | enddo 125 | do j=js,je+1 126 | do i=isd,ied 127 | if (cy(i,j,k) > 0.) then 128 | yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4) 129 | else 130 | yfx(i,j,k) = cy(i,j,k)*dya(i,j )*dx(i,j)*sin_sg(i,j, 2) 131 | endif 132 | enddo 133 | enddo 134 | 135 | cmax(k) = 0. 136 | if ( k < npz/6 ) then 137 | do j=js,je 138 | do i=is,ie 139 | cmax(k) = max( cmax(k), abs(cx(i,j,k)), abs(cy(i,j,k)) ) 140 | enddo 141 | enddo 142 | else 143 | do j=js,je 144 | do i=is,ie 145 | cmax(k) = max( cmax(k), max(abs(cx(i,j,k)),abs(cy(i,j,k)))+1.-sin_sg(i,j,5) ) 146 | enddo 147 | enddo 148 | endif 149 | enddo ! 
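!
! Sub-cycling sketch (hypothetical value): after cmax is reduced across
! ranks below, each level takes nsplt = int(1. + cmax(k)) transport
! sub-steps, and the Courant numbers and mass fluxes are pre-scaled by
! frac = 1/nsplt so the per-sub-step Courant number stays below one, e.g.
!
!   cmax_k = 1.7                   ! largest Courant number on this level
!   nsplt  = int(1. + cmax_k)      ! = 2 sub-steps
!   frac   = 1. / real(nsplt)      ! cx, cy, mfx, mfy, xfx, yfx scaled by 0.5
!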
k-loop 150 | 151 | if (trdm>1.e-4) then 152 | call timing_on('COMM_TOTAL') 153 | call timing_on('COMM_TRACER') 154 | call complete_group_halo_update(dp1_pack, domain) 155 | call timing_off('COMM_TRACER') 156 | call timing_off('COMM_TOTAL') 157 | 158 | endif 159 | call mp_reduce_max(cmax,npz) 160 | 161 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx, & 162 | !$OMP cy,yfx,mfx,mfy,cmax) & 163 | !$OMP private(nsplt, frac) 164 | do k=1,npz 165 | 166 | nsplt = int(1. + cmax(k)) 167 | if ( nsplt > 1 ) then 168 | frac = 1. / real(nsplt) 169 | do j=jsd,jed 170 | do i=is,ie+1 171 | cx(i,j,k) = cx(i,j,k) * frac 172 | xfx(i,j,k) = xfx(i,j,k) * frac 173 | enddo 174 | enddo 175 | do j=js,je 176 | do i=is,ie+1 177 | mfx(i,j,k) = mfx(i,j,k) * frac 178 | enddo 179 | enddo 180 | do j=js,je+1 181 | do i=isd,ied 182 | cy(i,j,k) = cy(i,j,k) * frac 183 | yfx(i,j,k) = yfx(i,j,k) * frac 184 | enddo 185 | enddo 186 | do j=js,je+1 187 | do i=is,ie 188 | mfy(i,j,k) = mfy(i,j,k) * frac 189 | enddo 190 | enddo 191 | endif 192 | 193 | enddo 194 | call timing_on('COMM_TOTAL') 195 | call timing_on('COMM_TRACER') 196 | call complete_group_halo_update(q_pack, domain) 197 | call timing_off('COMM_TRACER') 198 | call timing_off('COMM_TOTAL') 199 | 200 | ! Begin k-independent tracer transport; can not be OpenMPed because the mpp_update call. 201 | do k=1,npz 202 | 203 | !$OMP parallel do default(none) shared(k,is,ie,js,je,isd,ied,jsd,jed,xfx,area,yfx,ra_x,ra_y) 204 | do j=jsd,jed 205 | do i=is,ie 206 | ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k) 207 | enddo 208 | if ( j>=js .and. j<=je ) then 209 | do i=isd,ied 210 | ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k) 211 | enddo 212 | endif 213 | enddo 214 | 215 | nsplt = int(1. + cmax(k)) 216 | do it=1,nsplt 217 | 218 | !$OMP parallel do default(none) shared(k,is,ie,js,je,rarea,mfx,mfy,dp1,dp2) 219 | do j=js,je 220 | do i=is,ie 221 | dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j) 222 | enddo 223 | enddo 224 | 225 | !$OMP parallel do default(none) shared(k,nsplt,it,is,ie,js,je,isd,ied,jsd,jed,npx,npy,cx,xfx,hord,trdm, & 226 | !$OMP nord_tr,nq,gridstruct,bd,cy,yfx,mfx,mfy,qn2,q,ra_x,ra_y,dp1,dp2,rarea,lim_fac) & 227 | !$OMP private(fx,fy) 228 | do iq=1,nq 229 | if ( nsplt /= 1 ) then 230 | if ( it==1 ) then 231 | do j=jsd,jed 232 | do i=isd,ied 233 | qn2(i,j,iq) = q(i,j,k,iq) 234 | enddo 235 | enddo 236 | endif 237 | call fv_tp_2d(qn2(isd,jsd,iq), cx(is,jsd,k), cy(isd,js,k), & 238 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 239 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k)) 240 | if ( it < nsplt ) then ! not last call 241 | do j=js,je 242 | do i=is,ie 243 | qn2(i,j,iq) = (qn2(i,j,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j) 244 | enddo 245 | enddo 246 | else 247 | do j=js,je 248 | do i=is,ie 249 | q(i,j,k,iq) = (qn2(i,j,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j) 250 | enddo 251 | enddo 252 | endif 253 | else 254 | call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), & 255 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 256 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k)) 257 | do j=js,je 258 | do i=is,ie 259 | q(i,j,k,iq) = (q(i,j,k,iq)*dp1(i,j,k)+(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j))/dp2(i,j) 260 | enddo 261 | enddo 262 | endif 263 | enddo ! tracer-loop 264 | 265 | if ( it < nsplt ) then ! 
not last call 266 | do j=js,je 267 | do i=is,ie 268 | dp1(i,j,k) = dp2(i,j) 269 | enddo 270 | enddo 271 | call timing_on('COMM_TOTAL') 272 | call timing_on('COMM_TRACER') 273 | call mpp_update_domains(qn2, domain) 274 | call timing_off('COMM_TRACER') 275 | call timing_off('COMM_TOTAL') 276 | endif 277 | enddo ! time-split loop 278 | enddo ! k-loop 279 | 280 | end subroutine tracer_2d_1L 281 | 282 | 283 | subroutine tracer_2d(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, & 284 | nq, hord, q_split, dt, id_divg, q_pack, dp1_pack, nord_tr, trdm, lim_fac) 285 | 286 | type(fv_grid_bounds_type), intent(IN) :: bd 287 | integer, intent(IN) :: npx 288 | integer, intent(IN) :: npy 289 | integer, intent(IN) :: npz 290 | integer, intent(IN) :: nq ! number of tracers to be advected 291 | integer, intent(IN) :: hord, nord_tr 292 | integer, intent(IN) :: q_split 293 | integer, intent(IN) :: id_divg 294 | real , intent(IN) :: dt, trdm 295 | real , intent(IN) :: lim_fac 296 | type(group_halo_update_type), intent(inout) :: q_pack, dp1_pack 297 | real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq) ! Tracers 298 | real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! DELP before dyn_core 299 | real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz) ! Mass Flux X-Dir 300 | real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz) ! Mass Flux Y-Dir 301 | real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) ! Courant Number X-Dir 302 | real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz) ! Courant Number Y-Dir 303 | type(fv_grid_type), intent(IN), target :: gridstruct 304 | type(domain2d), intent(INOUT) :: domain 305 | 306 | ! Local Arrays 307 | real :: dp2(bd%is:bd%ie,bd%js:bd%je) 308 | real :: fx(bd%is:bd%ie+1,bd%js:bd%je ) 309 | real :: fy(bd%is:bd%ie , bd%js:bd%je+1) 310 | real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed) 311 | real :: ra_y(bd%isd:bd%ied,bd%js:bd%je) 312 | real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) 313 | real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz) 314 | real :: cmax(npz) 315 | real :: c_global 316 | real :: frac, rdt 317 | integer :: ksplt(npz) 318 | integer :: nsplt 319 | integer :: i,j,k,it,iq 320 | 321 | real, pointer, dimension(:,:) :: area, rarea 322 | real, pointer, dimension(:,:,:) :: sin_sg 323 | real, pointer, dimension(:,:) :: dxa, dya, dx, dy 324 | 325 | integer :: is, ie, js, je 326 | integer :: isd, ied, jsd, jed 327 | 328 | is = bd%is 329 | ie = bd%ie 330 | js = bd%js 331 | je = bd%je 332 | isd = bd%isd 333 | ied = bd%ied 334 | jsd = bd%jsd 335 | jed = bd%jed 336 | 337 | area => gridstruct%area 338 | rarea => gridstruct%rarea 339 | 340 | sin_sg => gridstruct%sin_sg 341 | dxa => gridstruct%dxa 342 | dya => gridstruct%dya 343 | dx => gridstruct%dx 344 | dy => gridstruct%dy 345 | 346 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & 347 | !$OMP sin_sg,cy,yfx,dya,dx,cmax,q_split,ksplt) 348 | do k=1,npz 349 | do j=jsd,jed 350 | do i=is,ie+1 351 | if (cx(i,j,k) > 0.) then 352 | xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3) 353 | else 354 | xfx(i,j,k) = cx(i,j,k)*dxa(i,j)*dy(i,j)*sin_sg(i,j,1) 355 | endif 356 | enddo 357 | enddo 358 | do j=js,je+1 359 | do i=isd,ied 360 | if (cy(i,j,k) > 0.) then 361 | yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4) 362 | else 363 | yfx(i,j,k) = cy(i,j,k)*dya(i,j)*dx(i,j)*sin_sg(i,j,2) 364 | endif 365 | enddo 366 | enddo 367 | 368 | if ( q_split == 0 ) then 369 | cmax(k) = 0. 
370 | if ( k < npz/6 ) then 371 | do j=js,je 372 | do i=is,ie 373 | cmax(k) = max( cmax(k), abs(cx(i,j,k)), abs(cy(i,j,k)) ) 374 | enddo 375 | enddo 376 | else 377 | do j=js,je 378 | do i=is,ie 379 | cmax(k) = max( cmax(k), max(abs(cx(i,j,k)),abs(cy(i,j,k)))+1.-sin_sg(i,j,5) ) 380 | enddo 381 | enddo 382 | endif 383 | endif 384 | ksplt(k) = 1 385 | 386 | enddo 387 | 388 | !-------------------------------------------------------------------------------- 389 | 390 | ! Determine global nsplt: 391 | if ( q_split == 0 ) then 392 | call mp_reduce_max(cmax,npz) 393 | ! find global max courant number and define nsplt to scale cx,cy,mfx,mfy 394 | c_global = cmax(1) 395 | if ( npz /= 1 ) then ! if NOT shallow water test case 396 | do k=2,npz 397 | c_global = max(cmax(k), c_global) 398 | enddo 399 | endif 400 | nsplt = int(1. + c_global) 401 | if ( is_master() .and. nsplt > 4 ) write(*,*) 'Tracer_2d_split=', nsplt, c_global 402 | else 403 | nsplt = q_split 404 | endif 405 | 406 | !-------------------------------------------------------------------------------- 407 | 408 | if( nsplt /= 1 ) then 409 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,mfx,cy,yfx,mfy,cmax,nsplt,ksplt,q_split) & 410 | !$OMP private( frac ) 411 | do k=1,npz 412 | 413 | if (q_split > 0) then 414 | ksplt(k) = nsplt 415 | else 416 | ksplt(k) = int(1. + cmax(k)) 417 | end if 418 | frac = 1. / real(ksplt(k)) 419 | 420 | do j=jsd,jed 421 | do i=is,ie+1 422 | cx(i,j,k) = cx(i,j,k) * frac 423 | xfx(i,j,k) = xfx(i,j,k) * frac 424 | enddo 425 | enddo 426 | do j=js,je 427 | do i=is,ie+1 428 | mfx(i,j,k) = mfx(i,j,k) * frac 429 | enddo 430 | enddo 431 | 432 | do j=js,je+1 433 | do i=isd,ied 434 | cy(i,j,k) = cy(i,j,k) * frac 435 | yfx(i,j,k) = yfx(i,j,k) * frac 436 | enddo 437 | enddo 438 | do j=js,je+1 439 | do i=is,ie 440 | mfy(i,j,k) = mfy(i,j,k) * frac 441 | enddo 442 | enddo 443 | 444 | enddo 445 | endif 446 | 447 | if (trdm>1.e-4) then 448 | call timing_on('COMM_TOTAL') 449 | call timing_on('COMM_TRACER') 450 | call complete_group_halo_update(dp1_pack, domain) 451 | call timing_off('COMM_TRACER') 452 | call timing_off('COMM_TOTAL') 453 | 454 | endif 455 | do it=1,nsplt 456 | call timing_on('COMM_TOTAL') 457 | call timing_on('COMM_TRACER') 458 | call complete_group_halo_update(q_pack, domain) 459 | call timing_off('COMM_TRACER') 460 | call timing_off('COMM_TOTAL') 461 | 462 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq,ksplt,& 463 | !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) & 464 | !$OMP private(dp2, ra_x, ra_y, fx, fy) 465 | do k=1,npz 466 | 467 | if ( it .le. ksplt(k) ) then 468 | 469 | do j=js,je 470 | do i=is,ie 471 | dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j) 472 | enddo 473 | enddo 474 | 475 | do j=jsd,jed 476 | do i=is,ie 477 | ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k) 478 | enddo 479 | enddo 480 | do j=js,je 481 | do i=isd,ied 482 | ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k) 483 | enddo 484 | enddo 485 | 486 | do iq=1,nq 487 | if ( it==1 .and. 
trdm>1.e-4 ) then 488 | call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), & 489 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 490 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k), & 491 | mass=dp1(isd,jsd,k), nord=nord_tr, damp_c=trdm) 492 | else 493 | call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), & 494 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 495 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k)) 496 | endif 497 | do j=js,je 498 | do i=is,ie 499 | q(i,j,k,iq) = ( q(i,j,k,iq)*dp1(i,j,k) + & 500 | (fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j) )/dp2(i,j) 501 | enddo 502 | enddo 503 | enddo 504 | 505 | if ( it /= nsplt ) then 506 | do j=js,je 507 | do i=is,ie 508 | dp1(i,j,k) = dp2(i,j) 509 | enddo 510 | enddo 511 | endif 512 | 513 | endif ! ksplt 514 | 515 | enddo ! npz 516 | 517 | if ( it /= nsplt ) then 518 | call timing_on('COMM_TOTAL') 519 | call timing_on('COMM_TRACER') 520 | call start_group_halo_update(q_pack, q, domain) 521 | call timing_off('COMM_TRACER') 522 | call timing_off('COMM_TOTAL') 523 | endif 524 | 525 | enddo ! nsplt 526 | 527 | 528 | end subroutine tracer_2d 529 | 530 | 531 | subroutine tracer_2d_nested(q, dp1, mfx, mfy, cx, cy, gridstruct, bd, domain, npx, npy, npz, & 532 | nq, hord, q_split, dt, id_divg, q_pack, dp1_pack, nord_tr, trdm, & 533 | k_split, neststruct, parent_grid, n_map, lim_fac) 534 | 535 | type(fv_grid_bounds_type), intent(IN) :: bd 536 | integer, intent(IN) :: npx 537 | integer, intent(IN) :: npy 538 | integer, intent(IN) :: npz 539 | integer, intent(IN) :: nq ! number of tracers to be advected 540 | integer, intent(IN) :: hord, nord_tr 541 | integer, intent(IN) :: q_split, k_split, n_map 542 | integer, intent(IN) :: id_divg 543 | real , intent(IN) :: dt, trdm 544 | real , intent(IN) :: lim_fac 545 | type(group_halo_update_type), intent(inout) :: q_pack, dp1_pack 546 | real , intent(INOUT) :: q(bd%isd:bd%ied,bd%jsd:bd%jed,npz,nq) ! Tracers 547 | real , intent(INOUT) :: dp1(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! DELP before dyn_core 548 | real , intent(INOUT) :: mfx(bd%is:bd%ie+1,bd%js:bd%je, npz) ! Mass Flux X-Dir 549 | real , intent(INOUT) :: mfy(bd%is:bd%ie ,bd%js:bd%je+1,npz) ! Mass Flux Y-Dir 550 | real , intent(INOUT) :: cx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) ! Courant Number X-Dir 551 | real , intent(INOUT) :: cy(bd%isd:bd%ied,bd%js :bd%je +1,npz) ! Courant Number Y-Dir 552 | type(fv_grid_type), intent(IN), target :: gridstruct 553 | type(fv_nest_type), intent(INOUT) :: neststruct 554 | type(fv_atmos_type), pointer, intent(IN) :: parent_grid 555 | type(domain2d), intent(INOUT) :: domain 556 | 557 | ! 
Local Arrays 558 | real :: dp2(bd%is:bd%ie,bd%js:bd%je) 559 | real :: fx(bd%is:bd%ie+1,bd%js:bd%je ) 560 | real :: fy(bd%is:bd%ie , bd%js:bd%je+1) 561 | real :: ra_x(bd%is:bd%ie,bd%jsd:bd%jed) 562 | real :: ra_y(bd%isd:bd%ied,bd%js:bd%je) 563 | real :: xfx(bd%is:bd%ie+1,bd%jsd:bd%jed ,npz) 564 | real :: yfx(bd%isd:bd%ied,bd%js: bd%je+1, npz) 565 | real :: cmax(npz) 566 | real :: cmax_t 567 | real :: c_global 568 | real :: frac, rdt 569 | real :: reg_bc_update_time 570 | integer :: nsplt, nsplt_parent, msg_split_steps = 1 571 | integer :: i,j,k,it,iq 572 | 573 | real, pointer, dimension(:,:) :: area, rarea 574 | real, pointer, dimension(:,:,:) :: sin_sg 575 | real, pointer, dimension(:,:) :: dxa, dya, dx, dy 576 | 577 | integer :: is, ie, js, je 578 | integer :: isd, ied, jsd, jed 579 | 580 | is = bd%is 581 | ie = bd%ie 582 | js = bd%js 583 | je = bd%je 584 | isd = bd%isd 585 | ied = bd%ied 586 | jsd = bd%jsd 587 | jed = bd%jed 588 | 589 | area => gridstruct%area 590 | rarea => gridstruct%rarea 591 | 592 | sin_sg => gridstruct%sin_sg 593 | dxa => gridstruct%dxa 594 | dya => gridstruct%dya 595 | dx => gridstruct%dx 596 | dy => gridstruct%dy 597 | 598 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,xfx,dxa,dy, & 599 | !$OMP sin_sg,cy,yfx,dya,dx) 600 | do k=1,npz 601 | do j=jsd,jed 602 | do i=is,ie+1 603 | if (cx(i,j,k) > 0.) then 604 | xfx(i,j,k) = cx(i,j,k)*dxa(i-1,j)*dy(i,j)*sin_sg(i-1,j,3) 605 | else 606 | xfx(i,j,k) = cx(i,j,k)*dxa(i,j)*dy(i,j)*sin_sg(i,j,1) 607 | endif 608 | enddo 609 | enddo 610 | do j=js,je+1 611 | do i=isd,ied 612 | if (cy(i,j,k) > 0.) then 613 | yfx(i,j,k) = cy(i,j,k)*dya(i,j-1)*dx(i,j)*sin_sg(i,j-1,4) 614 | else 615 | yfx(i,j,k) = cy(i,j,k)*dya(i,j)*dx(i,j)*sin_sg(i,j,2) 616 | endif 617 | enddo 618 | enddo 619 | enddo 620 | 621 | !-------------------------------------------------------------------------------- 622 | if ( q_split == 0 ) then 623 | ! Determine nsplt 624 | 625 | !$OMP parallel do default(none) shared(is,ie,js,je,npz,cmax,cx,cy,sin_sg) & 626 | !$OMP private(cmax_t ) 627 | do k=1,npz 628 | cmax(k) = 0. 629 | if ( k < 4 ) then 630 | ! Top layers: C < max( abs(c_x), abs(c_y) ) 631 | do j=js,je 632 | do i=is,ie 633 | cmax_t = max( abs(cx(i,j,k)), abs(cy(i,j,k)) ) 634 | cmax(k) = max( cmax_t, cmax(k) ) 635 | enddo 636 | enddo 637 | else 638 | do j=js,je 639 | do i=is,ie 640 | cmax_t = max(abs(cx(i,j,k)), abs(cy(i,j,k))) + 1.-sin_sg(i,j,5) 641 | cmax(k) = max( cmax_t, cmax(k) ) 642 | enddo 643 | enddo 644 | endif 645 | enddo 646 | call mp_reduce_max(cmax,npz) 647 | 648 | ! find global max courant number and define nsplt to scale cx,cy,mfx,mfy 649 | c_global = cmax(1) 650 | if ( npz /= 1 ) then ! if NOT shallow water test case 651 | do k=2,npz 652 | c_global = max(cmax(k), c_global) 653 | enddo 654 | endif 655 | nsplt = int(1. + c_global) 656 | if ( is_master() .and. nsplt > 3 ) write(*,*) 'Tracer_2d_split=', nsplt, c_global 657 | else 658 | nsplt = q_split 659 | if (gridstruct%nested .and. neststruct%nestbctype > 1) msg_split_steps = max(q_split/parent_grid%flagstruct%q_split,1) 660 | endif 661 | 662 | !-------------------------------------------------------------------------------- 663 | 664 | frac = 1. 
/ real(nsplt) 665 | 666 | if( nsplt /= 1 ) then 667 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,cx,frac,xfx,mfx,cy,yfx,mfy) 668 | do k=1,npz 669 | do j=jsd,jed 670 | do i=is,ie+1 671 | cx(i,j,k) = cx(i,j,k) * frac 672 | xfx(i,j,k) = xfx(i,j,k) * frac 673 | enddo 674 | enddo 675 | do j=js,je 676 | do i=is,ie+1 677 | mfx(i,j,k) = mfx(i,j,k) * frac 678 | enddo 679 | enddo 680 | 681 | do j=js,je+1 682 | do i=isd,ied 683 | cy(i,j,k) = cy(i,j,k) * frac 684 | yfx(i,j,k) = yfx(i,j,k) * frac 685 | enddo 686 | enddo 687 | 688 | do j=js,je+1 689 | do i=is,ie 690 | mfy(i,j,k) = mfy(i,j,k) * frac 691 | enddo 692 | enddo 693 | enddo 694 | endif 695 | 696 | 697 | do it=1,nsplt 698 | if ( gridstruct%nested ) then 699 | neststruct%tracer_nest_timestep = neststruct%tracer_nest_timestep + 1 700 | end if 701 | call timing_on('COMM_TOTAL') 702 | call timing_on('COMM_TRACER') 703 | call complete_group_halo_update(q_pack, domain) 704 | call timing_off('COMM_TRACER') 705 | call timing_off('COMM_TOTAL') 706 | 707 | if (gridstruct%nested) then 708 | do iq=1,nq 709 | call nested_grid_BC_apply_intT(q(isd:ied,jsd:jed,:,iq), & 710 | 0, 0, npx, npy, npz, bd, & 711 | real(neststruct%tracer_nest_timestep)+real(nsplt*k_split), real(nsplt*k_split), & 712 | neststruct%q_BC(iq), bctype=neststruct%nestbctype ) 713 | enddo 714 | endif 715 | 716 | if (gridstruct%regional) then 717 | !This is more accurate than the nested BC calculation 718 | ! since it takes into account varying nsplit 719 | reg_bc_update_time=current_time_in_seconds+(real(n_map-1) + real(it-1)*frac)*dt 720 | do iq=1,nq 721 | call regional_boundary_update(q(:,:,:,iq), 'q', & 722 | isd, ied, jsd, jed, npz, & 723 | is, ie, js, je, & 724 | isd, ied, jsd, jed, & 725 | reg_bc_update_time, & 726 | iq ) 727 | enddo 728 | endif 729 | 730 | if (trdm>1.e-4) then 731 | call timing_on('COMM_TOTAL') 732 | call timing_on('COMM_TRACER') 733 | call complete_group_halo_update(dp1_pack, domain) 734 | call timing_off('COMM_TRACER') 735 | call timing_off('COMM_TOTAL') 736 | 737 | endif 738 | 739 | 740 | !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,jsd,jed,npz,dp1,mfx,mfy,rarea,nq, & 741 | !$OMP area,xfx,yfx,q,cx,cy,npx,npy,hord,gridstruct,bd,it,nsplt,nord_tr,trdm,lim_fac) & 742 | !$OMP private(dp2, ra_x, ra_y, fx, fy) 743 | do k=1,npz 744 | 745 | do j=js,je 746 | do i=is,ie 747 | dp2(i,j) = dp1(i,j,k) + (mfx(i,j,k)-mfx(i+1,j,k)+mfy(i,j,k)-mfy(i,j+1,k))*rarea(i,j) 748 | enddo 749 | enddo 750 | 751 | do j=jsd,jed 752 | do i=is,ie 753 | ra_x(i,j) = area(i,j) + xfx(i,j,k) - xfx(i+1,j,k) 754 | enddo 755 | enddo 756 | do j=js,je 757 | do i=isd,ied 758 | ra_y(i,j) = area(i,j) + yfx(i,j,k) - yfx(i,j+1,k) 759 | enddo 760 | enddo 761 | 762 | do iq=1,nq 763 | if ( it==1 .and. trdm>1.e-4 ) then 764 | call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), & 765 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 766 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k), & 767 | mass=dp1(isd,jsd,k), nord=nord_tr, damp_c=trdm) 768 | else 769 | call fv_tp_2d(q(isd,jsd,k,iq), cx(is,jsd,k), cy(isd,js,k), & 770 | npx, npy, hord, fx, fy, xfx(is,jsd,k), yfx(isd,js,k), & 771 | gridstruct, bd, ra_x, ra_y, lim_fac, mfx=mfx(is,js,k), mfy=mfy(is,js,k)) 772 | endif 773 | do j=js,je 774 | do i=is,ie 775 | q(i,j,k,iq) = ( q(i,j,k,iq)*dp1(i,j,k) + & 776 | (fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))*rarea(i,j) )/dp2(i,j) 777 | enddo 778 | enddo 779 | enddo 780 | enddo ! 
npz 781 | 782 | if ( it /= nsplt ) then 783 | call timing_on('COMM_TOTAL') 784 | call timing_on('COMM_TRACER') 785 | call start_group_halo_update(q_pack, q, domain) 786 | call timing_off('COMM_TRACER') 787 | call timing_off('COMM_TOTAL') 788 | endif 789 | 790 | enddo ! nsplt 791 | 792 | if ( id_divg > 0 ) then 793 | rdt = 1./(frac*dt) 794 | 795 | !$OMP parallel do default(none) shared(is,ie,js,je,npz,dp1,xfx,yfx,rarea,rdt) 796 | do k=1,npz 797 | do j=js,je 798 | do i=is,ie 799 | dp1(i,j,k) = (xfx(i+1,j,k)-xfx(i,j,k) + yfx(i,j+1,k)-yfx(i,j,k))*rarea(i,j)*rdt 800 | enddo 801 | enddo 802 | enddo 803 | endif 804 | 805 | end subroutine tracer_2d_nested 806 | 807 | end module fv_tracer2d_mod 808 | -------------------------------------------------------------------------------- /dyn_grid.F90: -------------------------------------------------------------------------------- 1 | module dyn_grid 2 | !------------------------------------------------------------------------------- 3 | ! Define FV3 computational grids on the dynamics decomposition. 4 | ! 5 | ! The grid used by the FV3 dynamics is called the FSSL grid and is a 6 | ! gnomonic cubed sphere consisting of 6 tiled faces. Each tile consists 7 | ! of an array of cells whose coordinates are great circles. The grid 8 | ! nomenclature (C96, C384, etc.) describes the number of cells along 9 | ! the top and side of a tile face (square). All prognostic variables 10 | ! are 3-D cell-mean values (cell center), except for the horizontal winds, 11 | ! which are 2-D face-mean values located on the cell walls (D-Grid winds). 12 | ! Each tile can be decomposed into a number of subdomains (consisting of 13 | ! one or more cells) which correspond to "blocks" in the physics/dynamics 14 | ! coupler terminology. The namelist variable "layout" consists of 2 integers 15 | ! and determines the size/shape of the blocks by dividing the tile into a 16 | ! number of horizonal and vertical sections. The total number of blocks in 17 | ! the global domain is therefore layout(1)*layout(2)*ntiles. The decomposition 18 | ! and communication infrastructure is provided by the GFDL FMS library. 19 | ! 20 | ! Module responsibilities: 21 | ! 22 | ! . Provide the physics/dynamics coupler (in module phys_grid) with data for the 23 | ! physics grid on the dynamics decomposition. 24 | ! 25 | ! . Create CAM grid objects that are used by the I/O functionality to read 26 | ! data from an unstructured grid format to the dynamics data structures, and 27 | ! to write from the dynamics data structures to unstructured grid format. The 28 | ! global column ordering for the unstructured grid is determined by the FV3 dycore. 29 | ! 30 | !------------------------------------------------------------------------------- 31 | 32 | use cam_abortutils, only: endrun 33 | use cam_grid_support, only: iMap 34 | use cam_logfile, only: iulog 35 | use dimensions_mod, only: npx, npy, ntiles 36 | use fms_mod, only: fms_init, write_version_number, set_domain 37 | use fv_arrays_mod, only: fv_atmos_type 38 | use fv_control_mod, only: ngrids,fv_control_init 39 | use mpp_mod, only: mpp_pe, mpp_root_pe 40 | use physics_column_type, only: physics_column_t 41 | use physconst, only: rearth,pi 42 | use shr_kind_mod, only: r8 => shr_kind_r8 43 | use spmd_utils, only: mpicom, masterproc 44 | 45 | implicit none 46 | private 47 | save 48 | 49 | ! 
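!
! Size sketch for a hypothetical C96 configuration (npx = npy = 97 cell
! corners, i.e. 96x96 cells per tile face, with layout = (/4,8/)); these
! numbers are illustrative only and simply work through the counts
! described in the module header comment above:
!
!   ncols_global = 96*96*6        ! = 55296 global columns
!   nblocks      = 4*8*6          ! layout(1)*layout(2)*ntiles = 192 blocks
!   ! each block then covers (96/4) x (96/8) = 24 x 12 cells of one tile
!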
The FV3 dynamics grids and initial file ncol grid 50 | integer, parameter :: dyn_decomp = 101 51 | integer, parameter :: dyn_decomp_ew = 102 52 | integer, parameter :: dyn_decomp_ns = 103 53 | integer, parameter :: dyn_decomp_hist = 104 54 | integer, parameter :: dyn_decomp_hist_ew = 105 55 | integer, parameter :: dyn_decomp_hist_ns = 106 56 | integer, parameter :: ini_decomp = 107 57 | 58 | character(len=3), protected :: ini_grid_name = 'INI' 59 | 60 | integer, parameter :: ptimelevels = 2 ! number of time levels in the dycore 61 | 62 | integer :: mytile = 1 63 | integer :: p_split = 1 64 | integer, allocatable :: pelist(:) 65 | 66 | real(r8), parameter :: rad2deg = 180._r8/pi 67 | 68 | logical, allocatable :: grids_on_this_pe(:) 69 | type(fv_atmos_type), allocatable, target :: Atm(:) 70 | type(physics_column_t), allocatable, target :: local_dyn_columns(:) 71 | 72 | public :: dyn_decomp 73 | public :: ini_grid_name 74 | public :: p_split 75 | public :: grids_on_this_pe 76 | public :: ptimelevels 77 | public :: dyn_grid_get_elem_coords 78 | !----------------------------------------------------------------------- 79 | ! Calculate Global Index 80 | 81 | integer, allocatable, target, dimension(:,:) :: myblkidx 82 | real(r8), allocatable, target, dimension(:,:,:) :: locidx_g 83 | real(r8), allocatable, target, dimension(:,:,:) :: blkidx_g 84 | real(r8), allocatable, target, dimension(:,:,:) :: gindex_g 85 | 86 | real(r8), allocatable :: block_extents_g(:,:) 87 | 88 | integer :: uniqpts_glob = 0 ! number of dynamics columns 89 | integer :: uniqpts_glob_ew = 0 ! number of dynamics columns for D grid ew 90 | integer :: uniqpts_glob_ns = 0 ! number of dynamics columns for D grid ns 91 | 92 | real(r8), pointer, dimension(:,:,:) :: grid_ew, grid_ns 93 | 94 | !----------------------------------------------------------------------- 95 | 96 | public :: dyn_grid_init ! Initialize the dynamics grid 97 | public :: get_horiz_grid_dim_d 98 | public :: get_horiz_grid_d ! get horizontal grid coordinates 99 | public :: get_dyn_grid_info ! Return physics grid column information 100 | public :: get_dyn_grid_parm 101 | public :: get_dyn_grid_parm_real1d 102 | public :: dyn_grid_get_colndx ! get element block/column and MPI process indices 103 | public :: physgrid_copy_attributes_d ! Attributes to copy to physics grid 104 | 105 | public Atm, mytile 106 | 107 | !======================================================================= 108 | contains 109 | !======================================================================= 110 | 111 | subroutine dyn_grid_init() 112 | 113 | ! Initialize FV grid, decomposition 114 | 115 | use block_control_mod, only: block_control_type, define_blocks_packed 116 | use cam_initfiles, only: initial_file_get_id 117 | use constants_mod, only: constants_init 118 | use fv_mp_mod, only: switch_current_Atm,mp_gather 119 | use hycoef, only: hycoef_init, hyai, hybi, hypi, hypm, nprlev 120 | use mpp_mod, only: mpp_init, mpp_npes, mpp_get_current_pelist,mpp_gather 121 | use pmgrid, only: plev 122 | use ref_pres, only: ref_pres_init 123 | use time_manager, only: get_step_size 124 | use pio, only: file_desc_t 125 | 126 | ! 
Local variables 127 | 128 | type(file_desc_t), pointer :: fh_ini 129 | 130 | character(len=*), parameter :: sub='dyn_grid_init' 131 | character(len=128) :: version = '$Id$' 132 | character(len=128) :: tagname = '$Name$' 133 | 134 | real(r8) :: dt_atmos_real = 0._r8 135 | 136 | integer :: i, j, k, tile 137 | integer :: is,ie,js,je,n,nx,ny 138 | character(len=128) :: errmsg 139 | 140 | !----------------------------------------------------------------------- 141 | ! from couple_main initialize atm structure - initializes fv3 grid 142 | !----------------------------------------------------------------------- 143 | 144 | call fms_init(mpicom) 145 | call mpp_init() 146 | call constants_init 147 | 148 | !----------------------------------------------------------------------- 149 | ! initialize atmospheric model ----- 150 | 151 | allocate(pelist(mpp_npes())) 152 | call mpp_get_current_pelist(pelist) 153 | 154 | !---- compute physics/atmos time step in seconds ---- 155 | 156 | dt_atmos_real = get_step_size() 157 | 158 | !----- initialize FV dynamical core ----- 159 | 160 | call fv_control_init( Atm, dt_atmos_real, mytile, grids_on_this_pe, p_split) ! allocates Atm components 161 | 162 | !----- write version and namelist to log file ----- 163 | call write_version_number ( version, tagname ) 164 | 165 | !! call switch_current_Atm(Atm(mytile)) 166 | call set_domain(Atm(mytile)%domain) 167 | 168 | !! set up dimensions_mod convenience variables. 169 | 170 | is = Atm(mytile)%bd%is 171 | ie = Atm(mytile)%bd%ie 172 | js = Atm(mytile)%bd%js 173 | je = Atm(mytile)%bd%je 174 | npx = Atm(mytile)%flagstruct%npx 175 | npy = Atm(mytile)%flagstruct%npy 176 | ntiles = Atm(mytile)%gridstruct%ntiles_g 177 | tile = Atm(mytile)%tile_of_mosaic 178 | 179 | if (Atm(mytile)%flagstruct%npz /= plev) then 180 | write(errmsg,*) 'FV3 dycore levels (npz),',Atm(mytile)%flagstruct%npz,' do not match model levels (plev)',plev 181 | call endrun(sub//':'//errmsg) 182 | end if 183 | 184 | ! Get file handle for initial file 185 | fh_ini => initial_file_get_id() 186 | 187 | ! Initialize hybrid coordinate arrays 188 | call hycoef_init(fh_ini) 189 | 190 | ! Initialize reference pressures 191 | call ref_pres_init(hypi, hypm, nprlev) 192 | 193 | ! Hybrid coordinate info for FV grid object 194 | Atm(mytile)%ks = plev 195 | do k = 1, plev+1 196 | Atm(mytile)%ak(k) = hyai(k) * 1.e5_r8 197 | Atm(mytile)%bk(k) = hybi(k) 198 | if ( Atm(mytile)%bk(k) == 0._r8) Atm(mytile)%ks = k-1 199 | end do 200 | Atm(mytile)%ptop = Atm(mytile)%ak(1) 201 | 202 | ! Define the CAM grids 203 | call define_cam_grids(Atm) 204 | 205 | end subroutine dyn_grid_init 206 | 207 | !======================================================================= 208 | subroutine get_dyn_grid_info(hdim1_d, hdim2_d, num_lev, & 209 | index_model_top_layer, index_surface_layer, unstructured, dyn_columns) 210 | !------------------------------------------------------------ 211 | ! 212 | ! get_dyn_grid_info returns physics grid column information 213 | ! 214 | !------------------------------------------------------------ 215 | use cam_abortutils, only: endrun 216 | use pmgrid, only: plev 217 | use shr_const_mod, only: SHR_CONST_PI 218 | use spmd_utils, only: iam 219 | 220 | ! Dummy arguments 221 | integer, intent(out) :: hdim1_d ! # longitudes or grid size 222 | integer, intent(out) :: hdim2_d ! # latitudes or 1 223 | integer, intent(out) :: num_lev ! 
# levels 224 | integer, intent(out) :: index_model_top_layer 225 | integer, intent(out) :: index_surface_layer 226 | logical, intent(out) :: unstructured 227 | ! dyn_columns will contain a copy of the physics column info local to this 228 | ! dynamics task 229 | type(physics_column_t), allocatable, intent(out) :: dyn_columns(:) 230 | 231 | ! Local variables 232 | character(len=*), parameter :: subname = 'get_dyn_grid_info' 233 | 234 | integer :: lindex 235 | integer :: gindex 236 | integer :: num_local_cols, num_global_cols 237 | integer :: i, j, is, ie, js, je 238 | integer :: tile,npx,npy,ntiles 239 | 240 | real(r8), pointer, dimension(:,:,:) :: agrid 241 | real(r8), pointer, dimension(:,:,:) :: grid 242 | real(r8), pointer, dimension(:,:) :: area 243 | real(r8), parameter :: radtodeg = 180.0_r8 / SHR_CONST_PI 244 | real(r8), parameter :: degtorad = SHR_CONST_PI / 180.0_r8 245 | 246 | unstructured = .true. ! FV3 is an unstructured dycore 247 | 248 | area => Atm(mytile)%gridstruct%area_64 249 | agrid => Atm(mytile)%gridstruct%agrid_64 250 | grid => Atm(mytile)%gridstruct%grid_64 251 | ntiles = Atm(mytile)%gridstruct%ntiles_g 252 | tile = Atm(mytile)%tile_of_mosaic 253 | npx = Atm(mytile)%flagstruct%npx 254 | npy = Atm(mytile)%flagstruct%npy 255 | 256 | is = Atm(mytile)%bd%is 257 | ie = Atm(mytile)%bd%ie 258 | js = Atm(mytile)%bd%js 259 | je = Atm(mytile)%bd%je 260 | 261 | num_local_cols = (je-js+1)*(ie-is+1) 262 | num_global_cols= (npx-1)*(npy-1)*ntiles 263 | 264 | if (allocated(local_dyn_columns)) then 265 | ! Check for correct number of columns 266 | if (size(local_dyn_columns) /= num_local_cols) then 267 | call endrun(subname//': called with inconsistent column numbers') 268 | end if 269 | else 270 | allocate(local_dyn_columns(num_local_cols)) 271 | 272 | hdim1_d = num_global_cols 273 | hdim2_d = 1 274 | num_lev = plev 275 | index_model_top_layer = 1 276 | index_surface_layer = plev 277 | lindex = 0 278 | do j = js, je 279 | do i = is, ie 280 | lindex = lindex + 1 281 | local_dyn_columns(lindex)%lon_rad = agrid(i,j,1) 282 | local_dyn_columns(lindex)%lon_deg = local_dyn_columns(lindex)%lon_rad * radtodeg 283 | local_dyn_columns(lindex)%lat_rad = agrid(i,j,2) 284 | local_dyn_columns(lindex)%lat_deg = local_dyn_columns(lindex)%lat_rad * radtodeg 285 | local_dyn_columns(lindex)%area = area(i,j)/(rearth*rearth) 286 | local_dyn_columns(lindex)%weight = local_dyn_columns(lindex)%area 287 | ! File decomposition 288 | gindex=((j-1)*(npx-1)+i)+((npx-1)*(npy-1)*(tile-1)) 289 | local_dyn_columns(lindex)%global_col_num = gindex 290 | ! Note, coord_indices not used for unstructured dycores 291 | ! Dynamics decomposition 292 | local_dyn_columns(lindex)%dyn_task = iam 293 | local_dyn_columns(lindex)%local_dyn_block = 1 294 | local_dyn_columns(lindex)%global_dyn_block = mpp_pe() + 1 295 | allocate(local_dyn_columns(lindex)%dyn_block_index(1)) 296 | local_dyn_columns(lindex)%dyn_block_index(1) = lindex 297 | end do 298 | end do 299 | end if 300 | ! Copy the information to the output array 301 | if (allocated(dyn_columns)) then 302 | deallocate(dyn_columns) 303 | end if 304 | allocate(dyn_columns(num_local_cols)) 305 | do lindex = 1, num_local_cols 306 | dyn_columns(lindex) = local_dyn_columns(lindex) 307 | end do 308 | end subroutine get_dyn_grid_info 309 | !======================================================================= 310 | 311 | subroutine get_horiz_grid_dim_d(hdim1_d, hdim2_d) 312 | 313 | ! Returns declared horizontal dimensions of computational grid. 314 | ! 
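!
! Index sketch for the file (global column) numbering computed above,
! gindex = ((j-1)*(npx-1)+i) + (npx-1)*(npy-1)*(tile-1), again using a
! hypothetical C96 grid (npx = npy = 97, so 9216 cells per tile):
!
!   tile 1, (i,j) = (1,1)    ->  gindex = 1
!   tile 1, (i,j) = (96,96)  ->  gindex = 95*96 + 96   = 9216
!   tile 2, (i,j) = (1,1)    ->  gindex = 1 + 9216     = 9217
!
! i.e. columns are numbered i-fastest within a row, rows within a tile,
! and tiles consecutively.
!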
For non-lon/lat grids, declare grid to be one-dimensional, 315 | 316 | use dimensions_mod, only: npx,npy,ntiles 317 | 318 | ! arguments 319 | integer, intent(out) :: hdim1_d ! first horizontal dimension 320 | integer, intent(out), optional :: hdim2_d ! second horizontal dimension 321 | !----------------------------------------------------------------------- 322 | 323 | hdim1_d = (npx-1)*(npy-1)*ntiles 324 | if (present(hdim2_d)) hdim2_d = 1 325 | 326 | end subroutine get_horiz_grid_dim_d 327 | 328 | !======================================================================= 329 | 330 | subroutine physgrid_copy_attributes_d(gridname, grid_attribute_names) 331 | 332 | ! create list of attributes for the physics grid that should be copied 333 | ! from the corresponding grid object on the dynamics decomposition 334 | 335 | use cam_grid_support, only: max_hcoordname_len 336 | 337 | ! arguments 338 | character(len=max_hcoordname_len), intent(out) :: gridname 339 | character(len=max_hcoordname_len), pointer, intent(out) :: grid_attribute_names(:) 340 | !----------------------------------------------------------------------- 341 | 342 | gridname = 'FFSL' 343 | allocate(grid_attribute_names(1)) 344 | ! For standard CAM-FV3, we need to copy the area attribute. 345 | ! For physgrid, the physics grid will create area 346 | grid_attribute_names(1) = 'cell' 347 | 348 | end subroutine physgrid_copy_attributes_d 349 | 350 | !======================================================================= 351 | 352 | 353 | integer function get_dyn_grid_parm(name) result(ival) 354 | 355 | ! This function is in the process of being deprecated, but is still needed 356 | ! as a dummy interface to satisfy external references from some chemistry routines. 357 | 358 | use pmgrid, only: plat, plev 359 | 360 | character(len=*), intent(in) :: name 361 | integer is,ie,js,je 362 | 363 | is = Atm(mytile)%bd%is 364 | ie = Atm(mytile)%bd%ie 365 | js = Atm(mytile)%bd%js 366 | je = Atm(mytile)%bd%je 367 | 368 | if (name == 'plat') then 369 | ival = plat 370 | else if (name == 'plon') then 371 | ival = (je-js+1)*(ie-is+1) 372 | else if (name == 'plev') then 373 | ival = plev 374 | else 375 | call endrun('get_dyn_grid_parm: undefined name: '//adjustl(trim(name))) 376 | end if 377 | 378 | end function get_dyn_grid_parm 379 | 380 | !========================================================================================= 381 | 382 | subroutine dyn_grid_get_colndx( igcol, ncols, owners, indx, jndx) 383 | use spmd_utils, only: iam 384 | 385 | ! For each global column index return the owning task. If the column is owned 386 | ! by this task, then also return the MPI process indicies for that column 387 | 388 | ! arguments 389 | integer, intent(in) :: ncols 390 | integer, intent(in) :: igcol(ncols) 391 | integer, intent(out) :: owners(ncols) 392 | integer, intent(out) :: indx(ncols) 393 | integer, intent(out) :: jndx(ncols) 394 | 395 | !---------------------------------------------------------------------------- 396 | 397 | owners = (igcol * 0) -1 ! Kill compiler warnings 398 | indx = -1 ! Kill compiler warnings 399 | jndx = -1 ! Kill compiler warnings 400 | call endrun('dyn_grid_get_colndx: not implemented for unstructured grids') 401 | 402 | end subroutine dyn_grid_get_colndx 403 | 404 | !======================================================================= 405 | 406 | subroutine dyn_grid_get_elem_coords(ie, rlon, rlat, cdex) 407 | 408 | ! Returns coordinates of a specified block element of the dyn grid 409 | ! 410 | 411 | ! 
arguments 412 | integer, intent(in) :: ie ! block element index 413 | real(r8),optional, intent(out) :: rlon(:) ! longitudes of the columns in the element 414 | real(r8),optional, intent(out) :: rlat(:) ! latitudes of the columns in the element 415 | integer, optional, intent(out) :: cdex(:) ! global column index 416 | !---------------------------------------------------------------------------- 417 | 418 | call endrun('dyn_grid_get_elem_coords: currently not available.') 419 | 420 | end subroutine dyn_grid_get_elem_coords 421 | 422 | !========================================================================================= 423 | 424 | subroutine define_cam_grids(Atm) 425 | 426 | ! Create grid objects on the dynamics decomposition for grids used by 427 | ! the dycore. The decomposed grid object contains data for the elements 428 | ! in each task and information to map that data to the global grid. 429 | ! 430 | ! Notes on dynamic memory management: 431 | ! 432 | ! . Coordinate values and the map passed to the horiz_coord_create 433 | ! method are copied to the object. The memory may be deallocated 434 | ! after the object is created. 435 | ! 436 | ! . The area values passed to cam_grid_attribute_register are only pointed 437 | ! to by the attribute object, so that memory cannot be deallocated. But the 438 | ! map is copied. 439 | ! 440 | ! . The grid_map passed to cam_grid_register is just pointed to. 441 | ! Cannot be deallocated. 442 | 443 | use cam_grid_support, only: horiz_coord_t, horiz_coord_create 444 | use cam_grid_support, only: cam_grid_register, cam_grid_attribute_register 445 | use fv_grid_utils_mod, only: mid_pt_sphere 446 | use mpp_mod, only: mpp_pe 447 | use physconst, only: rearth 448 | 449 | ! arguments 450 | type(fv_atmos_type), target, intent(in) :: Atm(:) 451 | 452 | ! 
local variables 453 | type(horiz_coord_t), pointer :: lat_coord 454 | type(horiz_coord_t), pointer :: lon_coord 455 | 456 | integer(iMap), pointer :: grid_map(:,:) 457 | 458 | integer, allocatable, target, dimension(:,:) :: mygid, mygid_ew,mygid_ns 459 | integer :: mybindex 460 | integer :: i, j, mapind,is,ie,js,je,isd,ied,jsd,jed,tile 461 | real(r8), pointer, dimension(:,:,:) :: agrid 462 | real(r8), pointer, dimension(:,:,:) :: grid 463 | real(r8), pointer, dimension(:,:) :: area 464 | real(r8), pointer :: area_ffsl(:) !fv3 cell centered grid area in sq radians 465 | real(r8), pointer :: pelon_deg(:) 466 | real(r8), pointer :: pelat_deg(:) 467 | real(r8), pointer :: pelon_deg_ew(:) 468 | real(r8), pointer :: pelat_deg_ew(:) 469 | real(r8), pointer :: pelon_deg_ns(:) 470 | real(r8), pointer :: pelat_deg_ns(:) 471 | real(r8) :: lonrad,latrad 472 | integer(iMap), pointer :: pemap(:) 473 | integer(iMap), pointer :: pemap_ew(:) 474 | integer(iMap), pointer :: pemap_ns(:) 475 | integer :: iend, jend 476 | 477 | !----------------------------------------------------------------------- 478 | 479 | area => Atm(mytile)%gridstruct%area_64 480 | agrid => Atm(mytile)%gridstruct%agrid_64 481 | grid => Atm(mytile)%gridstruct%grid_64 482 | is = Atm(mytile)%bd%is 483 | ie = Atm(mytile)%bd%ie 484 | js = Atm(mytile)%bd%js 485 | je = Atm(mytile)%bd%je 486 | isd = Atm(mytile)%bd%isd 487 | ied = Atm(mytile)%bd%ied 488 | jsd = Atm(mytile)%bd%jsd 489 | jed = Atm(mytile)%bd%jed 490 | tile = Atm(mytile)%tile_of_mosaic 491 | 492 | allocate(area_ffsl((ie-is+1)*(je-js+1))) 493 | allocate(grid_ew(isd:ied+1,jsd:jed,2)) 494 | allocate(grid_ns(isd:ied,jsd:jed+1,2)) 495 | allocate(pelon_deg((ie-is+1)*(je-js+1))) 496 | allocate(pelon_deg_ns((ie-is+1)*(je-js+2))) 497 | allocate(pelon_deg_ew((ie-is+2)*(je-js+1))) 498 | allocate(pelat_deg((ie-is+1)*(je-js+1))) 499 | allocate(pelat_deg_ew((ie-is+2)*(je-js+1))) 500 | allocate(pelat_deg_ns((ie-is+1)*(je-js+2))) 501 | allocate(pemap((ie-is+1)*(je-js+1))) 502 | allocate(pemap_ew((ie-is+2)*(je-js+1))) 503 | allocate(pemap_ns((ie-is+1)*(je-js+2))) 504 | 505 | do j=jsd,jed 506 | do i=isd,ied+1 507 | call mid_pt_sphere(grid(i, j,1:2), grid(i, j+1,1:2), grid_ew(i,j,:)) 508 | end do 509 | end do 510 | 511 | do j=jsd,jed+1 512 | do i=isd,ied 513 | call mid_pt_sphere(grid(i,j ,1:2), grid(i+1,j ,1:2), grid_ns(i,j,:)) 514 | end do 515 | end do 516 | 517 | allocate(mygid(is:ie,js:je)) 518 | allocate(mygid_ew(is:ie+1,js:je)) 519 | allocate(mygid_ns(is:ie,js:je+1)) 520 | 521 | mygid=0 522 | 523 | mybindex = mpp_pe() + 1 524 | 525 | do j = js, je 526 | do i = is, ie 527 | mygid(i,j)=((j-1)*(npx-1)+i)+((npx-1)*(npy-1)*(tile-1)) 528 | end do 529 | end do 530 | 531 | ! calculate local portion of global NS index array 532 | ! unique global indexing bottom left to top right of each tile consecutively. Dups reported as 0 533 | ! North tile edges of 2,4,6 are duplicates of south edge of 3,5,1 and are reported as 0 in mygid array 534 | mygid_ns=0 535 | if (je+1 == npy) then 536 | jend = je+mod(tile,2) 537 | else 538 | jend = je+1 539 | end if 540 | do j = js, jend 541 | do i = is, ie 542 | mygid_ns(i,j)=(i-1)*(npy-(mod(tile-1,2))) + j + (int((tile-1)/2)*(npx-1)*(npy-1)) + (int(tile/2)*(npx-1)*(npy)) 543 | end do 544 | end do 545 | ! appropriate tile boundaries already 0'd need to 546 | ! zero inner tile je+1 boundaries (These are also repeated points between tasks in ns direction)) 547 | if (je+1 /= npy) mygid_ns(is:ie,je+1)=0 548 | 549 | ! calculate local portion of global EW index array 550 | ! 
unique global indexing bottom left to top right of each tile consecutively. Dups reported as 0 551 | ! East tile edges of 1,3,5 are duplicates of west edge of 2,4,6 and are reported as 0 in mygid array 552 | mygid_ew=0 553 | if (ie+1 == npx) then 554 | iend=ie+mod(tile-1,2) 555 | else 556 | iend=ie+1 557 | end if 558 | do j = js, je 559 | do i = is, iend 560 | mygid_ew(i,j)=(j-1)*(npx-(mod(tile,2))) + i + (int(tile/2)*(npx-1)*(npy-1)) + (int((tile-1)/2)*(npx)*(npy-1)) 561 | end do 562 | end do 563 | 564 | ! appropriate east tile boundaries already 0'd from above need to 565 | ! zero inner tile ie+1 boundaries on appropriate processors 566 | ! (These are also repeated points between tasks in ew direction) 567 | if (ie+1 /= npx) mygid_ew(ie+1,js:je)=0 568 | 569 | !----------------------- 570 | ! Create FFSL grid object 571 | !----------------------- 572 | 573 | ! Calculate the mapping between FFSL points and file order (tile1 thru tile6) 574 | mapind = 1 575 | do j = js, je 576 | do i = is, ie 577 | pelon_deg(mapind) = agrid(i,j,1) * rad2deg 578 | pelat_deg(mapind) = agrid(i,j,2) * rad2deg 579 | area_ffsl(mapind) = area(i,j)/(rearth*rearth) 580 | pemap(mapind) = mygid(i,j) 581 | mapind = mapind + 1 582 | end do 583 | end do 584 | 585 | mapind = 1 586 | do j = js, je 587 | do i = is, ie+1 588 | lonrad=grid_ew(i,j,1) 589 | latrad=grid_ew(i,j,2) 590 | pelon_deg_ew(mapind) = lonrad * rad2deg 591 | pelat_deg_ew(mapind) = latrad * rad2deg 592 | pemap_ew(mapind) = mygid_ew(i,j) 593 | mapind = mapind + 1 594 | end do 595 | end do 596 | 597 | mapind = 1 598 | do j = js, je+1 599 | do i = is, ie 600 | lonrad=grid_ns(i,j,1) 601 | latrad=grid_ns(i,j,2) 602 | pelon_deg_ns(mapind) = lonrad * rad2deg 603 | pelat_deg_ns(mapind) = latrad * rad2deg 604 | pemap_ns(mapind) = mygid_ns(i,j) 605 | mapind = mapind + 1 606 | end do 607 | end do 608 | 609 | allocate(grid_map(3, (ie-is+1)*(je-js+1))) 610 | grid_map = 0 611 | mapind = 1 612 | do j = js, je 613 | do i = is, ie 614 | grid_map(1, mapind) = i 615 | grid_map(2, mapind) = j 616 | grid_map(3, mapind) = pemap(mapind) 617 | mapind = mapind + 1 618 | end do 619 | end do 620 | 621 | ! output local and global uniq points 622 | uniqpts_glob=(npx-1)*(npy-1)*6 623 | 624 | ! with FV3 if the initial file uses the horizontal dimension 'ncol' rather than 625 | ! 'ncol_d' then we need a grid object with the names ncol,lat,lon to read it. 626 | ! Create that grid object here. 627 | 628 | lat_coord => horiz_coord_create('lat', 'ncol', uniqpts_glob, 'latitude', & 629 | 'degrees_north', 1, size(pelat_deg), pelat_deg, map=pemap) 630 | lon_coord => horiz_coord_create('lon', 'ncol', uniqpts_glob, 'longitude', & 631 | 'degrees_east', 1, size(pelon_deg), pelon_deg, map=pemap) 632 | 633 | ! register physics cell-center/A-grid 634 | call cam_grid_register(ini_grid_name, ini_decomp, lat_coord, lon_coord, & 635 | grid_map, block_indexed=.false., unstruct=.true.) 636 | call cam_grid_attribute_register(ini_grid_name, 'cell', '', 1) 637 | call cam_grid_attribute_register(ini_grid_name, 'area', 'cam cell center areas', & 638 | 'ncol', area_ffsl, map=pemap) 639 | nullify(lat_coord) 640 | nullify(lon_coord) 641 | 642 | ! 
create and register dynamic A-grid, src_in(/1,2/) allows ilev,jlev,nlev ordering for restart IO 643 | lat_coord => horiz_coord_create('lat_d', 'ncol_d', uniqpts_glob, 'latitude', & 644 | 'degrees_north', 1, size(pelat_deg), pelat_deg, map=pemap) 645 | lon_coord => horiz_coord_create('lon_d', 'ncol_d', uniqpts_glob, 'longitude', & 646 | 'degrees_east', 1, size(pelon_deg), pelon_deg, map=pemap) 647 | 648 | call cam_grid_register('FFSL', dyn_decomp, lat_coord, lon_coord, & 649 | grid_map, block_indexed=.false., unstruct=.true.,src_in=(/1,2/)) 650 | call cam_grid_attribute_register('FFSL', 'cell', '', 1) 651 | call cam_grid_attribute_register('FFSL', 'area_d', 'FFSL grid areas', & 652 | 'ncol_d', area_ffsl, map=pemap) 653 | 654 | ! register grid for writing dynamics A-Grid fields in history files 655 | call cam_grid_register('FFSLHIST', dyn_decomp_hist, lat_coord, lon_coord, & 656 | grid_map, block_indexed=.false., unstruct=.true.) 657 | call cam_grid_attribute_register('FFSLHIST', 'cell', '', 1) 658 | call cam_grid_attribute_register('FFSLHIST', 'area_d', 'FFSLHIST grid areas', & 659 | 'ncol_d', area_ffsl, map=pemap) 660 | 661 | ! grid_map cannot be deallocated as the cam_filemap_t object just points 662 | ! to it. It can be nullified. 663 | nullify(grid_map) 664 | ! lat_coord and lon_coord belong to grid so can't be deleted. It can be nullified 665 | nullify(lat_coord) 666 | nullify(lon_coord) 667 | ! area_ffsl cannot be deallocated as the attribute object is just pointing 668 | ! to that memory. It can be nullified since the attribute object has 669 | ! the reference. 670 | nullify(area_ffsl) 671 | 672 | 673 | ! global EW uniq points 674 | uniqpts_glob_ew=((2*npx)-1)*(npy-1)*3 675 | 676 | lat_coord => horiz_coord_create('lat_d_ew', 'ncol_d_ew', uniqpts_glob_ew, 'latitude', & 677 | 'degrees_north', 1, size(pelat_deg_ew), pelat_deg_ew, map=pemap_ew) 678 | lon_coord => horiz_coord_create('lon_d_ew', 'ncol_d_ew', uniqpts_glob_ew, 'longitude', & 679 | 'degrees_east', 1, size(pelon_deg_ew), pelon_deg_ew, map=pemap_ew) 680 | 681 | allocate(grid_map(3, (ie-is+2)*(je-js+1))) 682 | grid_map = 0 683 | mapind = 1 684 | do j = js, je 685 | do i = is, ie+1 686 | grid_map(1, mapind) = i 687 | grid_map(2, mapind) = j 688 | grid_map(3, mapind) = pemap_ew(mapind) 689 | mapind = mapind + 1 690 | end do 691 | end do 692 | 693 | ! register dynamic D-grid, src_in(/1,2/) allows ilev,jlev,nlev ordering for restart IO 694 | call cam_grid_register('FFSL_EW', dyn_decomp_ew, lat_coord, lon_coord, & 695 | grid_map, block_indexed=.false., unstruct=.true.,src_in=(/1,2/)) 696 | call cam_grid_attribute_register('FFSL_EW', 'cell', '', 1) 697 | 698 | ! register grid for writing dynamics D-Grid fields in history files 699 | call cam_grid_register('FFSLHIST_EW', dyn_decomp_hist_ew, lat_coord, lon_coord, & 700 | grid_map, block_indexed=.false., unstruct=.true.) 701 | call cam_grid_attribute_register('FFSLHIST_EW', 'cell', '', 1) 702 | 703 | ! grid_map cannot be deallocated as the cam_filemap_t object just points 704 | ! to it. It can be nullified. 705 | nullify(grid_map) 706 | ! lat_coord and lon_coord belong to grid so can't be deleted. It can be nullified 707 | nullify(lat_coord) ! Belongs to grid 708 | nullify(lon_coord) ! Belongs to grid 709 | 710 | 711 | ! 
output local and global uniq points 712 | uniqpts_glob_ns=((2*npy)-1)*(npx-1)*3 713 | 714 | lat_coord => horiz_coord_create('lat_d_ns', 'ncol_d_ns', uniqpts_glob_ns, 'latitude', & 715 | 'degrees_north', 1, size(pelat_deg_ns), pelat_deg_ns, map=pemap_ns) 716 | lon_coord => horiz_coord_create('lon_d_ns', 'ncol_d_ns', uniqpts_glob_ns, 'longitude', & 717 | 'degrees_east', 1, size(pelon_deg_ns), pelon_deg_ns, map=pemap_ns) 718 | 719 | allocate(grid_map(3, (ie-is+1)*(je-js+2))) 720 | grid_map = 0 721 | mapind = 1 722 | do j = js, je+1 723 | do i = is, ie 724 | grid_map(1, mapind) = i 725 | grid_map(2, mapind) = j 726 | grid_map(3, mapind) = pemap_ns(mapind) 727 | mapind = mapind + 1 728 | end do 729 | end do 730 | 731 | ! register dynamic D-grid, src_in(/1,2/) allows ilev,jlev,nlev ordering for restart IO 732 | call cam_grid_register('FFSL_NS', dyn_decomp_ns, lat_coord, lon_coord, & 733 | grid_map, block_indexed=.false., unstruct=.true.,src_in=(/1,2/)) 734 | call cam_grid_attribute_register('FFSL_NS', 'cell', '', 1) 735 | 736 | ! register grid for writing dynamics D-Grid fields in history files 737 | call cam_grid_register('FFSLHIST_NS', dyn_decomp_hist_ns, lat_coord, lon_coord, & 738 | grid_map, block_indexed=.false., unstruct=.true.) 739 | call cam_grid_attribute_register('FFSLHIST_NS', 'cell', '', 1) 740 | 741 | ! grid_map cannot be deallocated as the cam_filemap_t object just points 742 | ! to it. It can be nullified. 743 | nullify(grid_map) 744 | ! lat_coord and lon_coord belong to grid so can't be deleted. It can be nullified 745 | nullify(lat_coord) ! Belongs to grid 746 | nullify(lon_coord) ! Belongs to grid 747 | 748 | deallocate(pelon_deg) 749 | deallocate(pelat_deg) 750 | deallocate(pelon_deg_ns) 751 | deallocate(pelat_deg_ns) 752 | deallocate(pelon_deg_ew) 753 | deallocate(pelat_deg_ew) 754 | deallocate(pemap) 755 | deallocate(pemap_ew) 756 | deallocate(pemap_ns) 757 | deallocate(mygid) 758 | deallocate(mygid_ew) 759 | deallocate(mygid_ns) 760 | 761 | end subroutine define_cam_grids 762 | 763 | !============================================================================= 764 | !== 765 | !!!!!! DUMMY INTERFACE TO TEST WEAK SCALING FIX, THIS SHOULD GO AWAY 766 | !== 767 | !============================================================================= 768 | 769 | subroutine get_horiz_grid_d(nxy, clat_d_out, clon_d_out, area_d_out, & 770 | wght_d_out, lat_d_out, lon_d_out) 771 | 772 | ! Return global arrays of latitude and longitude (in radians), column 773 | ! surface area (in radians squared) and surface integration weights for 774 | ! global column indices that will be passed to/from physics 775 | 776 | ! arguments 777 | integer, intent(in) :: nxy ! array sizes 778 | 779 | real(r8), intent(out), optional :: clat_d_out(:) ! column latitudes 780 | real(r8), intent(out), optional :: clon_d_out(:) ! column longitudes 781 | real(r8), intent(out), target, optional :: area_d_out(:) ! column surface 782 | 783 | real(r8), intent(out), target, optional :: wght_d_out(:) ! column integration weight 784 | real(r8), intent(out), optional :: lat_d_out(:) ! column degree latitudes 785 | real(r8), intent(out), optional :: lon_d_out(:) ! 
column degree longitudes 786 | character(len=*), parameter :: subname = 'get_horiz_grid_d' 787 | 788 | call endrun(subname//': NOT SUPPORTED WITH WEAK SCALING FIX') 789 | end subroutine get_horiz_grid_d 790 | 791 | !============================================================================== 792 | 793 | function get_dyn_grid_parm_real1d(name) result(rval) 794 | 795 | ! This routine is not used for SE, but still needed as a dummy interface to satisfy 796 | ! references from mo_synoz.F90 797 | 798 | character(len=*), intent(in) :: name 799 | real(r8), pointer :: rval(:) 800 | 801 | if(name.eq.'w') then 802 | call endrun('get_dyn_grid_parm_real1d: w not defined') 803 | else if(name.eq.'clat') then 804 | call endrun('get_dyn_grid_parm_real1d: clat not supported, use get_horiz_grid_d') 805 | else if(name.eq.'latdeg') then 806 | call endrun('get_dyn_grid_parm_real1d: latdeg not defined') 807 | else 808 | nullify(rval) 809 | end if 810 | end function get_dyn_grid_parm_real1d 811 | 812 | !============================================================================== 813 | 814 | end module dyn_grid 815 | -------------------------------------------------------------------------------- /dp_coupling.F90: -------------------------------------------------------------------------------- 1 | module dp_coupling 2 | 3 | !------------------------------------------------------------------------------- 4 | ! dynamics - physics coupling module 5 | !------------------------------------------------------------------------------- 6 | 7 | use cam_abortutils, only: endrun 8 | use cam_logfile, only: iulog 9 | use constituents, only: pcnst 10 | use dimensions_mod, only: npx,npy,nlev, & 11 | cnst_name_ffsl, cnst_longname_ffsl,fv3_lcp_moist,fv3_lcv_moist, & 12 | qsize_tracer_idx_cam2dyn,fv3_scale_ttend 13 | use dyn_comp, only: dyn_export_t, dyn_import_t 14 | use dyn_grid, only: mytile 15 | use fv_grid_utils_mod, only: g_sum 16 | use hycoef, only: hyam, hybm, hyai, hybi, ps0 17 | use mpp_domains_mod, only: mpp_update_domains, domain2D, DGRID_NE 18 | use perf_mod, only: t_startf, t_stopf 19 | use phys_grid, only: get_dyn_col_p, columns_on_task, get_chunk_info_p 20 | use phys_grid, only: get_ncols_p 21 | use physconst, only: cpair, gravit, rair, zvir, cappa 22 | use air_composition, only: rairv 23 | use physics_types, only: physics_state, physics_tend 24 | use ppgrid, only: begchunk, endchunk, pcols, pver, pverp 25 | use shr_kind_mod, only: r8=>shr_kind_r8, i8 => shr_kind_i8 26 | use spmd_dyn, only: local_dp_map 27 | use spmd_utils, only: masterproc 28 | 29 | implicit none 30 | private 31 | public :: d_p_coupling, p_d_coupling 32 | 33 | !======================================================================= 34 | contains 35 | !======================================================================= 36 | 37 | subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out) 38 | 39 | ! Convert the dynamics output state into the physics input state. 40 | ! Note that all pressures and tracer mixing ratios coming from the FV3 dycore are based on 41 | ! wet air mass. 42 | 43 | 44 | ! use dyn_comp, only: frontgf_idx, frontga_idx 45 | use fv_arrays_mod, only: fv_atmos_type 46 | !!$ use gravity_waves_sources, only: gws_src_fnct 47 | !!$ use phys_control, only: use_gw_front, use_gw_front_igw 48 | use physics_buffer, only: physics_buffer_desc, pbuf_get_chunk 49 | 50 | ! arguments 51 | type (dyn_export_t), intent(inout) :: dyn_out ! 
dynamics export 52 | type (physics_buffer_desc), pointer :: pbuf2d(:,:) 53 | type (physics_state), intent(inout), dimension(begchunk:endchunk) :: phys_state 54 | type (physics_tend ), intent(inout), dimension(begchunk:endchunk) :: phys_tend 55 | 56 | ! LOCAL VARIABLES 57 | 58 | integer :: i, j, k, n 59 | integer :: is,ie,js,je ! indices into fv3 block structure 60 | integer :: lchnk, icol, ilyr ! indices over chunks, columns, layers 61 | integer :: ncols 62 | integer :: col_ind, blk_num, blk_ind(1), m, m_cnst 63 | integer :: tsize ! amount of data per grid point passed to physics 64 | integer :: m_ffsl ! constituent index for ffsl grid 65 | 66 | type (fv_atmos_type), pointer :: Atm(:) 67 | 68 | ! LOCAL Allocatables 69 | real(r8), allocatable, dimension(:) :: phis_tmp !((ie-is+1)*(je-js+1)) ! temporary array to hold phis 70 | real(r8), allocatable, dimension(:) :: ps_tmp !((ie-is+1)*(je-js+1)) ! temporary array to hold ps 71 | real(r8), allocatable, dimension(:,:) :: T_tmp !((ie-is+1)*(je-js+1),pver) ! temporary array to hold T 72 | real(r8), allocatable, dimension(:,:) :: omega_tmp!((ie-is+1)*(je-js+1),pver) ! temporary array to hold omega 73 | real(r8), allocatable, dimension(:,:) :: pdel_tmp !((ie-is+1)*(je-js+1),pver) ! temporary array to hold pdel 74 | real(r8), allocatable, dimension(:,:) :: u_tmp !((ie-is+1)*(je-js+1),pver) ! temp array to hold u 75 | real(r8), allocatable, dimension(:,:) :: v_tmp !((ie-is+1)*(je-js+1),pver) ! temp array to hold v 76 | real(r8), allocatable, dimension(:,:,:) :: q_tmp !((ie-is+1)*(je-js+1),pver,pcnst) ! temp to hold advected constituents 77 | 78 | ! Frontogenesis 79 | real (kind=r8), allocatable :: frontgf(:,:) ! temp arrays to hold frontogenesis 80 | real (kind=r8), allocatable :: frontga(:,:) ! function (frontgf) and angle (frontga) 81 | real (kind=r8), allocatable :: frontgf_phys(:,:,:) 82 | real (kind=r8), allocatable :: frontga_phys(:,:,:) 83 | ! Pointers to pbuf 84 | real (kind=r8), pointer :: pbuf_frontgf(:,:) 85 | real (kind=r8), pointer :: pbuf_frontga(:,:) 86 | 87 | type(physics_buffer_desc), pointer :: pbuf_chnk(:) 88 | !----------------------------------------------------------------------- 89 | 90 | if (.not. local_dp_map) then 91 | call endrun('d_p_coupling: Weak scaling does not support load balancing') 92 | end if 93 | 94 | Atm=>dyn_out%atm 95 | 96 | is = Atm(mytile)%bd%is 97 | ie = Atm(mytile)%bd%ie 98 | js = Atm(mytile)%bd%js 99 | je = Atm(mytile)%bd%je 100 | 101 | nullify(pbuf_chnk) 102 | nullify(pbuf_frontgf) 103 | nullify(pbuf_frontga) 104 | 105 | ! Allocate temporary arrays to hold data for physics decomposition 106 | allocate(ps_tmp ((ie-is+1)*(je-js+1))) 107 | allocate(phis_tmp ((ie-is+1)*(je-js+1))) 108 | allocate(T_tmp ((ie-is+1)*(je-js+1),pver)) 109 | allocate(u_tmp ((ie-is+1)*(je-js+1),pver)) 110 | allocate(v_tmp ((ie-is+1)*(je-js+1),pver)) 111 | allocate(omega_tmp((ie-is+1)*(je-js+1),pver)) 112 | allocate(pdel_tmp ((ie-is+1)*(je-js+1),pver)) 113 | allocate(Q_tmp ((ie-is+1)*(je-js+1),pver,pcnst)) 114 | 115 | ps_tmp = 0._r8 116 | phis_tmp = 0._r8 117 | T_tmp = 0._r8 118 | u_tmp = 0._r8 119 | v_tmp = 0._r8 120 | omega_tmp= 0._r8 121 | pdel_tmp = 0._r8 122 | Q_tmp = 0._r8 123 | 124 | !!$ if (use_gw_front .or. 
use_gw_front_igw) then 125 | !!$ allocate(frontgf(nphys_pts,pver), stat=ierr) 126 | !!$ if (ierr /= 0) call endrun("dp_coupling: Allocate of frontgf failed.") 127 | !!$ allocate(frontga(nphys_pts,pver), stat=ierr) 128 | !!$ if (ierr /= 0) call endrun("dp_coupling: Allocate of frontga failed.") 129 | !!$ frontgf(:,:) = 0._r8 130 | !!$ frontga(:,:) = 0._r8 131 | !!$ end if 132 | 133 | !!$ ! q_prev is for saving the tracer fields for calculating tendencies 134 | !!$ if (.not. allocated(q_prev)) then 135 | !!$ allocate(q_prev(pcols,pver,pcnst,begchunk:endchunk)) 136 | !!$ end if 137 | !!$ q_prev = 0.0_R8 138 | 139 | n = 1 140 | do j = js, je 141 | do i = is, ie 142 | ps_tmp (n) = Atm(mytile)%ps (i, j) 143 | phis_tmp(n) = Atm(mytile)%phis(i, j) 144 | do k = 1, pver 145 | T_tmp (n, k) = Atm(mytile)%pt (i, j, k) 146 | u_tmp (n, k) = Atm(mytile)%ua (i, j, k) 147 | v_tmp (n, k) = Atm(mytile)%va (i, j, k) 148 | omega_tmp(n, k) = Atm(mytile)%omga(i, j, k) 149 | pdel_tmp (n, k) = Atm(mytile)%delp(i, j, k) 150 | ! 151 | ! The fv3 constituent array may be in a different order than the cam array, remap here. 152 | ! 153 | do m = 1, pcnst 154 | m_ffsl=qsize_tracer_idx_cam2dyn(m) 155 | Q_tmp(n, k, m) = Atm(mytile)%q(i, j, k, m_ffsl) 156 | end do 157 | end do 158 | n = n + 1 159 | end do 160 | end do 161 | 162 | call t_startf('dpcopy') 163 | !!$ if (use_gw_front .or. use_gw_front_igw) then 164 | !!$ allocate(frontgf_phys(pcols, pver, begchunk:endchunk)) 165 | !!$ allocate(frontga_phys(pcols, pver, begchunk:endchunk)) 166 | !!$ end if 167 | !$omp parallel do private (col_ind, lchnk, icol, blk_ind, ilyr, m) 168 | do col_ind = 1, columns_on_task 169 | call get_dyn_col_p(col_ind, blk_num, blk_ind) 170 | call get_chunk_info_p(col_ind, lchnk, icol) 171 | phys_state(lchnk)%ps(icol) = ps_tmp(blk_ind(1)) 172 | phys_state(lchnk)%phis(icol) = phis_tmp(blk_ind(1)) 173 | do ilyr = 1, pver 174 | phys_state(lchnk)%pdel(icol, ilyr) = pdel_tmp(blk_ind(1), ilyr) 175 | phys_state(lchnk)%t(icol, ilyr) = T_tmp(blk_ind(1), ilyr) 176 | phys_state(lchnk)%u(icol, ilyr) = u_tmp(blk_ind(1), ilyr) 177 | phys_state(lchnk)%v(icol, ilyr) = v_tmp(blk_ind(1), ilyr) 178 | phys_state(lchnk)%omega(icol, ilyr) = omega_tmp(blk_ind(1), ilyr) 179 | pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) 180 | !!$ if (use_gw_front .or. use_gw_front_igw) then 181 | !!$ call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) 182 | !!$ call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) 183 | !!$ frontgf_phys(icol, ilyr, lchnk) = frontgf(blk_ind(1), ilyr) 184 | !!$ frontga_phys(icol, ilyr, lchnk) = frontga(blk_ind(1), ilyr) 185 | !!$ end if 186 | do m = 1, pcnst 187 | phys_state(lchnk)%q(icol,ilyr,m) = Q_tmp(blk_ind(1),ilyr,m) 188 | end do 189 | end do 190 | end do 191 | !!$ if (use_gw_front .or. use_gw_front_igw) then 192 | !!$ !$omp parallel do num_threads(max_num_threads) private (lchnk, ncols, icol, ilyr, pbuf_chnk, pbuf_frontgf, pbuf_frontga) 193 | !!$ do lchnk = begchunk, endchunk 194 | !!$ ncols = get_ncols_p(lchnk) 195 | !!$ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) 196 | !!$ call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf) 197 | !!$ call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga) 198 | !!$ do icol = 1, ncols 199 | !!$ do ilyr = 1, pver 200 | !!$ pbuf_frontgf(icol, ilyr) = frontgf_phys(icol, ilyr, lchnk) 201 | !!$ pbuf_frontga(icol, ilyr) = frontga_phys(icol, ilyr, lchnk) 202 | !!$ end do 203 | !!$ end do 204 | !!$ end do 205 | !!$ deallocate(frontgf_phys) 206 | !!$ deallocate(frontga_phys) 207 | !!$ end if 208 | 209 | !!$ ! 
Save the tracer fields input to physics package for calculating tendencies 210 | !!$ ! The mixing ratios are all dry at this point. 211 | !!$ do lchnk = begchunk, endchunk 212 | !!$ ncols = phys_state(lchnk)%ncol 213 | !!$ q_prev(1:ncols,1:pver,1:pcnst,lchnk) = phys_state(lchnk)%q(1:ncols,1:pver,1:pcnst) 214 | !!$ end do 215 | 216 | call t_stopf('dpcopy') 217 | 218 | deallocate(ps_tmp ) 219 | deallocate(phis_tmp ) 220 | deallocate(T_tmp ) 221 | deallocate(u_tmp ) 222 | deallocate(v_tmp ) 223 | deallocate(omega_tmp) 224 | deallocate(pdel_tmp ) 225 | deallocate(Q_tmp ) 226 | 227 | ! derive the physics state from the dynamics state converting to proper vapor loading 228 | ! and setting dry mixing ratio variables based on cnst_type - no need to call wet_to_dry 229 | ! since derived_phys_dry takes care of that. 230 | 231 | call t_startf('derived_phys_dry') 232 | call derived_phys_dry(phys_state, phys_tend, pbuf2d) 233 | call t_stopf('derived_phys_dry') 234 | 235 | end subroutine d_p_coupling 236 | 237 | !======================================================================= 238 | 239 | subroutine p_d_coupling(phys_state, phys_tend, dyn_in) 240 | 241 | ! Convert the physics output state into the dynamics input state. 242 | 243 | use cam_history, only: outfld 244 | use constants_mod, only: cp_air, kappa 245 | use dyn_comp, only: calc_tot_energy_dynamics 246 | use fms_mod, only: set_domain 247 | use fv_arrays_mod, only: fv_atmos_type 248 | use fv_grid_utils_mod, only: cubed_to_latlon 249 | use air_composition, only: thermodynamic_active_species_num,thermodynamic_active_species_idx_dycore 250 | use air_composition, only: thermodynamic_active_species_cp,thermodynamic_active_species_cv,dry_air_species_num 251 | use phys_grid, only: get_dyn_col_p, columns_on_task, get_chunk_info_p 252 | use time_manager, only: get_step_size 253 | 254 | ! arguments 255 | type (physics_state), intent(inout), dimension(begchunk:endchunk) :: phys_state 256 | type (physics_tend), intent(inout), dimension(begchunk:endchunk) :: phys_tend 257 | type (dyn_import_t), intent(inout) :: dyn_in 258 | 259 | ! LOCAL VARIABLES 260 | 261 | integer :: blk_ind(1) ! element offset 262 | integer :: col_ind, blk_num ! index over columns, block number 263 | integer :: i, j, k,m, m_ffsl,n,nq 264 | integer :: idim 265 | integer :: is,isd,ie,ied,js,jsd,je,jed 266 | integer :: lchnk, icol, ilyr ! indices over chunks, columns, layers 267 | integer :: ncols 268 | integer :: num_wet_species ! total number of wet species (first tracers in FV3 tracer array) 269 | 270 | real (r8) :: dt 271 | real (r8) :: fv3_totwatermass, fv3_airmass 272 | real (r8) :: qall,cpfv3 273 | real (r8) :: tracermass(pcnst) 274 | 275 | type (fv_atmos_type), pointer :: Atm(:) 276 | 277 | real(r8), allocatable, dimension(:,:,:) :: delpdry ! temporary to hold tendencies 278 | real(r8), allocatable, dimension(:,:) :: pdel_tmp ! temporary to hold 279 | real(r8), allocatable, dimension(:,:) :: pdeldry_tmp ! temporary to hold 280 | real(r8), allocatable, dimension(:,:,:) :: t_dt ! temporary to hold tendencies 281 | real(r8), allocatable, dimension(:,:) :: t_dt_tmp ! temporary to hold tendencies 282 | real(r8), allocatable, dimension(:,:,:) :: t_tendadj ! temporary array to temperature tendency adjustment 283 | real(r8), allocatable, dimension(:,:,:) :: u_dt ! temporary to hold tendencies 284 | real(r8), allocatable, dimension(:,:) :: u_dt_tmp ! temporary to hold tendencies 285 | real(r8), allocatable, dimension(:,:) :: u_tmp ! 
temporary array to hold u and v 286 | real(r8), allocatable, dimension(:,:,:) :: v_dt ! temporary to hold tendencies 287 | real(r8), allocatable, dimension(:,:) :: v_dt_tmp ! temporary to hold tendencies 288 | real(r8), allocatable, dimension(:,:) :: v_tmp ! temporary array to hold u and v 289 | real(r8), allocatable, dimension(:,:,:) :: q_tmp ! temporary to hold 290 | 291 | !----------------------------------------------------------------------- 292 | 293 | if (.not. local_dp_map) then 294 | call endrun('p_d_coupling: Weak scaling does not support load balancing') 295 | end if 296 | 297 | Atm=>dyn_in%atm 298 | 299 | is = Atm(mytile)%bd%is 300 | ie = Atm(mytile)%bd%ie 301 | js = Atm(mytile)%bd%js 302 | je = Atm(mytile)%bd%je 303 | isd = Atm(mytile)%bd%isd 304 | ied = Atm(mytile)%bd%ied 305 | jsd = Atm(mytile)%bd%jsd 306 | jed = Atm(mytile)%bd%jed 307 | 308 | call set_domain ( Atm(mytile)%domain ) 309 | 310 | allocate(delpdry(isd:ied,jsd:jed,nlev)) ; delpdry = 0._r8 311 | allocate(t_dt_tmp((ie-is+1)*(je-js+1),pver)) ; t_dt_tmp = 0._r8 312 | allocate(u_dt_tmp((ie-is+1)*(je-js+1),pver)) ; u_dt_tmp = 0._r8 313 | allocate(v_dt_tmp((ie-is+1)*(je-js+1),pver)) ; v_dt_tmp = 0._r8 314 | allocate(pdel_tmp((ie-is+1)*(je-js+1),pver)) ; pdel_tmp = 0._r8 315 | allocate(pdeldry_tmp((ie-is+1)*(je-js+1),pver)) ; pdeldry_tmp = 0._r8 316 | allocate(U_tmp((ie-is+1)*(je-js+1),pver)) ; U_tmp = 0._r8 317 | allocate(V_tmp((ie-is+1)*(je-js+1),pver)) ; V_tmp = 0._r8 318 | allocate(Q_tmp((ie-is+1)*(je-js+1),pver,pcnst)) ; Q_tmp = 0._r8 319 | allocate(u_dt(isd:ied,jsd:jed,nlev)) ; u_dt = 0._r8 320 | allocate(v_dt(isd:ied,jsd:jed,nlev)) ; v_dt = 0._r8 321 | allocate(t_dt(is:ie,js:je,nlev)) ; t_dt = 0._r8 322 | allocate(t_tendadj(is:ie,js:je,nlev)) ; t_tendadj = 0._r8 323 | 324 | Atm=>dyn_in%atm 325 | 326 | call t_startf('pd_copy') 327 | !$omp parallel do private (col_ind, lchnk, icol, ie, blk_ind, ilyr, m) 328 | do col_ind = 1, columns_on_task 329 | call get_dyn_col_p(col_ind, blk_num, blk_ind) 330 | call get_chunk_info_p(col_ind, lchnk, icol) 331 | do ilyr = 1, pver 332 | t_dt_tmp(blk_ind(1),ilyr) = phys_tend(lchnk)%dtdt(icol,ilyr) 333 | u_tmp(blk_ind(1),ilyr) = phys_state(lchnk)%u(icol,ilyr) 334 | v_tmp(blk_ind(1),ilyr) = phys_state(lchnk)%v(icol,ilyr) 335 | u_dt_tmp(blk_ind(1),ilyr) = phys_tend(lchnk)%dudt(icol,ilyr) 336 | v_dt_tmp(blk_ind(1),ilyr) = phys_tend(lchnk)%dvdt(icol,ilyr) 337 | pdel_tmp(blk_ind(1),ilyr) = phys_state(lchnk)%pdel(icol,ilyr) 338 | pdeldry_tmp(blk_ind(1),ilyr) = phys_state(lchnk)%pdeldry(icol,ilyr) 339 | do m = 1, pcnst 340 | Q_tmp(blk_ind(1),ilyr,m) = phys_state(lchnk)%q(icol,ilyr,m) 341 | end do 342 | end do 343 | end do 344 | 345 | dt = get_step_size() 346 | idim=ie-is+1 347 | 348 | ! pt_dt is adjusted below. 349 | n = 1 350 | do j = js, je 351 | do i = is, ie 352 | do k = 1, pver 353 | t_dt(i, j, k) = t_dt_tmp (n, k) 354 | u_dt(i, j, k) = u_dt_tmp (n, k) 355 | v_dt(i, j, k) = v_dt_tmp (n, k) 356 | Atm(mytile)%ua(i, j, k) = Atm(mytile)%ua(i, j, k) + u_dt(i, j, k)*dt 357 | Atm(mytile)%va(i, j, k) = Atm(mytile)%va(i, j, k) + v_dt(i, j, k)*dt 358 | Atm(mytile)%delp(i, j, k) = pdel_tmp (n, k) 359 | delpdry(i, j, k) = pdeldry_tmp (n, k) 360 | do m = 1, pcnst 361 | ! dynamics tracers may be in a different order from cam tracer array 362 | m_ffsl=qsize_tracer_idx_cam2dyn(m) 363 | Atm(mytile)%q(i, j, k, m_ffsl) = Q_tmp(n, k, m) 364 | end do 365 | end do 366 | n = n + 1 367 | end do 368 | end do 369 | 370 | ! 
Update delp and mixing ratios to account for the difference between CAM and FV3 total air mass 371 | ! CAM total air mass (pdel) = (dry + vapor) 372 | ! FV3 total air mass (delp at beg of phys * mix ratio) = 373 | ! drymass + (vapor + condensate [liq_wat,ice_wat,rainwat,snowwat,graupel])*mix ratio 374 | ! FV3 tracer mixing ratios = tracer mass / FV3 total air mass 375 | ! convert the (dry+vap) mixing ratios to be based off of FV3 condensate loaded airmass (dry+vap+cond). When 376 | ! d_p_coupling/derive_phys_dry is called the mixing ratios are again parsed out into wet and 377 | ! dry for physics. 378 | num_wet_species=thermodynamic_active_species_num-dry_air_species_num 379 | ! recalculate ps based on new delp 380 | Atm(mytile)%ps(:,:)=hyai(1)*ps0 381 | do k=1,pver 382 | do j = js,je 383 | do i = is,ie 384 | do m = 1,pcnst 385 | tracermass(m)=Atm(mytile)%delp(i,j,k)*Atm(mytile)%q(i,j,k,m) 386 | end do 387 | fv3_totwatermass=sum(tracermass(thermodynamic_active_species_idx_dycore(1:num_wet_species))) 388 | fv3_airmass = delpdry(i,j,k) + fv3_totwatermass 389 | Atm(mytile)%delp(i,j,k) = fv3_airmass 390 | Atm(mytile)%q(i,j,k,1:pcnst) = tracermass(1:pcnst)/fv3_airmass 391 | Atm(mytile)%ps(i,j)=Atm(mytile)%ps(i,j)+Atm(mytile)%delp(i, j, k) 392 | end do 393 | end do 394 | end do 395 | call t_stopf('pd_copy') 396 | 397 | ! update dynamics temperature from physics tendency 398 | ! if using fv3_lcv_moist adjust temperature tendency to conserve energy across phys/dynamics 399 | ! interface accounting for differences in the moist/wet assumptions 400 | 401 | do k = 1, pver 402 | do j = js, je 403 | do i = is, ie 404 | if (fv3_scale_ttend) then 405 | qall=0._r8 406 | cpfv3=0._r8 407 | do nq=1,thermodynamic_active_species_num 408 | m_ffsl = thermodynamic_active_species_idx_dycore(nq) 409 | qall=qall+Atm(mytile)%q(i,j,k,m_ffsl) 410 | if (fv3_lcp_moist) cpfv3 = cpfv3+thermodynamic_active_species_cp(nq)*Atm(mytile)%q(i,j,k,m_ffsl) 411 | if (fv3_lcv_moist) cpfv3 = cpfv3+thermodynamic_active_species_cv(nq)*Atm(mytile)%q(i,j,k,m_ffsl) 412 | end do 413 | cpfv3=(1._r8-qall)*cp_air+cpfv3 414 | ! scale factor for t_dt so temperature tendency derived from CAM moist air (dry+vap - constant pressure) 415 | ! can be applied to FV3 wet air (dry+vap+cond - constant volume) 416 | 417 | t_tendadj(i,j,k)=cp_air/cpfv3 418 | 419 | if (.not.Atm(mytile)%flagstruct%hydrostatic) then 420 | ! update to nonhydrostatic variable delz to account for phys temperature adjustment. 
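! Rationale for the delz rescaling below: the layer mass delp is unchanged, so rho*delz is
! fixed; keeping the ideal-gas pressure p = rho*R*T consistent while pt is updated therefore
! requires delz to scale linearly with temperature. The three statements that follow implement
!   delz_new = delz_old * pt_new / pt_old
! (this is the standard ideal-gas/hypsometric reasoning, added here only as clarification).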
421 | Atm(mytile)%delz(i, j, k) = Atm(mytile)%delz(i,j,k)/Atm(mytile)%pt(i, j, k) 422 | Atm(mytile)%pt (i, j, k) = Atm(mytile)%pt (i, j, k) + t_dt(i, j, k)*dt*t_tendadj(i,j,k) 423 | Atm(mytile)%delz(i, j, k) = Atm(mytile)%delz(i,j,k)*Atm(mytile)%pt (i, j, k) 424 | else 425 | Atm(mytile)%pt (i, j, k) = Atm(mytile)%pt (i, j, k) + t_dt(i, j, k)*dt*t_tendadj(i,j,k) 426 | end if 427 | else 428 | Atm(mytile)%pt (i, j, k) = Atm(mytile)%pt (i, j, k) + t_dt(i, j, k)*dt 429 | end if 430 | end do 431 | end do 432 | end do 433 | 434 | !$omp parallel do private(i, j) 435 | do j=js,je 436 | do i=is,ie 437 | Atm(mytile)%pe(i,1,j) = Atm(mytile)%ptop 438 | Atm(mytile)%pk(i,j,1) = Atm(mytile)%ptop ** kappa 439 | Atm(mytile)%peln(i,1,j) = log(Atm(mytile)%ptop ) 440 | enddo 441 | enddo 442 | 443 | !$omp parallel do private(i,j,k) 444 | do j=js,je 445 | do k=1,pver 446 | do i=is,ie 447 | Atm(mytile)%pe(i,k+1,j) = Atm(mytile)%pe(i,k,j) + Atm(mytile)%delp(i,j,k) 448 | enddo 449 | enddo 450 | enddo 451 | 452 | !$omp parallel do private(i,j,k) 453 | do j=js,je 454 | do k=1,pver 455 | do i=is,ie 456 | Atm(mytile)%pk(i,j,k+1)= Atm(mytile)%pe(i,k+1,j) ** kappa 457 | Atm(mytile)%peln(i,k+1,j) = log(Atm(mytile)%pe(i,k+1,j)) 458 | Atm(mytile)%pkz(i,j,k) = (Atm(mytile)%pk(i,j,k+1)-Atm(mytile)%pk(i,j,k))/ & 459 | (kappa*(Atm(mytile)%peln(i,k+1,j)-Atm(mytile)%peln(i,k,j))) 460 | enddo 461 | enddo 462 | enddo 463 | 464 | do j = js, je 465 | call outfld('FU', RESHAPE(u_dt(is:ie, j, :),(/idim,pver/)), idim, j) 466 | call outfld('FV', RESHAPE(v_dt(is:ie, j, :),(/idim,pver/)), idim, j) 467 | call outfld('FT', RESHAPE(t_dt(is:ie, j, :),(/idim,pver/)), idim, j) 468 | end do 469 | 470 | call calc_tot_energy_dynamics(dyn_in%atm,'dAP') 471 | 472 | ! Set the D-grid winds from the physics A-grid winds/tendencies. 473 | if ( Atm(mytile)%flagstruct%dwind_2d ) then 474 | call endrun('dwind_2d update is not implemented') 475 | else 476 | call atend2dstate3d( u_dt, v_dt, Atm(mytile)%u ,Atm(mytile)%v, is, ie, js, je, & 477 | isd, ied, jsd, jed, npx,npy, nlev, Atm(mytile)%gridstruct, Atm(mytile)%domain, dt) 478 | endif 479 | 480 | ! Re-derive the A-grid winds from the D-grid winds so the energy diagnostics see a consistent wind. 481 | call cubed_to_latlon(Atm(mytile)%u, Atm(mytile)%v, Atm(mytile)%ua, Atm(mytile)%va, Atm(mytile)%gridstruct, & 482 | npx, npy, nlev, 1, Atm(mytile)%gridstruct%grid_type, Atm(mytile)%domain, & 483 | Atm(mytile)%gridstruct%nested, Atm(mytile)%flagstruct%c2l_ord, Atm(mytile)%bd) 484 | 485 | !$omp parallel do private(i, j) 486 | do j=js,je 487 | do i=is,ie 488 | Atm(mytile)%u_srf(i,j)=Atm(mytile)%ua(i,j,pver) 489 | Atm(mytile)%v_srf(i,j)=Atm(mytile)%va(i,j,pver) 490 | enddo 491 | enddo 492 | 493 | ! update halo regions 494 | call mpp_update_domains( Atm(mytile)%delp, Atm(mytile)%domain ) 495 | call mpp_update_domains( Atm(mytile)%ps, Atm(mytile)%domain ) 496 | call mpp_update_domains( Atm(mytile)%phis, Atm(mytile)%domain ) 497 | call mpp_update_domains( atm(mytile)%ps, Atm(mytile)%domain ) 498 | call mpp_update_domains( atm(mytile)%u,atm(mytile)%v, Atm(mytile)%domain, gridtype=DGRID_NE, complete=.true. 
) 499 | call mpp_update_domains( atm(mytile)%pt, Atm(mytile)%domain ) 500 | call mpp_update_domains( atm(mytile)%q, Atm(mytile)%domain ) 501 | 502 | deallocate(delpdry) 503 | deallocate(t_dt_tmp) 504 | deallocate(u_dt_tmp) 505 | deallocate(v_dt_tmp) 506 | deallocate(pdel_tmp) 507 | deallocate(pdeldry_tmp) 508 | deallocate(U_tmp) 509 | deallocate(V_tmp) 510 | deallocate(Q_tmp) 511 | deallocate(u_dt) 512 | deallocate(v_dt) 513 | deallocate(t_dt) 514 | deallocate(t_tendadj) 515 | 516 | end subroutine p_d_coupling 517 | 518 | !======================================================================= 519 | 520 | subroutine derived_phys_dry(phys_state, phys_tend, pbuf2d) 521 | 522 | use check_energy, only: check_energy_timestep_init 523 | use constituents, only: qmin 524 | use geopotential, only: geopotential_t 525 | use physics_buffer, only: physics_buffer_desc, pbuf_get_chunk 526 | use physics_types, only: set_wet_to_dry 527 | use air_composition, only: thermodynamic_active_species_num,thermodynamic_active_species_idx_dycore 528 | use air_composition, only: thermodynamic_active_species_idx,dry_air_species_num 529 | use ppgrid, only: pver 530 | use qneg_module, only: qneg3 531 | use shr_vmath_mod, only: shr_vmath_log 532 | 533 | ! arguments 534 | type(physics_state), intent(inout), dimension(begchunk:endchunk) :: phys_state 535 | type(physics_tend ), intent(inout), dimension(begchunk:endchunk) :: phys_tend 536 | type(physics_buffer_desc), pointer :: pbuf2d(:,:) 537 | 538 | ! local variables 539 | 540 | integer :: num_wet_species ! total number of wet species (first tracers in FV3 tracer array) 541 | integer :: lchnk 542 | integer :: m, i, k, ncol 543 | 544 | real(r8) :: cam_totwatermass, cam_airmass 545 | real(r8), dimension(pcnst) :: tracermass 546 | real(r8), dimension(pcols,pver) :: zvirv ! Local zvir array 547 | 548 | !---------------------------------------------------------------------------- 549 | 550 | type(physics_buffer_desc), pointer :: pbuf_chnk(:) 551 | 552 | ! 553 | ! Evaluate derived quantities 554 | ! 555 | ! At this point the phys_state has been filled in from dynamics, rearranging tracers to match CAM tracer order. 556 | ! pdel is consistent with the tracer array. 557 | ! All tracer mixing ratios at this point are calculated using dry+vap+condensates; we need to convert 558 | ! to CAM physics wet mixing ratios based on dry+vap. 559 | ! Following this loop, set_wet_to_dry is called to convert CAM's dry constituents to their dry mixing ratio. 560 | 561 | !!! omp parallel do private (lchnk, ncol, k, i, zvirv, pbuf_chnk,m,cam_airmass,cam_totwatermass) 562 | num_wet_species=thermodynamic_active_species_num-dry_air_species_num 563 | do lchnk = begchunk,endchunk 564 | ncol = get_ncols_p(lchnk) 565 | do k=1,pver 566 | do i=1,ncol 567 | phys_state(lchnk)%pdeldry(i,k) = & 568 | phys_state(lchnk)%pdel(i,k) * & 569 | (1._r8-sum(phys_state(lchnk)%q(i,k,thermodynamic_active_species_idx(1:num_wet_species)))) 570 | do m = 1,pcnst 571 | tracermass(m)=phys_state(lchnk)%pdel(i,k)*phys_state(lchnk)%q(i,k,m) 572 | end do 573 | cam_totwatermass=tracermass(1) 574 | cam_airmass = phys_state(lchnk)%pdeldry(i,k) + cam_totwatermass 575 | phys_state(lchnk)%pdel(i,k) = cam_airmass 576 | phys_state(lchnk)%q(i,k,1:pcnst) = tracermass(1:pcnst)/cam_airmass 577 | end do 578 | end do 579 | 580 | ! Physics state now has CAM pdel (dry+vap) and pdeldry and all constituents are dry+vap 581 | ! Convert dry type constituents from moist to dry mixing ratio 582 | ! 
583 | call set_wet_to_dry(phys_state(lchnk), convert_cnst_type='dry') ! Dynamics had moist, physics wants dry. 584 | 585 | ! 586 | ! Derive the rest of the pressure variables using pdel and pdeldry 587 | ! 588 | 589 | do i = 1, ncol 590 | phys_state(lchnk)%psdry(i) = hyai(1)*ps0 + sum(phys_state(lchnk)%pdeldry(i,:)) 591 | end do 592 | 593 | do i = 1, ncol 594 | phys_state(lchnk)%pintdry(i,1) = hyai(1)*ps0 595 | end do 596 | call shr_vmath_log(phys_state(lchnk)%pintdry(1:ncol,1), & 597 | phys_state(lchnk)%lnpintdry(1:ncol,1),ncol) 598 | do k = 1, pver 599 | do i = 1, ncol 600 | phys_state(lchnk)%pintdry(i,k+1) = phys_state(lchnk)%pintdry(i,k) + & 601 | phys_state(lchnk)%pdeldry(i,k) 602 | end do 603 | call shr_vmath_log(phys_state(lchnk)%pintdry(1:ncol,k+1),& 604 | phys_state(lchnk)%lnpintdry(1:ncol,k+1),ncol) 605 | end do 606 | 607 | do k=1,pver 608 | do i=1,ncol 609 | phys_state(lchnk)%rpdeldry(i,k) = 1._r8/phys_state(lchnk)%pdeldry(i,k) 610 | phys_state(lchnk)%pmiddry (i,k) = 0.5_r8*(phys_state(lchnk)%pintdry(i,k+1) + & 611 | phys_state(lchnk)%pintdry(i,k)) 612 | end do 613 | call shr_vmath_log(phys_state(lchnk)%pmiddry(1:ncol,k), & 614 | phys_state(lchnk)%lnpmiddry(1:ncol,k),ncol) 615 | end do 616 | 617 | ! initialize moist pressure variables 618 | 619 | do i=1,ncol 620 | phys_state(lchnk)%ps(i) = phys_state(lchnk)%pintdry(i,1) 621 | phys_state(lchnk)%pint(i,1) = phys_state(lchnk)%pintdry(i,1) 622 | end do 623 | do k = 1, pver 624 | do i=1,ncol 625 | phys_state(lchnk)%pint(i,k+1) = phys_state(lchnk)%pint(i,k)+phys_state(lchnk)%pdel(i,k) 626 | phys_state(lchnk)%pmid(i,k) = (phys_state(lchnk)%pint(i,k+1)+phys_state(lchnk)%pint(i,k))/2._r8 627 | phys_state(lchnk)%ps (i) = phys_state(lchnk)%ps(i) + phys_state(lchnk)%pdel(i,k) 628 | end do 629 | call shr_vmath_log(phys_state(lchnk)%pint(1:ncol,k),phys_state(lchnk)%lnpint(1:ncol,k),ncol) 630 | call shr_vmath_log(phys_state(lchnk)%pmid(1:ncol,k),phys_state(lchnk)%lnpmid(1:ncol,k),ncol) 631 | end do 632 | call shr_vmath_log(phys_state(lchnk)%pint(1:ncol,pverp),phys_state(lchnk)%lnpint(1:ncol,pverp),ncol) 633 | 634 | do k = 1, pver 635 | do i = 1, ncol 636 | phys_state(lchnk)%rpdel(i,k) = 1._r8/phys_state(lchnk)%pdel(i,k) 637 | phys_state(lchnk)%exner (i,k) = (phys_state(lchnk)%pint(i,pver+1) & 638 | / phys_state(lchnk)%pmid(i,k))**cappa 639 | end do 640 | end do 641 | 642 | ! fill zvirv 2D variables to be compatible with geopotential_t interface 643 | zvirv(:,:) = zvir 644 | 645 | ! Compute initial geopotential heights - based on full pressure 646 | call geopotential_t (phys_state(lchnk)%lnpint, phys_state(lchnk)%lnpmid , phys_state(lchnk)%pint , & 647 | phys_state(lchnk)%pmid , phys_state(lchnk)%pdel , phys_state(lchnk)%rpdel , & 648 | phys_state(lchnk)%t , phys_state(lchnk)%q(:,:,:), rairv(:,:,lchnk), gravit, zvirv , & 649 | phys_state(lchnk)%zi , phys_state(lchnk)%zm , ncol ) 650 | 651 | ! Compute initial dry static energy, include surface geopotential 652 | do k = 1, pver 653 | do i = 1, ncol 654 | phys_state(lchnk)%s(i,k) = cpair*phys_state(lchnk)%t(i,k) & 655 | + gravit*phys_state(lchnk)%zm(i,k) + phys_state(lchnk)%phis(i) 656 | end do 657 | end do 658 | ! Ensure tracers are all positive 659 | call qneg3('D_P_COUPLING',lchnk ,ncol ,pcols ,pver , & 660 | 1, pcnst, qmin ,phys_state(lchnk)%q) 661 | 662 | ! Compute energy and water integrals of input state 663 | pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk) 664 | call check_energy_timestep_init(phys_state(lchnk), phys_tend(lchnk), pbuf_chnk) 665 | 666 | end do ! 
lchnk 667 | 668 | end subroutine derived_phys_dry 669 | 670 | subroutine atend2dstate3d(u_dt, v_dt, u, v, is, ie, js, je, isd, ied, jsd, jed, npx,npy, nlev, gridstruct, domain, dt) 671 | !---------------------------------------------------------------------------- 672 | ! This routine adds the a-grid wind tendencies returned by the physics to the d-state 673 | ! wind being sent to the dynamics. 674 | !---------------------------------------------------------------------------- 675 | 676 | use fv_arrays_mod, only: fv_grid_type 677 | use mpp_domains_mod, only: mpp_update_domains, DGRID_NE 678 | 679 | ! arguments 680 | integer, intent(in) :: npx,npy, nlev 681 | integer, intent(in) :: is, ie, js, je,& 682 | isd, ied, jsd, jed 683 | real(r8), intent(in) :: dt 684 | real(r8), intent(inout), dimension(isd:ied,jsd:jed,nlev) :: u_dt, v_dt 685 | real(r8), intent(inout), dimension(isd:ied, jsd:jed+1,nlev) :: u 686 | real(r8), intent(inout), dimension(isd:ied+1,jsd:jed ,nlev) :: v 687 | type(domain2d), intent(inout) :: domain 688 | type(fv_grid_type), intent(in), target :: gridstruct 689 | 690 | ! local: 691 | 692 | integer i, j, k, im2, jm2 693 | real(r8) dt5 694 | real(r8), dimension(is-1:ie+1,js:je+1,3) :: ue ! 3D winds at edges 695 | real(r8), dimension(is-1:ie+1,js-1:je+1,3) :: v3 696 | real(r8), dimension(is:ie+1,js-1:je+1, 3) :: ve ! 3D winds at edges 697 | real(r8), dimension(is:ie) :: ut1, ut2, ut3 698 | real(r8), dimension(js:je) :: vt1, vt2, vt3 699 | real(r8), pointer, dimension(:) :: edge_vect_w, edge_vect_e, edge_vect_s, edge_vect_n 700 | real(r8), pointer, dimension(:,:,:) :: vlon, vlat 701 | real(r8), pointer, dimension(:,:,:,:) :: es, ew 702 | 703 | !---------------------------------------------------------------------------- 704 | 705 | es => gridstruct%es 706 | ew => gridstruct%ew 707 | vlon => gridstruct%vlon 708 | vlat => gridstruct%vlat 709 | 710 | edge_vect_w => gridstruct%edge_vect_w 711 | edge_vect_e => gridstruct%edge_vect_e 712 | edge_vect_s => gridstruct%edge_vect_s 713 | edge_vect_n => gridstruct%edge_vect_n 714 | 715 | call mpp_update_domains(u_dt, domain, complete=.false.) 716 | call mpp_update_domains(v_dt, domain, complete=.true.) 717 | 718 | dt5 = 0.5_r8 * dt 719 | im2 = (npx-1)/2 720 | jm2 = (npy-1)/2 721 | 722 | !$OMP parallel do default(none) shared(is,ie,js,je,nlev,gridstruct,u,dt5,u_dt,v,v_dt, & 723 | !$OMP vlon,vlat,jm2,edge_vect_w,npx,edge_vect_e,im2, & 724 | !$OMP edge_vect_s,npy,edge_vect_n,es,ew) & 725 | !$OMP private(ut1, ut2, ut3, vt1, vt2, vt3, ue, ve, v3) 726 | do k=1, nlev 727 | 728 | ! Compute 3D wind/tendency on A grid 729 | do j=js-1,je+1 730 | do i=is-1,ie+1 731 | v3(i,j,1) = u_dt(i,j,k)*vlon(i,j,1) + v_dt(i,j,k)*vlat(i,j,1) 732 | v3(i,j,2) = u_dt(i,j,k)*vlon(i,j,2) + v_dt(i,j,k)*vlat(i,j,2) 733 | v3(i,j,3) = u_dt(i,j,k)*vlon(i,j,3) + v_dt(i,j,k)*vlat(i,j,3) 734 | enddo 735 | enddo 736 | 737 | ! Interpolate to cell edges 738 | do j=js,je+1 739 | do i=is-1,ie+1 740 | ue(i,j,1) = v3(i,j-1,1) + v3(i,j,1) 741 | ue(i,j,2) = v3(i,j-1,2) + v3(i,j,2) 742 | ue(i,j,3) = v3(i,j-1,3) + v3(i,j,3) 743 | enddo 744 | enddo 745 | 746 | do j=js-1,je+1 747 | do i=is,ie+1 748 | ve(i,j,1) = v3(i-1,j,1) + v3(i,j,1) 749 | ve(i,j,2) = v3(i-1,j,2) + v3(i,j,2) 750 | ve(i,j,3) = v3(i-1,j,3) + v3(i,j,3) 751 | enddo 752 | enddo 753 | 754 | ! --- E_W edges (for v-wind): 755 | if (.not. 
gridstruct%nested) then 756 | if ( is==1) then 757 | i = 1 758 | do j=js,je 759 | if ( j>jm2 ) then 760 | vt1(j) = edge_vect_w(j)*ve(i,j-1,1)+(1._r8-edge_vect_w(j))*ve(i,j,1) 761 | vt2(j) = edge_vect_w(j)*ve(i,j-1,2)+(1._r8-edge_vect_w(j))*ve(i,j,2) 762 | vt3(j) = edge_vect_w(j)*ve(i,j-1,3)+(1._r8-edge_vect_w(j))*ve(i,j,3) 763 | else 764 | vt1(j) = edge_vect_w(j)*ve(i,j+1,1)+(1._r8-edge_vect_w(j))*ve(i,j,1) 765 | vt2(j) = edge_vect_w(j)*ve(i,j+1,2)+(1._r8-edge_vect_w(j))*ve(i,j,2) 766 | vt3(j) = edge_vect_w(j)*ve(i,j+1,3)+(1._r8-edge_vect_w(j))*ve(i,j,3) 767 | endif 768 | enddo 769 | do j=js,je 770 | ve(i,j,1) = vt1(j) 771 | ve(i,j,2) = vt2(j) 772 | ve(i,j,3) = vt3(j) 773 | enddo 774 | endif 775 | 776 | if ( (ie+1)==npx ) then 777 | i = npx 778 | do j=js,je 779 | if ( j>jm2 ) then 780 | vt1(j) = edge_vect_e(j)*ve(i,j-1,1)+(1._r8-edge_vect_e(j))*ve(i,j,1) 781 | vt2(j) = edge_vect_e(j)*ve(i,j-1,2)+(1._r8-edge_vect_e(j))*ve(i,j,2) 782 | vt3(j) = edge_vect_e(j)*ve(i,j-1,3)+(1._r8-edge_vect_e(j))*ve(i,j,3) 783 | else 784 | vt1(j) = edge_vect_e(j)*ve(i,j+1,1)+(1._r8-edge_vect_e(j))*ve(i,j,1) 785 | vt2(j) = edge_vect_e(j)*ve(i,j+1,2)+(1._r8-edge_vect_e(j))*ve(i,j,2) 786 | vt3(j) = edge_vect_e(j)*ve(i,j+1,3)+(1._r8-edge_vect_e(j))*ve(i,j,3) 787 | endif 788 | enddo 789 | do j=js,je 790 | ve(i,j,1) = vt1(j) 791 | ve(i,j,2) = vt2(j) 792 | ve(i,j,3) = vt3(j) 793 | enddo 794 | endif 795 | ! N-S edges (for u-wind): 796 | if ( js==1) then 797 | j = 1 798 | do i=is,ie 799 | if ( i>im2 ) then 800 | ut1(i) = edge_vect_s(i)*ue(i-1,j,1)+(1._r8-edge_vect_s(i))*ue(i,j,1) 801 | ut2(i) = edge_vect_s(i)*ue(i-1,j,2)+(1._r8-edge_vect_s(i))*ue(i,j,2) 802 | ut3(i) = edge_vect_s(i)*ue(i-1,j,3)+(1._r8-edge_vect_s(i))*ue(i,j,3) 803 | else 804 | ut1(i) = edge_vect_s(i)*ue(i+1,j,1)+(1._r8-edge_vect_s(i))*ue(i,j,1) 805 | ut2(i) = edge_vect_s(i)*ue(i+1,j,2)+(1._r8-edge_vect_s(i))*ue(i,j,2) 806 | ut3(i) = edge_vect_s(i)*ue(i+1,j,3)+(1._r8-edge_vect_s(i))*ue(i,j,3) 807 | endif 808 | enddo 809 | do i=is,ie 810 | ue(i,j,1) = ut1(i) 811 | ue(i,j,2) = ut2(i) 812 | ue(i,j,3) = ut3(i) 813 | enddo 814 | endif 815 | if ( (je+1)==npy ) then 816 | j = npy 817 | do i=is,ie 818 | if ( i>im2 ) then 819 | ut1(i) = edge_vect_n(i)*ue(i-1,j,1)+(1._r8-edge_vect_n(i))*ue(i,j,1) 820 | ut2(i) = edge_vect_n(i)*ue(i-1,j,2)+(1._r8-edge_vect_n(i))*ue(i,j,2) 821 | ut3(i) = edge_vect_n(i)*ue(i-1,j,3)+(1._r8-edge_vect_n(i))*ue(i,j,3) 822 | else 823 | ut1(i) = edge_vect_n(i)*ue(i+1,j,1)+(1._r8-edge_vect_n(i))*ue(i,j,1) 824 | ut2(i) = edge_vect_n(i)*ue(i+1,j,2)+(1._r8-edge_vect_n(i))*ue(i,j,2) 825 | ut3(i) = edge_vect_n(i)*ue(i+1,j,3)+(1._r8-edge_vect_n(i))*ue(i,j,3) 826 | endif 827 | enddo 828 | do i=is,ie 829 | ue(i,j,1) = ut1(i) 830 | ue(i,j,2) = ut2(i) 831 | ue(i,j,3) = ut3(i) 832 | enddo 833 | endif 834 | 835 | endif ! .not. nested 836 | 837 | do j=js,je+1 838 | do i=is,ie 839 | u(i,j,k) = u(i,j,k) + dt5*( ue(i,j,1)*es(1,i,j,1) + & 840 | ue(i,j,2)*es(2,i,j,1) + & 841 | ue(i,j,3)*es(3,i,j,1) ) 842 | enddo 843 | enddo 844 | do j=js,je 845 | do i=is,ie+1 846 | v(i,j,k) = v(i,j,k) + dt5*( ve(i,j,1)*ew(1,i,j,2) + & 847 | ve(i,j,2)*ew(2,i,j,2) + & 848 | ve(i,j,3)*ew(3,i,j,2) ) 849 | enddo 850 | enddo 851 | enddo ! k-loop 852 | 853 | call mpp_update_domains(u, v, domain, gridtype=DGRID_NE) 854 | 855 | end subroutine atend2dstate3d 856 | 857 | 858 | subroutine fv3_tracer_diags(atm) 859 | 860 | ! 
Dry/Wet surface pressure diagnostics 861 | 862 | use constituents, only: pcnst 863 | use dimensions_mod, only: nlev,cnst_name_ffsl 864 | use dyn_grid, only: mytile 865 | use fv_arrays_mod, only: fv_atmos_type 866 | use air_composition, only: thermodynamic_active_species_num,thermodynamic_active_species_idx_dycore, & 867 | dry_air_species_num 868 | use fv_eta_mod, only: get_eta_level 869 | ! arguments 870 | type (fv_atmos_type), intent(in), pointer :: Atm(:) 871 | 872 | ! Locals 873 | integer :: i, j ,k, m,is,ie,js,je 874 | integer :: num_wet_species ! total number of wet species 875 | integer :: kstrat,ng 876 | real(r8) :: global_ps,global_dryps 877 | real(r8) :: qm_strat 878 | real(r8) :: qtot(pcnst), psum 879 | real(r8), allocatable, dimension(:,:,:) :: delpdry, psq 880 | real(r8), allocatable, dimension(:,:) :: psdry, q_strat 881 | real(r8), allocatable, dimension(:) :: phalf,pfull 882 | real(r8) :: p_ref = 1.E5_r8 !< Surface pressure used to construct a horizontally-uniform reference 883 | !---------------------------------------------------------------------------- 884 | 885 | is = Atm(mytile)%bd%is 886 | ie = Atm(mytile)%bd%ie 887 | js = Atm(mytile)%bd%js 888 | je = Atm(mytile)%bd%je 889 | ng = Atm(mytile)%ng 890 | 891 | allocate(delpdry(is:ie,js:je,nlev)) 892 | allocate(psdry(is:ie,js:je)) 893 | allocate(psq(is:ie,js:je,pcnst)) 894 | allocate(q_strat(is:ie,js:je)) 895 | num_wet_species=thermodynamic_active_species_num-dry_air_species_num 896 | do k=1,nlev 897 | do j = js, je 898 | do i = is, ie 899 | delpdry(i,j,k) = Atm(mytile)%delp(i,j,k) * & 900 | (1.0_r8-sum(Atm(mytile)%q(i,j,k,thermodynamic_active_species_idx_dycore(1:num_wet_species)))) 901 | end do 902 | end do 903 | end do 904 | ! 905 | ! get psdry 906 | ! 907 | do j = js, je 908 | do i = is, ie 909 | psdry(i,j) = hyai(1)*ps0 + sum(delpdry(i,j,:)) 910 | end do 911 | end do 912 | 913 | global_ps = g_sum(Atm(mytile)%domain, Atm(mytile)%ps(is:ie,js:je), is, ie, js, je, & 914 | Atm(mytile)%ng, Atm(mytile)%gridstruct%area_64, 1) 915 | global_dryps = g_sum(Atm(mytile)%domain, psdry(is:ie,js:je), is, ie, js, je, & 916 | Atm(mytile)%ng, Atm(mytile)%gridstruct%area_64, 1) 917 | !------------------- 918 | ! Vertical mass sum for all tracers 919 | !------------------- 920 | psq(:,:,:) = 0._r8 921 | do m=1,pcnst 922 | call z_sum(Atm,is,ie,js,je,nlev,Atm(mytile)%q(is:ie,js:je,1:nlev,m),psq(is:ie,js:je,m)) 923 | end do 924 | ! Mean water vapor in the "stratosphere" (75 mb and above): 925 | qm_strat = 0._r8 926 | allocate ( phalf(Atm(mytile)%npz+1) ) 927 | allocate ( pfull(Atm(mytile)%npz) ) 928 | call get_eta_level(Atm(mytile)%npz, p_ref, pfull, phalf, Atm(mytile)%ak, Atm(mytile)%bk, 0.01_r8) 929 | if ( phalf(2)< 75._r8 ) then 930 | kstrat = 1 931 | do k=2,nlev 932 | if ( phalf(k+1) > 75._r8 ) exit 933 | kstrat = k 934 | enddo 935 | call z_sum(Atm,is,ie,js,je, kstrat, Atm(mytile)%q(is:ie,js:je,1:kstrat,1 ), q_strat,psum) 936 | qm_strat = g_sum(Atm(mytile)%domain, q_strat(is:ie,js:je), is, ie, js, je, & 937 | Atm(mytile)%ng, Atm(mytile)%gridstruct%area_64, 1) * 1.e6_r8 / psum 938 | endif 939 | 940 | !------------------- 941 | ! 
Get global mean mass for all tracers 942 | !------------------- 943 | do m=1,pcnst 944 | qtot(m) = g_sum(Atm(mytile)%domain, psq(is,js,m), is, ie, js, je, & 945 | Atm(mytile)%ng, Atm(mytile)%gridstruct%area_64, 1)/gravit 946 | enddo 947 | 948 | if (masterproc) then 949 | write(iulog,*)'Total Surface Pressure (mb) = ',global_ps/100.0_r8,"hPa" 950 | write(iulog,*)'Mean Dry Surface Pressure (mb) = ',global_dryps/100.0_r8,"hPa" 951 | write(iulog,*)'Mean specific humidity (mg/kg) above 75 mb = ',qm_strat 952 | do m=1,pcnst 953 | write(iulog,*)' Total '//cnst_name_ffsl(m)//' (kg/m**2) = ',qtot(m) 954 | enddo 955 | end if 956 | 957 | 958 | deallocate(delpdry) 959 | deallocate(psdry) 960 | deallocate(psq) 961 | deallocate(pfull) 962 | deallocate(phalf) 963 | deallocate(q_strat) 964 | end subroutine fv3_tracer_diags 965 | 966 | 967 | subroutine z_sum(atm,is,ie,js,je,km,q,msum,gpsum) 968 | 969 | ! vertical integral 970 | 971 | use fv_arrays_mod, only: fv_atmos_type 972 | 973 | ! arguments 974 | 975 | type (fv_atmos_type), intent(in), pointer :: Atm(:) 976 | integer, intent(in) :: is, ie, js, je 977 | integer, intent(in) :: km 978 | real(r8), intent(in), dimension(is:ie, js:je, km) :: q 979 | real(r8), intent(out), dimension(is:ie,js:je) :: msum 980 | real(r8), intent(out), optional :: gpsum 981 | 982 | ! LOCAL VARIABLES 983 | integer :: i,j,k 984 | real(r8), dimension(is:ie,js:je) :: psum 985 | !---------------------------------------------------------------------------- 986 | msum=0._r8 987 | psum=0._r8 988 | do j=js,je 989 | do i=is,ie 990 | msum(i,j) = Atm(mytile)%delp(i,j,1)*q(i,j,1) 991 | psum(i,j) = Atm(mytile)%delp(i,j,1) 992 | enddo 993 | do k=2,km 994 | do i=is,ie 995 | msum(i,j) = msum(i,j) + Atm(mytile)%delp(i,j,k)*q(i,j,k) 996 | psum(i,j) = psum(i,j) + Atm(mytile)%delp(i,j,k) 997 | enddo 998 | enddo 999 | enddo 1000 | if (present(gpsum)) then 1001 | gpsum = g_sum(Atm(mytile)%domain, psum, is, ie, js, je, Atm(mytile)%ng, Atm(mytile)%gridstruct%area_64, 1) 1002 | end if 1003 | end subroutine z_sum 1004 | 1005 | end module dp_coupling 1006 | --------------------------------------------------------------------------------
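A note on the cell-center global index used in get_dyn_grid_info and define_cam_grids, gindex = ((j-1)*(npx-1)+i) + (npx-1)*(npy-1)*(tile-1): the sketch below is a minimal standalone illustration of that formula only; the program name and the npx/npy values are hypothetical (chosen to resemble a C48 tile) and are not part of the sources above.

program ffsl_gindex_demo
   ! Illustrates the cell-center global numbering: each cubed-sphere tile holds
   ! (npx-1)*(npy-1) cell centers, numbered row by row, and tiles are numbered
   ! consecutively (tile 1 first, tile 6 last).
   implicit none
   integer, parameter :: npx = 49, npy = 49   ! hypothetical tile corner counts (assumption)
   integer :: i, j, tile, gindex

   i = 1; j = 1; tile = 1
   gindex = ((j-1)*(npx-1) + i) + (npx-1)*(npy-1)*(tile-1)
   print *, 'first cell of tile 1 ->', gindex   ! prints 1

   i = npx-1; j = npy-1; tile = 6
   gindex = ((j-1)*(npx-1) + i) + (npx-1)*(npy-1)*(tile-1)
   print *, 'last cell of tile 6  ->', gindex   ! prints (npx-1)*(npy-1)*6 = 13824
end program ffsl_gindex_demo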