├── .gitignore ├── Makefile ├── README.md ├── authorship ├── ACKNOWLEDGEMENTS.md ├── AUTHORS.md └── LICENSE ├── examples ├── autoTesting.py ├── single_phase │ ├── coarse_turb_channel_sp │ │ ├── dns.in │ │ └── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ ├── dhc_sp │ │ ├── dns.in │ │ ├── heat_transfer_sp.in │ │ └── post_sp.in │ ├── laminar_channel_sp │ │ ├── dns.in │ │ └── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ ├── laminar_ldc_2D_sp │ │ ├── README_test.md │ │ ├── data_ldc_re1000.txt │ │ ├── dns.in │ │ ├── test_ldc_sp.py │ │ └── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ └── laminar_ldc_sp │ │ ├── dns.in │ │ └── visu_ex │ │ ├── gen_xdmf.f90 │ │ ├── genview.sh │ │ └── param.h90 ├── two_phase_ht │ ├── coarse_two_layer_rb │ │ ├── dns.in │ │ ├── heat_transfer.in │ │ ├── post.in │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ │ └── vof.in │ ├── dhc │ │ ├── README_test.md │ │ ├── dns.in │ │ ├── heat_transfer.in │ │ ├── nusselt_ref.out │ │ ├── post.in │ │ ├── test_dhc.py │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ │ └── vof.in │ └── droplet_rb │ │ ├── bub.in │ │ ├── dns.in │ │ ├── heat_transfer.in │ │ ├── post.in │ │ ├── post_vof.in │ │ ├── visu_ex │ │ ├── gen_xdmf.f90 │ │ ├── genview.sh │ │ └── param.h90 │ │ └── vof.in ├── two_phase_inc_isot │ ├── coarse_turb_channel │ │ ├── dns.in │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ │ └── vof.in │ ├── laminar_channel │ │ ├── dns.in │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ │ └── vof.in │ ├── laminar_ldc │ │ ├── dns.in │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── param.h90 │ │ └── vof.in │ ├── laminar_ldc_2D │ │ ├── README_test.md │ │ ├── data_ldc_re1000.txt │ │ ├── dns.in │ │ ├── test_ldc.py │ │ ├── visu_ex │ │ │ ├── gen_xdmf.f90 │ │ │ ├── genview.sh │ │ │ └── 
param.h90 │ │ └── vof.in │ └── rising_bubble_3d │ │ ├── README_test.md │ │ ├── dns.in │ │ ├── pos_vt_ref.out │ │ ├── test_bub.py │ │ ├── visu_ex │ │ ├── gen_xdmf.f90 │ │ ├── genview.sh │ │ └── param.h90 │ │ └── vof.in └── two_phase_inc_isot_turb │ └── abc_triperiodic │ ├── README_test.md │ ├── dns.in │ ├── forcing.in │ ├── ke_t_ref.out │ ├── test_abc.py │ ├── visu_ex │ ├── gen_xdmf.f90 │ ├── genview.sh │ └── param.h90 │ └── vof.in ├── getting_started ├── HOW_TO_USE.md ├── INFO_INPUT.md ├── INFO_VISU.md └── REQ.md ├── src ├── 2decomp │ ├── LICENSE │ ├── alloc.f90 │ ├── decomp_2d.f90 │ ├── factor.f90 │ ├── halo.f90 │ ├── halo_common.f90 │ ├── io.f90 │ ├── io_read_one.f90 │ ├── io_read_var.f90 │ ├── io_write_every.f90 │ ├── io_write_one.f90 │ ├── io_write_plane.f90 │ ├── io_write_var.f90 │ ├── transpose_x_to_y.f90 │ ├── transpose_x_to_z.f90 │ ├── transpose_y_to_x.f90 │ ├── transpose_y_to_z.f90 │ ├── transpose_z_to_x.f90 │ └── transpose_z_to_y.f90 ├── Makefile ├── apps │ ├── single_phase │ │ ├── app.single_phase │ │ ├── main__single_phase.f90 │ │ ├── param.f90 │ │ └── postp.single_phase │ │ │ ├── allocation_x.h90 │ │ │ ├── allocation_y.h90 │ │ │ ├── allocation_z.h90 │ │ │ ├── out1d.h90 │ │ │ ├── out2d.h90 │ │ │ ├── out3d.h90 │ │ │ ├── time_averaging_gas.h90 │ │ │ └── time_averaging_heat_transfer.h90 │ ├── two_phase_ht │ │ ├── app.two_phase_ht │ │ ├── main__two_phase_ht.f90 │ │ ├── param.f90 │ │ └── postp.two_phase_ht │ │ │ ├── allocation_x.h90 │ │ │ ├── allocation_y.h90 │ │ │ ├── allocation_z.h90 │ │ │ ├── out1d.h90 │ │ │ ├── out2d.h90 │ │ │ ├── out3d.h90 │ │ │ ├── time_averaging_gas.h90 │ │ │ ├── time_averaging_heat_transfer.h90 │ │ │ └── time_averaging_liquid.h90 │ ├── two_phase_inc_isot │ │ ├── app.two_phase_inc_isot │ │ ├── main__two_phase_inc_isot.f90 │ │ ├── param.f90 │ │ └── postp.two_phase_inc_isot │ │ │ ├── dropcheck.h90 │ │ │ ├── out1d.h90 │ │ │ ├── out2d.h90 │ │ │ └── out3d.h90 │ └── two_phase_inc_isot_turb │ │ ├── app.two_phase_inc_isot_turb │ │ ├── 
main__two_phase_inc_isot_turb.f90 │ │ ├── param.f90 │ │ └── postp.two_phase_inc_isot_turb │ │ ├── dropcheck.h90 │ │ ├── out1d.h90 │ │ ├── out2d.h90 │ │ └── out3d.h90 ├── bound.f90 ├── chkdiv.f90 ├── chkdt.f90 ├── cmpt_divth.f90 ├── common_mpi.f90 ├── correc.f90 ├── data │ └── clean.sh ├── debug.f90 ├── fft.f90 ├── fftw.f90 ├── fillps.f90 ├── funcs.f90 ├── gradls.f90 ├── initflow.f90 ├── initgrid.f90 ├── initmpi.f90 ├── initsolver.f90 ├── load.f90 ├── make.deps ├── mom.f90 ├── moms.f90 ├── output.f90 ├── postprocessing │ ├── post.f90 │ ├── readme.txt │ └── tagging.f90 ├── profiler.f90 ├── rk.f90 ├── rks.f90 ├── sanity.f90 ├── solver_cpu.f90 ├── solver_gpu.f90 ├── source.f90 ├── targets │ ├── target.generic-cray │ ├── target.generic-gnu │ ├── target.generic-gpu │ ├── target.generic-intel │ ├── target.generic-nvf │ ├── target.raplab-cpu_gnu │ ├── target.raplab-cpu_nvhpc │ └── target.raplab-gpu ├── types.f90 └── vof.f90 └── utils ├── load_and_sbatch ├── fram │ ├── compiler.out │ ├── load_fram.sh │ └── sbatch_fram.sh └── marconi100 │ ├── run.sh │ ├── run_interactively │ ├── run_profiling.sh │ └── toMake ├── plot2d ├── param.py ├── plot_2d_flow_slice.py ├── pltparams.py └── readme.txt ├── postprocessing ├── clean.sh └── reorgDrop.py ├── preprocessing └── randomDroplet.py ├── profilers ├── run.sh ├── wrap_ncu.sh └── wrap_nsys.sh ├── read_binary_data ├── matlab │ └── read_single_field_binary.m └── python │ └── read_single_field_binary.py ├── templates └── template_module.f90 ├── testing ├── autoTesting.py └── templateTest │ ├── dns.in │ ├── test.stp │ └── vof.in └── visualize_fields └── gen_xdmf_easy └── write_xdmf.py /.gitignore: -------------------------------------------------------------------------------- 1 | log 2 | src/*.mod 3 | src/*.out 4 | src/*.bin 5 | src/*.MOD 6 | src/*.log 7 | src/*log 8 | src/*dSYM 9 | src/*.xdmf 10 | src/*.xmf 11 | src/*.in 12 | src/flutas* 13 | src/**/*.o 14 | src/**/**/*.o 15 | src/data/ 16 | utils/postprocessing/*.out 17 | examples/*.out 
18 | examples/*.log 19 | examples/*log 20 | 21 | 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Build all flutas.* binaries at once 2 | 3 | .PHONY: all clean 4 | 5 | .NOTPARALLEL: 6 | 7 | ARCH ?= generic-gnu 8 | USE_FAST_KERNELS ?= 0 9 | USE_NVTX ?= 0 10 | 11 | APP_LIST=single_phase two_phase_ht two_phase_inc_isot two_phase_inc_isot_turb 12 | 13 | all: 14 | @for idapp in $(APP_LIST); \ 15 | do \ 16 | make -C ./src clean-obj; make -C ./src ARCH=$(ARCH) APP=$${idapp} USE_FAST_KERNELS=$(USE_FAST_KERNELS) USE_NVTX=$(USE_NVTX) -j4 flutas.$${idapp}; \ 17 | done 18 | 19 | clean-obj: 20 | make -C ./src clean-obj 21 | 22 | clean: clean 23 | @for idapp in $(APP_LIST); \ 24 | do \ 25 | make -C ./src ARCH=$(ARCH) APP=$${idapp} clean; \ 26 | done 27 | -------------------------------------------------------------------------------- /authorship/ACKNOWLEDGEMENTS.md: -------------------------------------------------------------------------------- 1 | ## Acknowledgements 2 | 3 | The work at KTH involving different students and postdocs has been mainly financed by the European Research Council grant no. ERC-2013-CoG-616186, TRITOS and by the Swedish Research Council through different grants: VR 2014-5001 (outstanding young researcher to Luca Brandt) and Grant No. 2016-06119 (the multidisciplinary research environment INTERFACE, Hybrid multiscale modelling of transport phenomena for energy efficient processes). 4 | Computer time has been provided by SNIC (Swedish National Infrastructure for Computing) and by the National Infrastructure for High Performance Computing and Data Storage in Norway (project no. NN9561K). 5 | 6 | As regards the GPU implementation, the authors would like to acknowledge the allocation IsC84_CANSGPU, provided at CINECA on MARCONI100, used during the code development. 
We would also like to thank the staff at CINECA, notably Massimiliano Guarrasi and Fabio Pitarri for their help in the development of the GPU functionalities at an early stage of the work. 7 | 8 | We thank Francesco De Vita for the help with the original implementation of the VoF MTHINC method. 9 | 10 | We would also like to thank the researchers of NVIDIA among the [authors](AUTHORS.md), for the help provided in the GPU porting, ensuring that the code runs efficiently on many-GPU systems, and also for contributing to an improved software development workflow. 11 | -------------------------------------------------------------------------------- /authorship/AUTHORS.md: -------------------------------------------------------------------------------- 1 | FluTAS originated from a collective effort by researchers who worked in the group of Prof. Luca Brandt at KTH Engineering Mechanics (Sweden), an effort which promptly spread beyond the Swedish border. 2 | 3 | **List of authors** 4 | * Luca Brandt (KTH/NTNU) 5 | * Pedro Costa (University of Iceland) 6 | * Marco Crialesi-Esposito (KTH) 7 | * Andreas Demou (KTH) 8 | * Massimiliano Fatica (NVIDIA) 9 | * Everett Phillips (NVIDIA) 10 | * Marco Edoardo Rosti (Okinawa Institute of Science and Technology) 11 | * Salar Zamani Salimi (NTNU) 12 | * Nicolo Scapin (KTH) 13 | * Armin Shahmardi (KTH) 14 | * Filippo Spiga (NVIDIA) 15 | -------------------------------------------------------------------------------- /authorship/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021-2022 Authors (see AUTHORS.md) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 9 | of the Software, 
and to permit persons to whom the Software is furnished to do 10 | so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/autoTesting.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file performs the automatic testing for all the cases in ......... 
3 | ''' 4 | 5 | import numpy as np 6 | import glob 7 | import os 8 | import re 9 | import pytest 10 | 11 | def readstp(fn): 12 | f = open(fn, 'r') 13 | for i in f: 14 | exec(i) 15 | f.close() 16 | globals().update(locals()) 17 | 18 | def readin(fn,nin): 19 | f = open(fn, 'r') 20 | data = f.readlines() 21 | return data[nin] 22 | 23 | def modin(fn,nin,strIn): 24 | f = open(fn, 'r') 25 | data = f.readlines() 26 | f.close() 27 | data[nin] = strIn 28 | with open(fn, 'w') as file: 29 | file.writelines( data ) 30 | return 31 | 32 | def chkRes(): 33 | return 34 | ############################################################# 35 | # define execution variables 36 | wfold = os.getcwd() 37 | mainFold = wfold[:[_.start() for _ in re.finditer('/',wfold)][-1]] 38 | compiler = 'generic-gnu' 39 | #compiler = 'generic-nvf' 40 | #compiler = 'generic-gpu' 41 | tfold = 'utils/testing/templateTest/' 42 | # 43 | doDBG = ['0']#,'1'] 44 | doCuda= ['1'] 45 | doProc= ['2 2\n']#,'2 2\n'] 46 | #doProc= ['1 1\n']#,'2 2\n'] 47 | #doProc= ['1 2\n']#,'2 2\n'] 48 | # 49 | cdir = os.getcwd() 50 | apps = [ name for name in os.listdir() if os.path.isdir(os.path.join(name))] 51 | 52 | os.system('rm '+wfold+'/test.out') 53 | fileOut = open('test.out','w') 54 | for dbgFlag in doDBG: 55 | for cudaFlag in doCuda: 56 | for app in apps: 57 | os.chdir(wfold+'/'+app) 58 | testcases = [ name for name in os.listdir() if os.path.isdir(os.path.join(name))] 59 | print(testcases) 60 | os.chdir(mainFold+'/src') 61 | # 62 | os.system('make clean APP='+app+'>/dev/null;'+ 63 | #'make APP='+app+' ARCH='+compiler+ ' Do_DBG='+dbgFlag+' -j8>/dev/null') 64 | 'make APP='+app+' ARCH='+compiler+ ' DO_DBG='+dbgFlag+' -j8>/dev/null') 65 | # 66 | if 'flutas' in os.listdir(): 67 | print('app '+app+', debug='+dbgFlag+' : compiled-------> PASSED') 68 | fileOut.write('app '+app+', debug='+dbgFlag+' : compiled -------> PASSED\n') 69 | for case in testcases: 70 | #print(case) 71 | #print(testcases) 72 | os.chdir(wfold+'/'+app+'/'+case) 
73 | if len(glob.glob('test*'))>0: 74 | os.system('cp '+mainFold+'/src/flutas .') 75 | for procN in doProc: 76 | tmp = procN.split(' ') 77 | nmpi = int(tmp[0])*int(tmp[1]) 78 | modin('dns.in',-2,procN) 79 | os.system('mpirun.openmpi -np '+str(nmpi)+' flutas>/dev/null') 80 | testout = pytest.main() 81 | testout = str(testout)[str(testout).find('.')+1:] 82 | fileOut.write('test '+case+' np='+str(nmpi)+'-------> '+testout+'\n') 83 | #os.system('rm -rf data __pycache__ flutas;'+ 84 | os.system('rm -rf data && rm -rf .pytest_cache && rm -rf __pycache__ && rm -rf flutas*;'+ 85 | 'git checkout *.in') 86 | else: 87 | print('test '+app+', debug='+dbgFlag+' : compilation failed \n') 88 | exit() 89 | fileOut.close() 90 | -------------------------------------------------------------------------------- /examples/single_phase/coarse_turb_channel_sp/dns.in: -------------------------------------------------------------------------------- 1 | 256 128 72 ! itot, jtot, ktot 2 | 6. 3. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.95 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | rk3 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.0001773049645390071 ! rho_sp, mu_sp 8 | poi F 0.0 ! inivel, is_noise_vel, noise_vel 9 | T hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 100000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 500 1000 5000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | T F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 1. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. 
! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/single_phase/coarse_turb_channel_sp/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/single_phase/coarse_turb_channel_sp/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 256, & 10 | ny = 128, & 11 | nz = 72 12 | real(8), parameter :: lx = 6.0, & 13 | ly = 3.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 100000, & 30 | nskip = 5000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/single_phase/dhc_sp/dns.in: -------------------------------------------------------------------------------- 1 | 2 128 128 ! 
itot, jtot, ktot 2 | 0.00111659375 7.14620d-2 7.14620d-2 ! lx, ly, lz 3 | 0. ! gr 4 | 0.95 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | rk3 cen ! time_scheme, space_scheme_mom 7 | 0.588415 3.24975d-5 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 30000 10. 0.1 ! nstep,time_max,tw_max 12 | F T F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 100 50000 50000 3000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. -9.81 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 2 2 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/single_phase/dhc_sp/heat_transfer_sp.in: -------------------------------------------------------------------------------- 1 | uni F 0.0 ! initmp, is_noise_tmp, noise_tmp 2 | 600.d0 ! tmp0 (init. temp. in the domain) 3 | 1004.5d0 ! cp_sp 4 | 717.5d0 ! cv_sp 5 | 0.046 ! kappa_sp 6 | P P N N D D ! cbctmp(0:1,1:3) [T BC type] 7 | 0. 0. 0. 0. 960. 240. ! bctmp(0:1,1:3) [T BC value] 8 | 1.666667d-3 ! beta_sp_th 9 | -------------------------------------------------------------------------------- /examples/single_phase/dhc_sp/post_sp.in: -------------------------------------------------------------------------------- 1 | T ! doTime 2 | 3 ! avg_dir 3 | 100 ! time_deltai 4 | F ! doFavre 5 | T ! doWall 6 | 100 ! 
wall_deltai 7 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_channel_sp/dns.in: -------------------------------------------------------------------------------- 1 | 64 32 32 ! itot, jtot, ktot 2 | 2. 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.1 ! rho_sp, mu_sp 8 | poi F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 50 100 500 500 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | T F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 1. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_channel_sp/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_channel_sp/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! 
this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 64, & 10 | ny = 32, & 11 | nz = 32 12 | real(8), parameter :: lx = 2.0, & 13 | ly = 1.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 500 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_2D_sp/README_test.md: -------------------------------------------------------------------------------- 1 | This case helps to validate the 2D lid driven cavity test against coarse, reliable, data (to ensure the test is fast enough). 2 | 3 | Observables can be extracted directly from the binary files of y-velocity "data/vey_fld_000005000.bin". 4 | 5 | We are interested in y-velocity over the z-directin along the domain centerline. 
To visualize it: 6 | 7 | * plot "data_ldc_re1000.txt" using 1:2 8 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_2D_sp/data_ldc_re1000.txt: -------------------------------------------------------------------------------- 1 | 7.8125000e-03 -3.1095216e-02 2 | 2.3437500e-02 -8.3540127e-02 3 | 3.9062500e-02 -1.2849577e-01 4 | 5.4687500e-02 -1.6870639e-01 5 | 7.0312500e-02 -2.0652188e-01 6 | 8.5937500e-02 -2.4295277e-01 7 | 1.0156250e-01 -2.7747221e-01 8 | 1.1718750e-01 -3.0851031e-01 9 | 1.3281250e-01 -3.3416215e-01 10 | 1.4843750e-01 -3.5277174e-01 11 | 1.6406250e-01 -3.6332860e-01 12 | 1.7968750e-01 -3.6569098e-01 13 | 1.9531250e-01 -3.6060902e-01 14 | 2.1093750e-01 -3.4953180e-01 15 | 2.2656250e-01 -3.3426054e-01 16 | 2.4218750e-01 -3.1657378e-01 17 | 2.5781250e-01 -2.9794420e-01 18 | 2.7343750e-01 -2.7940278e-01 19 | 2.8906250e-01 -2.6153750e-01 20 | 3.0468750e-01 -2.4457665e-01 21 | 3.2031250e-01 -2.2850401e-01 22 | 3.3593750e-01 -2.1316866e-01 23 | 3.5156250e-01 -1.9837011e-01 24 | 3.6718750e-01 -1.8391366e-01 25 | 3.8281250e-01 -1.6963929e-01 26 | 3.9843750e-01 -1.5543093e-01 27 | 4.1406250e-01 -1.4121361e-01 28 | 4.2968750e-01 -1.2694478e-01 29 | 4.4531250e-01 -1.1260400e-01 30 | 4.6093750e-01 -9.8183871e-02 31 | 4.7656250e-01 -8.3682971e-02 32 | 4.9218750e-01 -6.9101153e-02 33 | 5.0781250e-01 -5.4436797e-02 34 | 5.2343750e-01 -3.9685460e-02 35 | 5.3906250e-01 -2.4839421e-02 36 | 5.5468750e-01 -9.8876651e-03 37 | 5.7031250e-01 5.1839946e-03 38 | 5.8593750e-01 2.0392858e-02 39 | 6.0156250e-01 3.5759451e-02 40 | 6.1718750e-01 5.1307811e-02 41 | 6.3281250e-01 6.7065881e-02 42 | 6.4843750e-01 8.3065834e-02 43 | 6.6406250e-01 9.9344067e-02 44 | 6.7968750e-01 1.1594047e-01 45 | 6.9531250e-01 1.3289650e-01 46 | 7.1093750e-01 1.5025151e-01 47 | 7.2656250e-01 1.6803678e-01 48 | 7.4218750e-01 1.8626699e-01 49 | 7.5781250e-01 2.0492898e-01 50 | 7.7343750e-01 2.2396841e-01 51 | 7.8906250e-01 
2.4327555e-01 52 | 8.0468750e-01 2.6267212e-01 53 | 8.2031250e-01 2.8190180e-01 54 | 8.3593750e-01 3.0062699e-01 55 | 8.5156250e-01 3.1843497e-01 56 | 8.6718750e-01 3.3486128e-01 57 | 8.8281250e-01 3.4946957e-01 58 | 8.9843750e-01 3.6215024e-01 59 | 9.1406250e-01 3.7412981e-01 60 | 9.2968750e-01 3.9059183e-01 61 | 9.4531250e-01 4.2512345e-01 62 | 9.6093750e-01 5.0245396e-01 63 | 9.7656250e-01 6.5085666e-01 64 | 9.9218750e-01 8.7633306e-01 65 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_2D_sp/dns.in: -------------------------------------------------------------------------------- 1 | 2 64 64 ! itot, jtot, ktot 2 | 0.03125 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.95 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | rk3 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.001 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 20 500 1000 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 1. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 2 2 ! dims_in(1:2) 29 | 4 ! 
numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_2D_sp/test_ldc_sp.py: -------------------------------------------------------------------------------- 1 | def check(): 2 | import numpy as np 3 | N = 64 4 | shape = [2,N,N] 5 | vvel = np.fromfile('data/vey_fld_000005000.bin',dtype=np.float64).reshape(shape,order='F') 6 | dataChk = vvel[0,int(N/2),:] 7 | dataOK = np.loadtxt('data_ldc_re1000.txt') 8 | tol = 1e-6 9 | chk = abs(np.mean((dataOK[:,1])-dataChk)) fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_sp/dns.in: -------------------------------------------------------------------------------- 1 | 64 64 64 ! itot, jtot, ktot 2 | 1. 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.95 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | rk3 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.001 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 20 500 1000 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | D D D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | D D D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | D D D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | N N N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 1. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! 
bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_sp/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/single_phase/laminar_ldc_sp/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 64, & 10 | ny = 64, & 11 | nz = 64 12 | real(8), parameter :: lx = 1.0, & 13 | ly = 1.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 
28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/dns.in: -------------------------------------------------------------------------------- 1 | 128 64 128 ! itot, jtot, ktot 2 | 0.9342727d0 0.4671364d0 0.9342727d0 ! lx, ly, lz 3 | 0. ! gr 4 | 0.5 1.0e-4 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 fll ! time_scheme, space_scheme_mom 7 | 1000.0 1.0 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 1000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 500 1000 500000 1000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P D D P P ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P D D P P ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P D D P P ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P N N P P ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. -9.81 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/heat_transfer.in: -------------------------------------------------------------------------------- 1 | uni T 0.5 ! initmp, is_noise_tmp, noise_tmp 2 | 323.d0 323.d0 ! tl0, tg0 3 | 1.d0 1.d0 ! cp1, cp2 4 | 1.d0 1.d0 ! cv1, cv2 5 | 0.1 1.0 ! kappa1 kappa2 6 | P P D D P P ! cbctmp(0:1,1:3) [T BC type] 7 | 0. 0. 328. 318. 0. 0. ! 
bctmp(0:1,1:3) [T BC value] 8 | 323.d0 0.1 0.1 ! tmp0, beta1_th, beta2_th 9 | 300.d0 ! [J*kg^(-1)*K^(-1)] gas constant 10 | 9690 ! p0_0 11 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/post.in: -------------------------------------------------------------------------------- 1 | T ! doTime 2 | 2 ! avg_dir 3 | 100 ! time_deltai 4 | F ! doFavre 5 | T ! doWall 6 | 100 ! wall_deltai 7 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 6 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre','vof','tmp'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 128, & 10 | ny = 64 , & 11 | nz = 128 12 | real(8), parameter :: lx = 0.9342727, & 13 | ly = 0.4671364, & 14 | lz = 0.9342727 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! 
--> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 1000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_ht/coarse_two_layer_rb/vof.in: -------------------------------------------------------------------------------- 1 | 100. 1000. 0.1 1.0 ! rho1, rho2, mu1,mu2 2 | tay ! inivof 3 | 1 ! nbub 4 | 0.5 0.5 0.5 0.125 ! xc, yc, zc, r 5 | P P N N P P ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | 2.140702596 ! sigma 8 | F 50000 ! lateInit, i_lateInit 9 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/README_test.md: -------------------------------------------------------------------------------- 1 | This case helps to validate the code with respect to single phase, buoyancy driven flows, using the Boussinesq approximation. The setup is similar to the case with Rayleigh number 10^6 in: 2 | "Accurate solutions to the square thermally driven cavity at high Rayleigh number" 3 | P. Le Quere, Computers and Fluids Vol.20, No. 1, pp 29-41, 1991. 4 | 5 | Observables: 6 | 1. First column: time; 7 | 2. Second column: Nusselt on the hot wall; 8 | 3. Third column: Nusselt on the cold wall. 9 | 10 | To visualize using e.g., gnuplot: 11 | plot "nusselt_ref.out" using 1:2, "nusselt_ref.out" using 1:($3*(-1)) 12 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/dns.in: -------------------------------------------------------------------------------- 1 | 2 128 128 ! itot, jtot, ktot 2 | 0.00111659375 7.14620d-2 7.14620d-2 ! lx, ly, lz 3 | 0. ! gr 4 | 0.5 1.0e-4 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 fll ! time_scheme, space_scheme_mom 7 | 0.588415 3.24975d-5 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! 
bulk_ftype 11 | 30000 10. 0.1 ! nstep,time_max,tw_max 12 | F T F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 100 50000 50000 3000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. -9.81 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 2 2 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/heat_transfer.in: -------------------------------------------------------------------------------- 1 | uni F 0.0 ! initmp, is_noise_tmp, noise_tmp 2 | 600.d0 600.d0 ! tl0, tg0 (init. temp. of liquid and gas in case of 'sin') 3 | 1004.5d0 1004.5d0 ! cp1, cp2 4 | 717.5d0 717.5d0 ! cv1, cv2 5 | 0.046 0.046 ! kappa1 kappa2 6 | P P N N D D ! cbctmp(0:1,1:3) [T BC type] 7 | 0. 0. 0. 0. 960. 240. ! bctmp(0:1,1:3) [T BC value] 8 | 600.0 1.666667d-3 1.666667d-3 ! T0, beta1_th, beta2_th 9 | 287.0 ! [J*kg^(-1)*K^(-1)] gas constant 10 | 101325.0 ! p0_0=(1.d0/1.5d0)*9.d0*10.d4 11 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/post.in: -------------------------------------------------------------------------------- 1 | T ! doTime 2 | 3 ! avg_dir 3 | 100 ! time_deltai 4 | F ! doFavre 5 | T ! doWall 6 | 100 ! 
wall_deltai 7 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/test_dhc.py: -------------------------------------------------------------------------------- 1 | def check(): 2 | import numpy as np 3 | dataOK = np.loadtxt('nusselt_ref.out') 4 | dataChk= np.loadtxt('data/post/wall/nusselt.out') 5 | tol = 1e-6 6 | nts = 10000 7 | chk = abs(np.mean(dataOK[-nts:,2])-np.mean(dataChk[-nts:,2])) fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 30000, & 30 | nskip = 3000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_ht/dhc/vof.in: -------------------------------------------------------------------------------- 1 | 0.588415 0.588415 3.24975d-5 3.24975d-5 ! rho1, rho2, mu1,mu2 2 | uni ! inivof 3 | 1 ! nbub 4 | 0.0625 3. 3. 10. ! xc, yc, zc, r 5 | P P N N N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | 0.0 ! sigma 8 | T 100000 ! lateInit, i_lateInit 9 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/dns.in: -------------------------------------------------------------------------------- 1 | 1024 1024 512 ! itot, jtot, ktot 2 | 0.019115d0 0.019115d0 0.009557534d0 ! lx, ly, lz 3 | 0. ! gr 4 | 0.25 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 fll ! time_scheme, space_scheme_mom 7 | 996.56 5.8331d-4 ! density, dynamic viscosity 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hvk ! is_wallturb 10 | cfr ! bulk_ftype 11 | 5000000 10. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 1000 50000 10000 100000 10000 ! 
icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. -9.81 ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/heat_transfer.in: -------------------------------------------------------------------------------- 1 | uni T 0.5 ! initmp, is_noise_tmp, noise_tmp 2 | 300.d0 300.d0 ! tl0, tg0 (init. temp. of liquid and gas in case of 'sin') 3 | 4180.0d0 4180.0d0 ! cp1, cp2 !@ 300.0 K: Cp_water = 4180.0d0 J/(kg.K) 4 | 4130.0d0 4130.0d0 ! cv1, cv2 !@ 300.0 K: Cv_water = 4130.0d0 J/(kg.K) 5 | 0.60956 0.60956 ! kappa1 kappa2 ! water thermal conductivity W/(m.K) @ 300.0 K 6 | P P P P D D ! cbctmp(0:1,1:3) [T BC type] 7 | 0. 0. 0. 0. 305. 295. ! bctmp(0:1,1:3) [T BC value] delta_T=10 !!! default values were: 960. 240. 8 | 300.0 0.1 0.1 ! T0, beta_th = 1/T0 for ideal gas !if Fr = 1--> beta=1/delta_t !for water @ 300.0 K:beta_th = 2.754d-4 (1/K) 9 | 287.0 ! [J*kg^(-1)*K^(-1)] gas constant ! for Air = 287.05 J/(kg.K) 10 | 101325.0 ! p0_0=(1.d0/1.5d0)*9.d0*10.d4 11 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/post.in: -------------------------------------------------------------------------------- 1 | T ! doTime =do_avg 2 | 3 ! avg_dir 3 | 50000 ! time_deltai 4 | F ! doFavre 5 | T ! doWall 6 | 100 ! 
wall_deltai 7 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/post_vof.in: -------------------------------------------------------------------------------- 1 | T 2000 ! do_tagging, iout0d_ta 2 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 6 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre','vof','tmp'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 1024, & 10 | ny = 512 , & 11 | nz = 1024 12 | real(8), parameter :: lx = 0.019115, & 13 | ly = lx, & 14 | lz = lx/2.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 
28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 1000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_ht/droplet_rb/vof.in: -------------------------------------------------------------------------------- 1 | 996.56 996.56 5.8331d-4 5.8331d-4 ! rho1, rho2, mu1,mu2 2 | bub ! inivof 3 | 782 ! nbub 4 | 0.5 0.5 0.5 0.125 ! xc, yc, zc, r 5 | P P P P N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | 0.0001488422 ! sigma 8 | T 10000000 ! lateInit, i_lateInit 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/coarse_turb_channel/dns.in: -------------------------------------------------------------------------------- 1 | 256 128 72 ! itot, jtot, ktot 2 | 6. 3. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.0001773049645390071 ! rho_sp, mu_sp 8 | poi F 0.0 ! inivel, is_noise_vel, noise_vel 9 | T hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 100000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 20 500 5000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | T F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 1. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 4 4 ! dims_in(1:2) 29 | 4 ! 
numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/coarse_turb_channel/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/coarse_turb_channel/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 256, & 10 | ny = 128, & 11 | nz = 72 12 | real(8), parameter :: lx = 6.0, & 13 | ly = 3.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 100000, & 30 | nskip = 5000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/coarse_turb_channel/vof.in: -------------------------------------------------------------------------------- 1 | 1. 1. 0.0001773049645390071 0.0001773049645390071 ! rho1, rho2, mu1,mu2 2 | uni ! 
inivof 3 | 1 ! nbub ! dummy value: we put 1 to avoid problems in compilation 4 | 0.5 0.5 0.5 1. ! xc, yc, zc, r 5 | P P P P N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | .0 ! sigma 8 | T 100000 ! late_init,i_late_init 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_channel/dns.in: -------------------------------------------------------------------------------- 1 | 64 32 32 ! itot, jtot, ktot 2 | 2. 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.1 ! rho_sp, mu_sp 8 | poi F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 50 100 500 500 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | T F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 1. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! 
numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_channel/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_channel/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 64, & 10 | ny = 32, & 11 | nz = 32 12 | real(8), parameter :: lx = 2.0, & 13 | ly = 1.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 500 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_channel/vof.in: -------------------------------------------------------------------------------- 1 | 1. 1. 0.1 0.1 ! rho1, rho2, mu1,mu2 2 | uni ! inivof 3 | 1 ! nbub ! 
dummy value: we put 1 to avoid problems in compilation 4 | 0.5 0.5 0.5 1. ! xc, yc, zc, r 5 | P P P P N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | .0 ! sigma 8 | T 100000 ! late_init,i_late_init 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc/dns.in: -------------------------------------------------------------------------------- 1 | 64 64 64 ! itot, jtot, ktot 2 | 1. 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.001 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 20 500 1000 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | D D D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | D D D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | D D D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | N N N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 1. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 2 ! dims_in(1:2) 29 | 4 ! 
numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc/visu_ex/genview.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gfortran gen_xdmf.f90 -o a.out && ./a.out && rm -rf a.out 4 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc/visu_ex/param.h90: -------------------------------------------------------------------------------- 1 | ! 2 | integer, parameter :: nscal = 4 ! this value must match the number of scalars in "scalname" 3 | character (len=3), parameter :: casename = 'lst' 4 | character (len=3), parameter , dimension(nscal) :: scalname = (/'vex','vey','vez', & 5 | 'pre'/) 6 | ! 7 | ! note: select nx,ny,nz and lx,ly,lz according to the values in your dns.in 8 | ! 9 | integer, parameter :: nx = 64, & 10 | ny = 64, & 11 | nz = 64 12 | real(8), parameter :: lx = 1.0, & 13 | ly = 1.0, & 14 | lz = 1.0 15 | real(8), parameter :: dx = lx/(1.*nx), & 16 | dy = ly/(1.*ny), & 17 | dz = lz/(1.*nz) 18 | real(8), parameter :: x0 = -lx/2.d0+dx/2.d0*1.d0, & 19 | y0 = -ly/2.d0+dy/2.d0*1.d0, & 20 | z0 = -lz/2.d0+dz/2.d0*1.d0 21 | real(8), parameter :: t0 = 0.d0 22 | real(8), parameter :: dt = 1.0d0 23 | ! 24 | ! note: --> fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc/vof.in: -------------------------------------------------------------------------------- 1 | 1. 1. 0.1 0.1 ! rho1, rho2, mu1,mu2 2 | uni ! inivof 3 | 1 ! nbub ! 
dummy value: we put 1 to avoid problems in compilation 4 | 0.5 0.5 0.5 1. ! xc, yc, zc, r 5 | P P P P N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | .0 ! sigma 8 | T 100000 ! late_init,i_late_init 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc_2D/README_test.md: -------------------------------------------------------------------------------- 1 | This case helps to validate the 2D lid driven cavity test against coarse, reliable data (to ensure the test is fast enough). 2 | 3 | Observables can be extracted directly from the binary files of y-velocity "data/vey_fld_000005000.bin". 4 | 5 | We are interested in y-velocity over the z-direction along the domain centerline. To visualize it: 6 | 7 | * plot "data_ldc_re1000.txt" using 1:2 8 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc_2D/data_ldc_re1000.txt: -------------------------------------------------------------------------------- 1 | 7.8125000e-03 -3.1095216e-02 2 | 2.3437500e-02 -8.3540127e-02 3 | 3.9062500e-02 -1.2849577e-01 4 | 5.4687500e-02 -1.6870639e-01 5 | 7.0312500e-02 -2.0652188e-01 6 | 8.5937500e-02 -2.4295277e-01 7 | 1.0156250e-01 -2.7747221e-01 8 | 1.1718750e-01 -3.0851031e-01 9 | 1.3281250e-01 -3.3416215e-01 10 | 1.4843750e-01 -3.5277174e-01 11 | 1.6406250e-01 -3.6332860e-01 12 | 1.7968750e-01 -3.6569098e-01 13 | 1.9531250e-01 -3.6060902e-01 14 | 2.1093750e-01 -3.4953180e-01 15 | 2.2656250e-01 -3.3426054e-01 16 | 2.4218750e-01 -3.1657378e-01 17 | 2.5781250e-01 -2.9794420e-01 18 | 2.7343750e-01 -2.7940278e-01 19 | 2.8906250e-01 -2.6153750e-01 20 | 3.0468750e-01 -2.4457665e-01 21 | 3.2031250e-01 -2.2850401e-01 22 | 3.3593750e-01 -2.1316866e-01 23 | 3.5156250e-01 -1.9837011e-01 24 | 3.6718750e-01 -1.8391366e-01 25 | 3.8281250e-01 -1.6963929e-01 26 | 3.9843750e-01 -1.5543093e-01 27 | 4.1406250e-01 -1.4121361e-01 28 | 4.2968750e-01 -1.2694478e-01 29 | 
4.4531250e-01 -1.1260400e-01 30 | 4.6093750e-01 -9.8183871e-02 31 | 4.7656250e-01 -8.3682971e-02 32 | 4.9218750e-01 -6.9101153e-02 33 | 5.0781250e-01 -5.4436797e-02 34 | 5.2343750e-01 -3.9685460e-02 35 | 5.3906250e-01 -2.4839421e-02 36 | 5.5468750e-01 -9.8876651e-03 37 | 5.7031250e-01 5.1839946e-03 38 | 5.8593750e-01 2.0392858e-02 39 | 6.0156250e-01 3.5759451e-02 40 | 6.1718750e-01 5.1307811e-02 41 | 6.3281250e-01 6.7065881e-02 42 | 6.4843750e-01 8.3065834e-02 43 | 6.6406250e-01 9.9344067e-02 44 | 6.7968750e-01 1.1594047e-01 45 | 6.9531250e-01 1.3289650e-01 46 | 7.1093750e-01 1.5025151e-01 47 | 7.2656250e-01 1.6803678e-01 48 | 7.4218750e-01 1.8626699e-01 49 | 7.5781250e-01 2.0492898e-01 50 | 7.7343750e-01 2.2396841e-01 51 | 7.8906250e-01 2.4327555e-01 52 | 8.0468750e-01 2.6267212e-01 53 | 8.2031250e-01 2.8190180e-01 54 | 8.3593750e-01 3.0062699e-01 55 | 8.5156250e-01 3.1843497e-01 56 | 8.6718750e-01 3.3486128e-01 57 | 8.8281250e-01 3.4946957e-01 58 | 8.9843750e-01 3.6215024e-01 59 | 9.1406250e-01 3.7412981e-01 60 | 9.2968750e-01 3.9059183e-01 61 | 9.4531250e-01 4.2512345e-01 62 | 9.6093750e-01 5.0245396e-01 63 | 9.7656250e-01 6.5085666e-01 64 | 9.9218750e-01 8.7633306e-01 65 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc_2D/dns.in: -------------------------------------------------------------------------------- 1 | 2 64 64 ! itot, jtot, ktot 2 | .03125 1. 1. ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.001 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 5000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 20 500 1000 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P D D D D ! 
cbcvel(0:1,1:3,2) [v BC type] 17 | P P D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 1. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 2 2 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc_2D/test_ldc.py: -------------------------------------------------------------------------------- 1 | def check(): 2 | import numpy as np 3 | N = 64 4 | shape = [2,N,N] 5 | vvel = np.fromfile('data/vey_fld_000005000.bin',dtype=np.float64).reshape(shape,order='F') 6 | dataChk = vvel[0,int(N/2),:] 7 | dataOK = np.loadtxt('data_ldc_re1000.txt') 8 | tol = 1e-6 9 | chk = abs(np.mean((dataOK[:,1])-dataChk)) fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 5000, & 30 | nskip = 1000 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/laminar_ldc_2D/vof.in: -------------------------------------------------------------------------------- 1 | 1. 1. 0.001 0.001 ! rho1, rho2, mu1,mu2 2 | uni ! inivof 3 | 1 ! nbub ! dummy value: we put 1 to avoid problems in compilation 4 | 0.5 0.5 0.5 1. ! xc, yc, zc, r 5 | P P P P N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | .0 ! sigma 8 | T 100000 ! 
late_init,i_late_init 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/rising_bubble_3d/README_test.md: -------------------------------------------------------------------------------- 1 | This case helps to validate the code with respect to two-phase incompressible and isothermal code. The setup is the Rising Bubble test case: "Hysing, S.; Turek, S.; Kuzmin, D.; Parolini, N.; Burman, E.; Ganesan, S.; Tobiska, L.: Quantitative benchmark computations of two-dimensional bubble dynamics, International Journal for Numerical Methods in Fluids, Volume 60 Issue 11, Pages 1259-1288, DOI: 10.1002/fld.1934, 2009". 2 | 3 | Observables: 4 | 1. First column: time; 5 | 2. Second column: x-position of bubble center of mass; 6 | 3. Third column: y-position of bubble center of mass; 7 | 4. Fourth column: z-position of bubble center of mass; 8 | 5. Fifth column: x-velocity of the bubble center of mass; 9 | 6. Sixth column: y-velocity of the bubble center of mass; 10 | 7. Seventh column: z-velocity of the bubble center of mass; 11 | 12 | We are interested in z-position and z-velocity of the bubble center of mass. To visualize them: 13 | 14 | * for the position: plot "pos_vt_ref.out" using 1:4 15 | * for the velocity: plot "pos_vt_ref.out" using 1:7 16 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/rising_bubble_3d/dns.in: -------------------------------------------------------------------------------- 1 | 32 32 64 ! itot, jtot, ktot 2 | 1.d0 1.d0 2.d0 ! lx, ly, lz 3 | 0. ! gr 4 | .25 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1000.0 10.0 ! rho_sp, mu_sp 8 | zer F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 100000 3. 0.1 ! nstep,time_max,tw_max 12 | F T F ! stop_type(1:3) 13 | F 2 1 T ! 
restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 200000 50000000 100 2000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | D D D D D D ! cbcvel(0:1,1:3,1) [u BC type] 16 | D D D D D D ! cbcvel(0:1,1:3,2) [v BC type] 17 | D D D D D D ! cbcvel(0:1,1:3,3) [w BC type] 18 | N N N N N N ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. -0.98 ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 1 1 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/rising_bubble_3d/test_bub.py: -------------------------------------------------------------------------------- 1 | def check(): 2 | import numpy as np 3 | dataOK = np.loadtxt('pos_vt_ref.out') 4 | dataChk= np.loadtxt('data/pos_vt.out') 5 | tol1 = 2e-6 6 | tol2 = 1e-6 7 | nts = 10000 8 | chk1 = abs(np.mean((dataOK[:,3])-dataChk[:,3])) fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 1600, & 30 | nskip = 100 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot/rising_bubble_3d/vof.in: -------------------------------------------------------------------------------- 1 | 100. 1000. 1. 10. ! rho1, rho2, mu1,mu2 2 | bub ! inivof 3 | 1 ! nbub 4 | 0.5 0.5 0.5 0.25 ! xc, yc, zc, r 5 | N N N N N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | 24.5 ! sigma 8 | F 0 ! 
late_init,i_late_init 9 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot_turb/abc_triperiodic/README_test.md: -------------------------------------------------------------------------------- 1 | This case helps to validate the code with respect to single phase incompressible and isothermal code. The setup is the Arnold-Beltrami-Childress (ABC) flow as described in e.g.: 2 | "Podvigina, O & Pouquet, A1994 On the non-linear stability of the 1: 1: 1 abc flow.PhysicaD: Nonlinear Phenomena75(4), 471–508." 3 | 4 | Observables: 5 | 1. First column: time-steps; 6 | 2. Second column: time; 7 | 3. Third column: mean kinetic energy; 8 | 9 | We are interested in the evolution of the mean kinetic energy in time, which, given the laminar regime, should be kept as close as possible to 1.5. To visualize it: 10 | plot "ke_t_ref.out" using 2:3 11 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot_turb/abc_triperiodic/dns.in: -------------------------------------------------------------------------------- 1 | 32 32 32 ! itot, jtot, ktot 2 | 6.283185307179586 6.283185307179586 6.283185307179586 ! lx, ly, lz 3 | 0. ! gr 4 | 0.50 1.0e-3 ! cfl, dt_input 5 | F ! constant_dt 6 | ab2 cen ! time_scheme, space_scheme_mom 7 | 1.0 0.1 ! rho2, mu2 8 | abc F 0.0 ! inivel, is_noise_vel, noise_vel 9 | F hkv ! is_wallturb, wallturb_type 10 | cfr ! bulk_ftype 11 | 30000 100. 0.1 ! nstep,time_max,tw_max 12 | T F F ! stop_type(1:3) 13 | F 2 1 T ! restart, num_max_chkpt, input_chkpt, latest 14 | 10 10 100 1000 5000 10000 ! icheck, iout0d, iout1d, iout2d, iout3d, isave 15 | P P P P P P ! cbcvel(0:1,1:3,1) [u BC type] 16 | P P P P P P ! cbcvel(0:1,1:3,2) [v BC type] 17 | P P P P P P ! cbcvel(0:1,1:3,3) [w BC type] 18 | P P P P P P ! cbcpre(0:1,1:3 ) [p BC type] 19 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,1) [u BC value] 20 | 0. 0. 0. 0. 0. 0. ! bcvel(0:1,1:3,2) [v BC value] 21 | 0. 0. 0. 
0. 0. 0. ! bcvel(0:1,1:3,3) [w BC value] 22 | 0. 0. 0. 0. 0. 0. ! bcpre(0:1,1:3 ) [p BC value] 23 | F F F ! is_forced(1:3) 24 | 0. 0. 0. ! gacc_x,gacc_y,gacc_z 25 | 0. 0. 0. ! bvel_x,bvel_y,bvel_z 26 | 0. 0. 0. ! dpdl_x,dpdl_y,dpdl_z 27 | F F F F F F ! is_outflow(0:1,1:3) 28 | 2 2 ! dims_in(1:2) 29 | 4 ! numthreadsmax 30 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot_turb/abc_triperiodic/forcing.in: -------------------------------------------------------------------------------- 1 | abc ! turbType 2 | 1.0 ! u0_t 3 | 0.1 ! f0_t ! should be 1.0*visc*(2.0*pi/l)**2 and l = lx=ly=lz 4 | 1 ! k0_t ! frequency of the forcing, i.e.: 1, 2, 3, ... 5 | 1.0 1.0 1.0 ! A, B and C constants of Arnold-Beltrami-Childress flow 6 | F ! add disturbance to the initial condition to trigger transition 7 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot_turb/abc_triperiodic/test_abc.py: -------------------------------------------------------------------------------- 1 | def check(): 2 | import numpy as np 3 | dataOK = np.loadtxt('ke_t_ref.out') 4 | dataChk= np.loadtxt('data/ke_t.out') 5 | tol = 1e-6 6 | nts = 10000 7 | chk = abs(np.mean(dataOK[-nts:,2])-np.mean(dataChk[-nts:,2])) fldstart = the first time-step you want to visualize; 25 | ! --> fldend = the last time-step you want to visualize; 26 | ! --> nskip = the frequency at which you want to visualize the fields. 27 | ! 28 | integer, parameter :: fldstart = 0 , & 29 | fldend = 1000, & 30 | nskip = 100 31 | integer, parameter :: fldinit = 0 32 | -------------------------------------------------------------------------------- /examples/two_phase_inc_isot_turb/abc_triperiodic/vof.in: -------------------------------------------------------------------------------- 1 | 1. 1. 0.1 0.1 ! rho1, rho2, mu1,mu2 2 | uni ! inivof 3 | 1 ! nbub ! dummy value: we put 1 to avoid problems in compilation 4 | 0.5 0.5 0.5 1. ! 
xc, yc, zc, r 5 | P P P P P P ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | .0 ! sigma 8 | T 100000 ! late_init,i_late_init 9 | -------------------------------------------------------------------------------- /getting_started/INFO_VISU.md: -------------------------------------------------------------------------------- 1 | # how to visualize the output binary files 2 | 3 | ### the easy way 4 | 5 | In addition to the binary files for visualization, the code now generates a log file that contains information about the saved data (see `out2d.h90` and `out3d.h90` for more details); this new approach uses that log file to generate the `Xdmf` visualization file. 6 | 7 | The steps are as follows: 8 | 9 | 1. after the simulation has run, copy the contents of `utils/visualize_fields/gen_xdmf_easy/write_xdmf.py` to the simulation `data` folder; 10 | 2. run the file with `python write_xdmf.py` in the `data` folder. If successful, this operation generates `viewfld_DNS.xmf` or `viewfld_DNS_2d.xmf` (see below) in the same folder; 11 | 3. load the generated Xdmf (`*.xmf`) file using paraview/visit or other visualization software, i.e., on the command line `paraview viewfld_DNS.xmf`. If requested, choose XDMF Reader in Paraview! 12 | 13 | ## example: how to visualize the default binary output 14 | 15 | ### 3D fields 16 | 17 | when running the script, `write_xdmf.py` we get the following prompts: 18 | 19 | ~~~ 20 |  $ python write_xdmf.py 21 |  Name of the log file written by FluTAS [log_visu_3d.out]: 22 |  Name to be appended to the grid files to prevent overwriting []: 23 |  Name of the output file [viewfld_DNS.xmf]: 24 | ~~~ 25 | 26 | * the first value is the name of the file that logged the saved data; 27 | * the second is a name to append to the grid files that are generated, which should change for different log files to prevent conflicts; 28 | * the third is the name of the visualization file. 
29 | 30 | By pressing enter three times, the default values in the square brackets are assumed by the script; these correspond to the default steps required for visualizing 3D field data. 31 | 32 | ### 2D fields 33 | 34 | the procedure for visualizing 2D field data that is saved in `out2d.h90` is exactly the same; it is just that the correct log file should be selected. The code saves by default field data in a plane of constant `y=ly/2`, and logs the saves to a file named `log_visu_2d_slice_1.out`. If more planes are saved, the user should make sure that one log file per plane is saved by the code (e.g. if another plane is saved, the log file written in `out2d.h90` could be named `log_visu_2d_slice_2.out`); see `out2d.h90` for more details. The corresponding steps to generate the Xdmf file would be, for instance: 35 | 36 | ~~~ 37 |  $ python write_xdmf.py 38 |  Name of the log file written by FluTAS [log_visu_3d.out]: log_visu_2d_slice_1.out 39 |  Name to be appended to the grid files to prevent overwriting []: 2d 40 |  Name of the output file [viewfld_DNS.xmf]: viewfld_DNS_2d.xmf 41 | ~~~ 42 | 43 | ## Alternative Approach 44 | The code performs output through the library 2DECOMP, which dumps one binary file for each field. The results can be then visualized through the files available in the folder `src/data/`. To do so: 45 | 46 | 1. Copy in the folder `src/data/` the following files: `param.h90`, `genview.sh` and `gen_xdmf.f90`. If you do not have them, you can take those files from one of the examples, e.g.: cp ../../examples/two_phase_inc_isot/rising_bubble_3d/visu_ex/* . 47 | 2. Change `param.h90` consistently with the input files (the .in files) and the number of fields you want to visualize; 48 | 3. On the command line, type: `./genview.sh`. This command generates a file (viewfld.xdmf) in the same directory; 49 | 4. To open it, type on the command line `paraview viewfld.xdmf`. 
50 | -------------------------------------------------------------------------------- /getting_started/REQ.md: -------------------------------------------------------------------------------- 1 | ## Prerequisites 2 | **FluTAS** requires some external libraries for being compiled and run. In both CPU and GPU versions, the code parallelization relies on the library 2DECOMP, which is already included in the downloadable source code. Moreover, it requires an updated version of MPI and OpenMP. These are typically provided in standard supercomputers and can be easily installed in personal workstations and laptops. 3 | 4 | ### CPU version 5 | For the use in ***CPU***, **FluTAS** requires the library FFTW to perform Fast Fourier transforms. FFTW should be downloaded, compiled separately and linked in the chosen `src/targets/*` file. To do so, we list here a series of steps: 6 | 7 | 1. Even before downloading/cloning **FluTAS**, create a separate directory in a location of your choice where to place the library. Any choice is fine, but we strongly recommend creating an independent directory outside any numerical code you have. On the command line, one can type `mkdir numerical_libraries` to create it, `cd numerical_libraries` to go inside it. Hereinafter and unless otherwise stated, this path is termed `<path>/numerical_libraries/`. Type on the command line `pwd` to know your actual path, i.e. what to put instead of `<path>`. This information will be necessary for steps 4 and 5; 8 | 9 | 2. Go to the page [download FFTW](http://www.fftw.org/download.html) and check which is the latest release of the library. While we are writing this note (May 2023), the latest release is `FFTW 3.3.10` and, therefore, we specify the procedure for this version. In case of subsequent releases, the procedure can be easily adjusted; 10 | 11 | 3. 
Go inside the new directory, i.e., `cd numerical_libraries`, and on the terminal type the following commands (or create a bash script to be run on the terminal) 12 | 13 | ~~~ 14 | wget https://www.fftw.org/fftw-3.3.10.tar.gz 15 | tar xzf fftw-3.3.10.tar.gz 16 | rm -f fftw-3.3.10.tar.gz && cd fftw-3.3.10 17 | ~~~ 18 | 19 | 4. On the command line type (or create a bash script to be run on the terminal) 20 | 21 | ~~~ 22 | ./configure FC=mpif90 CC=cc CXX=CC --prefix=<path>/numerical_libraries/fftw-3.3.10/ --enable-threads --enable-openmp --enable-mpi 23 | make -j 24 | make -j check 25 | make install 26 | ~~~ 27 | Note that here you must provide the correct path instead of `<path>` (see step 1); 28 | 29 | 5. Use again the information about the path where you have installed `fftw-3.3.10` to link **FluTAS** with the FFTW library in the chosen `src/targets/*`. You can add your path as an argument of `FFTW_HOME :=`. If there is an existing one, replace it with yours. 30 | 31 | So far we have specified the compilation procedure for the [`GNU compiler`](./../src/targets/target.generic-gnu). In case you plan to use alternative Fortran and/or C compilers, the compilation options `FC`, `CC` and `CXX` and the chosen `src/targets/*` should be changed accordingly. 32 | 33 | ### GPU version 34 | For the use in ***GPU***, two NVIDIA libraries cuFFT and cuRAND from the CUDA toolkit are required and can be downloaded from the NVIDIA HPC SDK package (https://developer.nvidia.com/hpc-sdk). 35 | 36 | 37 | -------------------------------------------------------------------------------- /src/2decomp/LICENSE: -------------------------------------------------------------------------------- 1 | The source files inside this folder are part of the 2DECOMP&FFT library 2 | and follow a separate copyright (stated in the header of each file). 
3 | -------------------------------------------------------------------------------- /src/2decomp/factor.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! A few utility routines to find factors of integer numbers 13 | 14 | subroutine findfactor(num, factors, nfact) 15 | 16 | implicit none 17 | 18 | integer, intent(IN) :: num 19 | integer, intent(OUT), dimension(*) :: factors 20 | integer, intent(OUT) :: nfact 21 | integer :: i, m 22 | 23 | ! find the factors <= sqrt(num) 24 | m = int(sqrt(real(num))) 25 | nfact = 1 26 | do i=1,m 27 | if (num/i*i == num) then 28 | factors(nfact) = i 29 | nfact = nfact + 1 30 | end if 31 | end do 32 | nfact = nfact - 1 33 | 34 | ! 
derive those > sqrt(num) 35 | if (factors(nfact)**2/=num) then 36 | do i=nfact+1, 2*nfact 37 | factors(i) = num / factors(2*nfact-i+1) 38 | end do 39 | nfact = nfact * 2 40 | else 41 | do i=nfact+1, 2*nfact-1 42 | factors(i) = num / factors(2*nfact-i) 43 | end do 44 | nfact = nfact * 2 - 1 45 | endif 46 | 47 | return 48 | 49 | end subroutine findfactor 50 | 51 | 52 | subroutine primefactors(num, factors, nfact) 53 | 54 | implicit none 55 | 56 | integer, intent(IN) :: num 57 | integer, intent(OUT), dimension(*) :: factors 58 | integer, intent(INOUT) :: nfact 59 | 60 | integer :: i, n 61 | 62 | i = 2 63 | nfact = 1 64 | n = num 65 | do 66 | if (mod(n,i) == 0) then 67 | factors(nfact) = i 68 | nfact = nfact + 1 69 | n = n / i 70 | else 71 | i = i + 1 72 | end if 73 | if (n == 1) then 74 | nfact = nfact - 1 75 | exit 76 | end if 77 | end do 78 | 79 | return 80 | 81 | end subroutine primefactors 82 | 83 | -------------------------------------------------------------------------------- /src/2decomp/halo.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 13 | ! Halo cell support for neighbouring pencils to exchange data 14 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 15 | subroutine update_halo_real(in, out, level, opt_decomp, opt_global) 16 | 17 | implicit none 18 | 19 | integer, intent(IN) :: level ! 
levels of halo cells required 20 | real(mytype), dimension(:,:,:), intent(IN) :: in 21 | real(mytype), allocatable, dimension(:,:,:), intent(OUT) :: out 22 | TYPE(DECOMP_INFO), optional :: opt_decomp 23 | logical, optional :: opt_global 24 | 25 | TYPE(DECOMP_INFO) :: decomp 26 | logical :: global 27 | 28 | ! starting/ending index of array with halo cells 29 | integer :: xs, ys, zs, xe, ye, ze 30 | 31 | integer :: i, j, k, s1, s2, s3, ierror 32 | integer :: data_type 33 | 34 | integer :: icount, ilength, ijump 35 | integer :: halo12, halo21, halo31, halo32 36 | integer, dimension(4) :: requests 37 | integer, dimension(MPI_STATUS_SIZE,4) :: status 38 | integer :: tag_e, tag_w, tag_n, tag_s, tag_t, tag_b 39 | 40 | data_type = real_type 41 | 42 | #include "halo_common.f90" 43 | 44 | return 45 | end subroutine update_halo_real 46 | 47 | 48 | subroutine update_halo_complex(in, out, level, opt_decomp, opt_global) 49 | 50 | implicit none 51 | 52 | integer, intent(IN) :: level ! levels of halo cells required 53 | complex(mytype), dimension(:,:,:), intent(IN) :: in 54 | complex(mytype), allocatable, dimension(:,:,:), intent(OUT) :: out 55 | TYPE(DECOMP_INFO), optional :: opt_decomp 56 | logical, optional :: opt_global 57 | 58 | TYPE(DECOMP_INFO) :: decomp 59 | logical :: global 60 | 61 | ! starting/ending index of array with halo cells 62 | integer :: xs, ys, zs, xe, ye, ze 63 | 64 | integer :: i, j, k, s1, s2, s3, ierror 65 | integer :: data_type 66 | 67 | integer :: icount, ilength, ijump 68 | integer :: halo12, halo21, halo31, halo32 69 | integer, dimension(4) :: requests 70 | integer, dimension(MPI_STATUS_SIZE,4) :: status 71 | integer :: tag_e, tag_w, tag_n, tag_s, tag_t, tag_b 72 | 73 | data_type = complex_type 74 | 75 | #include "halo_common.f90" 76 | 77 | return 78 | end subroutine update_halo_complex 79 | 80 | 81 | 82 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 83 | ! To support halo-cell exchange: 84 | ! 
find the MPI ranks of neighbouring pencils 85 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 86 | subroutine init_neighbour 87 | 88 | integer :: ierror 89 | 90 | ! For X-pencil 91 | neighbour(1,1) = MPI_PROC_NULL ! east 92 | neighbour(1,2) = MPI_PROC_NULL ! west 93 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_X, 0, 1, & 94 | neighbour(1,4), neighbour(1,3), ierror) ! north & south 95 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_X, 1, 1, & 96 | neighbour(1,6), neighbour(1,5), ierror) ! top & bottom 97 | 98 | ! For Y-pencil 99 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_Y, 0, 1, & 100 | neighbour(2,2), neighbour(2,1), ierror) ! east & west 101 | neighbour(2,3) = MPI_PROC_NULL ! north 102 | neighbour(2,4) = MPI_PROC_NULL ! south 103 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_Y, 1, 1, & 104 | neighbour(2,6), neighbour(2,5), ierror) ! top & bottom 105 | 106 | ! For Z-pencil 107 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_Z, 0, 1, & 108 | neighbour(3,2), neighbour(3,1), ierror) ! east & west 109 | call MPI_CART_SHIFT(DECOMP_2D_COMM_CART_Z, 1, 1, & 110 | neighbour(3,4), neighbour(3,3), ierror) ! north & south 111 | neighbour(3,5) = MPI_PROC_NULL ! top 112 | neighbour(3,6) = MPI_PROC_NULL ! bottom 113 | 114 | return 115 | end subroutine init_neighbour 116 | -------------------------------------------------------------------------------- /src/2decomp/io_read_one.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! 
This file contain common code to be included by subroutines 13 | ! 'mpiio_read_one_...' in io.f90 14 | 15 | ! Using MPI-IO to write a distributed 3D array into a file 16 | 17 | if (present(opt_decomp)) then 18 | decomp = opt_decomp 19 | else 20 | call get_decomp_info(decomp) 21 | end if 22 | 23 | ! determine subarray parameters 24 | sizes(1) = decomp%xsz(1) 25 | sizes(2) = decomp%ysz(2) 26 | sizes(3) = decomp%zsz(3) 27 | 28 | if (ipencil == 1) then 29 | subsizes(1) = decomp%xsz(1) 30 | subsizes(2) = decomp%xsz(2) 31 | subsizes(3) = decomp%xsz(3) 32 | starts(1) = decomp%xst(1)-1 ! 0-based index 33 | starts(2) = decomp%xst(2)-1 34 | starts(3) = decomp%xst(3)-1 35 | else if (ipencil == 2) then 36 | subsizes(1) = decomp%ysz(1) 37 | subsizes(2) = decomp%ysz(2) 38 | subsizes(3) = decomp%ysz(3) 39 | starts(1) = decomp%yst(1)-1 40 | starts(2) = decomp%yst(2)-1 41 | starts(3) = decomp%yst(3)-1 42 | else if (ipencil == 3) then 43 | subsizes(1) = decomp%zsz(1) 44 | subsizes(2) = decomp%zsz(2) 45 | subsizes(3) = decomp%zsz(3) 46 | starts(1) = decomp%zst(1)-1 47 | starts(2) = decomp%zst(2)-1 48 | starts(3) = decomp%zst(3)-1 49 | endif 50 | 51 | call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, starts, & 52 | MPI_ORDER_FORTRAN, data_type, newtype, ierror) 53 | call MPI_TYPE_COMMIT(newtype,ierror) 54 | call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, & 55 | MPI_MODE_RDONLY, MPI_INFO_NULL, & 56 | fh, ierror) 57 | disp = 0_MPI_OFFSET_KIND 58 | call MPI_FILE_SET_VIEW(fh,disp,data_type, & 59 | newtype,'native',MPI_INFO_NULL,ierror) 60 | call MPI_FILE_READ_ALL(fh, var, & 61 | subsizes(1)*subsizes(2)*subsizes(3), & 62 | data_type, MPI_STATUS_IGNORE, ierror) 63 | call MPI_FILE_CLOSE(fh,ierror) 64 | call MPI_TYPE_FREE(newtype,ierror) 65 | -------------------------------------------------------------------------------- /src/2decomp/io_read_var.f90: -------------------------------------------------------------------------------- 1 | 
!======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! This file contain common code to be included by subroutines 13 | ! 'read_var_...' in io.f90 14 | 15 | ! Using MPI-IO to read a distributed 3D variable from a file. File 16 | ! operations (open/close) need to be done in calling application. This 17 | ! allows multiple variables to be read from a single file. Together 18 | ! with the corresponding write operation, this is the perfect solution 19 | ! for applications to perform restart/checkpointing. 20 | 21 | if (present(opt_decomp)) then 22 | decomp = opt_decomp 23 | else 24 | call get_decomp_info(decomp) 25 | end if 26 | 27 | ! Create file type and set file view 28 | sizes(1) = decomp%xsz(1) 29 | sizes(2) = decomp%ysz(2) 30 | sizes(3) = decomp%zsz(3) 31 | if (ipencil == 1) then 32 | subsizes(1) = decomp%xsz(1) 33 | subsizes(2) = decomp%xsz(2) 34 | subsizes(3) = decomp%xsz(3) 35 | starts(1) = decomp%xst(1)-1 ! 
0-based index 36 | starts(2) = decomp%xst(2)-1 37 | starts(3) = decomp%xst(3)-1 38 | else if (ipencil == 2) then 39 | subsizes(1) = decomp%ysz(1) 40 | subsizes(2) = decomp%ysz(2) 41 | subsizes(3) = decomp%ysz(3) 42 | starts(1) = decomp%yst(1)-1 43 | starts(2) = decomp%yst(2)-1 44 | starts(3) = decomp%yst(3)-1 45 | else if (ipencil == 3) then 46 | subsizes(1) = decomp%zsz(1) 47 | subsizes(2) = decomp%zsz(2) 48 | subsizes(3) = decomp%zsz(3) 49 | starts(1) = decomp%zst(1)-1 50 | starts(2) = decomp%zst(2)-1 51 | starts(3) = decomp%zst(3)-1 52 | endif 53 | 54 | call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, starts, & 55 | MPI_ORDER_FORTRAN, data_type, newtype, ierror) 56 | call MPI_TYPE_COMMIT(newtype,ierror) 57 | call MPI_FILE_SET_VIEW(fh,disp,data_type, & 58 | newtype,'native',MPI_INFO_NULL,ierror) 59 | call MPI_FILE_READ_ALL(fh, var, & 60 | subsizes(1)*subsizes(2)*subsizes(3), & 61 | data_type, MPI_STATUS_IGNORE, ierror) 62 | call MPI_TYPE_FREE(newtype,ierror) 63 | 64 | ! update displacement for the next read operation 65 | disp = disp + sizes(1)*sizes(2)*sizes(3)*mytype_bytes 66 | if (data_type == complex_type) then 67 | disp = disp + sizes(1)*sizes(2)*sizes(3)*mytype_bytes 68 | end if 69 | -------------------------------------------------------------------------------- /src/2decomp/io_write_one.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! This file contain common code to be included by subroutines 13 | ! 'mpiio_write_one_...' 
in io.f90 14 | 15 | ! Using MPI-IO to write a distributed 3D array into a file 16 | 17 | if (present(opt_decomp)) then 18 | decomp = opt_decomp 19 | else 20 | call get_decomp_info(decomp) 21 | end if 22 | 23 | ! determine subarray parameters 24 | sizes(1) = decomp%xsz(1) 25 | sizes(2) = decomp%ysz(2) 26 | sizes(3) = decomp%zsz(3) 27 | 28 | if (ipencil == 1) then 29 | subsizes(1) = decomp%xsz(1) 30 | subsizes(2) = decomp%xsz(2) 31 | subsizes(3) = decomp%xsz(3) 32 | starts(1) = decomp%xst(1)-1 ! 0-based index 33 | starts(2) = decomp%xst(2)-1 34 | starts(3) = decomp%xst(3)-1 35 | else if (ipencil == 2) then 36 | subsizes(1) = decomp%ysz(1) 37 | subsizes(2) = decomp%ysz(2) 38 | subsizes(3) = decomp%ysz(3) 39 | starts(1) = decomp%yst(1)-1 40 | starts(2) = decomp%yst(2)-1 41 | starts(3) = decomp%yst(3)-1 42 | else if (ipencil == 3) then 43 | subsizes(1) = decomp%zsz(1) 44 | subsizes(2) = decomp%zsz(2) 45 | subsizes(3) = decomp%zsz(3) 46 | starts(1) = decomp%zst(1)-1 47 | starts(2) = decomp%zst(2)-1 48 | starts(3) = decomp%zst(3)-1 49 | endif 50 | 51 | #if defined(_T3PIO) 52 | call MPI_INFO_CREATE(info, ierror) 53 | gs = ceiling(real(sizes(1),mytype)*real(sizes(2),mytype)* & 54 | real(sizes(3),mytype)/1024./1024.) 55 | call t3pio_set_info(MPI_COMM_WORLD, info, "./", ierror, & 56 | GLOBAL_SIZE=gs, factor=1) 57 | #endif 58 | 59 | call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, starts, & 60 | MPI_ORDER_FORTRAN, data_type, newtype, ierror) 61 | call MPI_TYPE_COMMIT(newtype,ierror) 62 | #if defined(_T3PIO) 63 | call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, & 64 | MPI_MODE_CREATE+MPI_MODE_WRONLY, info, fh, ierror) 65 | #else 66 | call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, & 67 | MPI_MODE_CREATE+MPI_MODE_WRONLY, MPI_INFO_NULL, & 68 | fh, ierror) 69 | #endif 70 | filesize = 0_MPI_OFFSET_KIND 71 | call MPI_FILE_SET_SIZE(fh,filesize,ierror) ! 
guarantee overwriting 72 | disp = 0_MPI_OFFSET_KIND 73 | call MPI_FILE_SET_VIEW(fh,disp,data_type, & 74 | newtype,'native',MPI_INFO_NULL,ierror) 75 | call MPI_FILE_WRITE_ALL(fh, var, & 76 | subsizes(1)*subsizes(2)*subsizes(3), & 77 | data_type, MPI_STATUS_IGNORE, ierror) 78 | call MPI_FILE_CLOSE(fh,ierror) 79 | call MPI_TYPE_FREE(newtype,ierror) 80 | #if defined(_T3PIO) 81 | call MPI_INFO_FREE(info,ierror) 82 | #endif 83 | -------------------------------------------------------------------------------- /src/2decomp/io_write_plane.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! This file contain common code to be included by subroutines 13 | ! 'mpiio_write_plane_3d_...' in io.f90 14 | 15 | ! It is much easier to implement if all mpi ranks participate I/O. 16 | ! Transpose the 3D data if necessary. 
17 | 18 | if (present(opt_decomp)) then 19 | decomp = opt_decomp 20 | else 21 | call get_decomp_info(decomp) 22 | end if 23 | 24 | if (iplane==1) then 25 | allocate(wk(decomp%xsz(1),decomp%xsz(2),decomp%xsz(3))) 26 | if (ipencil==1) then 27 | wk = var 28 | else if (ipencil==2) then 29 | call transpose_y_to_x(var,wk,decomp) 30 | else if (ipencil==3) then 31 | allocate(wk2(decomp%ysz(1),decomp%ysz(2),decomp%ysz(3))) 32 | call transpose_z_to_y(var,wk2,decomp) 33 | call transpose_y_to_x(wk2,wk,decomp) 34 | deallocate(wk2) 35 | end if 36 | allocate(wk2d(1,decomp%xsz(2),decomp%xsz(3))) 37 | do k=1,decomp%xsz(3) 38 | do j=1,decomp%xsz(2) 39 | wk2d(1,j,k)=wk(n,j,k) 40 | end do 41 | end do 42 | sizes(1) = 1 43 | sizes(2) = decomp%ysz(2) 44 | sizes(3) = decomp%zsz(3) 45 | subsizes(1) = 1 46 | subsizes(2) = decomp%xsz(2) 47 | subsizes(3) = decomp%xsz(3) 48 | starts(1) = 0 49 | starts(2) = decomp%xst(2)-1 50 | starts(3) = decomp%xst(3)-1 51 | 52 | else if (iplane==2) then 53 | allocate(wk(decomp%ysz(1),decomp%ysz(2),decomp%ysz(3))) 54 | if (ipencil==1) then 55 | call transpose_x_to_y(var,wk,decomp) 56 | else if (ipencil==2) then 57 | wk = var 58 | else if (ipencil==3) then 59 | call transpose_z_to_y(var,wk,decomp) 60 | end if 61 | allocate(wk2d(decomp%ysz(1),1,decomp%ysz(3))) 62 | do k=1,decomp%ysz(3) 63 | do i=1,decomp%ysz(1) 64 | wk2d(i,1,k)=wk(i,n,k) 65 | end do 66 | end do 67 | sizes(1) = decomp%xsz(1) 68 | sizes(2) = 1 69 | sizes(3) = decomp%zsz(3) 70 | subsizes(1) = decomp%ysz(1) 71 | subsizes(2) = 1 72 | subsizes(3) = decomp%ysz(3) 73 | starts(1) = decomp%yst(1)-1 74 | starts(2) = 0 75 | starts(3) = decomp%yst(3)-1 76 | 77 | else if (iplane==3) then 78 | allocate(wk(decomp%zsz(1),decomp%zsz(2),decomp%zsz(3))) 79 | if (ipencil==1) then 80 | allocate(wk2(decomp%ysz(1),decomp%ysz(2),decomp%ysz(3))) 81 | call transpose_x_to_y(var,wk2,decomp) 82 | call transpose_y_to_z(wk2,wk,decomp) 83 | deallocate(wk2) 84 | else if (ipencil==2) then 85 | call 
transpose_y_to_z(var,wk,decomp) 86 | else if (ipencil==3) then 87 | wk = var 88 | end if 89 | allocate(wk2d(decomp%zsz(1),decomp%zsz(2),1)) 90 | do j=1,decomp%zsz(2) 91 | do i=1,decomp%zsz(1) 92 | wk2d(i,j,1)=wk(i,j,n) 93 | end do 94 | end do 95 | sizes(1) = decomp%xsz(1) 96 | sizes(2) = decomp%ysz(2) 97 | sizes(3) = 1 98 | subsizes(1) = decomp%zsz(1) 99 | subsizes(2) = decomp%zsz(2) 100 | subsizes(3) = 1 101 | starts(1) = decomp%zst(1)-1 102 | starts(2) = decomp%zst(2)-1 103 | starts(3) = 0 104 | end if 105 | 106 | call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, starts, & 107 | MPI_ORDER_FORTRAN, data_type, newtype, ierror) 108 | call MPI_TYPE_COMMIT(newtype,ierror) 109 | call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, & 110 | MPI_MODE_CREATE+MPI_MODE_WRONLY, MPI_INFO_NULL, & 111 | fh, ierror) 112 | filesize = 0_MPI_OFFSET_KIND 113 | call MPI_FILE_SET_SIZE(fh,filesize,ierror) ! guarantee overwriting 114 | disp = 0_MPI_OFFSET_KIND 115 | call MPI_FILE_SET_VIEW(fh,disp,data_type, & 116 | newtype,'native',MPI_INFO_NULL,ierror) 117 | call MPI_FILE_WRITE_ALL(fh, wk2d, & 118 | subsizes(1)*subsizes(2)*subsizes(3), & 119 | data_type, MPI_STATUS_IGNORE, ierror) 120 | call MPI_FILE_CLOSE(fh,ierror) 121 | call MPI_TYPE_FREE(newtype,ierror) 122 | 123 | deallocate(wk,wk2d) 124 | -------------------------------------------------------------------------------- /src/2decomp/io_write_var.f90: -------------------------------------------------------------------------------- 1 | !======================================================================= 2 | ! This is part of the 2DECOMP&FFT library 3 | ! 4 | ! 2DECOMP&FFT is a software framework for general-purpose 2D (pencil) 5 | ! decomposition. It also implements a highly scalable distributed 6 | ! three-dimensional Fast Fourier Transform (FFT). 7 | ! 8 | ! Copyright (C) 2009-2012 Ning Li, the Numerical Algorithms Group (NAG) 9 | ! 10 | !======================================================================= 11 | 12 | ! 
This file contain common code to be included by subroutines 13 | ! 'write_var_...' in io.f90 14 | 15 | ! Using MPI-IO to write a distributed 3D variable to a file. File 16 | ! operations (open/close) need to be done in calling application. This 17 | ! allows multiple variables to be written to a single file. Together 18 | ! with the corresponding read operation, this is the perfect solution 19 | ! for applications to perform restart/checkpointing. 20 | 21 | if (present(opt_decomp)) then 22 | decomp = opt_decomp 23 | else 24 | call get_decomp_info(decomp) 25 | end if 26 | 27 | ! Create file type and set file view 28 | sizes(1) = decomp%xsz(1) 29 | sizes(2) = decomp%ysz(2) 30 | sizes(3) = decomp%zsz(3) 31 | if (ipencil == 1) then 32 | subsizes(1) = decomp%xsz(1) 33 | subsizes(2) = decomp%xsz(2) 34 | subsizes(3) = decomp%xsz(3) 35 | starts(1) = decomp%xst(1)-1 ! 0-based index 36 | starts(2) = decomp%xst(2)-1 37 | starts(3) = decomp%xst(3)-1 38 | else if (ipencil == 2) then 39 | subsizes(1) = decomp%ysz(1) 40 | subsizes(2) = decomp%ysz(2) 41 | subsizes(3) = decomp%ysz(3) 42 | starts(1) = decomp%yst(1)-1 43 | starts(2) = decomp%yst(2)-1 44 | starts(3) = decomp%yst(3)-1 45 | else if (ipencil == 3) then 46 | subsizes(1) = decomp%zsz(1) 47 | subsizes(2) = decomp%zsz(2) 48 | subsizes(3) = decomp%zsz(3) 49 | starts(1) = decomp%zst(1)-1 50 | starts(2) = decomp%zst(2)-1 51 | starts(3) = decomp%zst(3)-1 52 | endif 53 | 54 | call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, starts, & 55 | MPI_ORDER_FORTRAN, data_type, newtype, ierror) 56 | call MPI_TYPE_COMMIT(newtype,ierror) 57 | call MPI_FILE_SET_VIEW(fh,disp,data_type, & 58 | newtype,'native',MPI_INFO_NULL,ierror) 59 | call MPI_FILE_WRITE_ALL(fh, var, & 60 | subsizes(1)*subsizes(2)*subsizes(3), & 61 | data_type, MPI_STATUS_IGNORE, ierror) 62 | call MPI_TYPE_FREE(newtype,ierror) 63 | 64 | ! 
update displacement for the next write operation 65 | disp = disp + sizes(1)*sizes(2)*sizes(3)*mytype_bytes 66 | if (data_type == complex_type) then 67 | disp = disp + sizes(1)*sizes(2)*sizes(3)*mytype_bytes 68 | end if 69 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile 2 | 3 | ARCH ?= generic-gnu 4 | APP ?= two_phase_inc_isot 5 | USE_FAST_KERNELS ?= 0 6 | 7 | include targets/target.$(ARCH) 8 | include apps/$(APP)/app.$(APP) 9 | 10 | POST_PATH = apps/$(APP)/postp.$(APP) -I./postprocessing 11 | 12 | .PHONY: clean 13 | 14 | .SUFFIXES : 15 | .SUFFIXES : .o .c .f90 16 | 17 | .f90.o: 18 | $(FC) $(FFLAGS) $(DFLAGS) $(CPP_FLAGS) -c -I$(POST_PATH) -o $@ $< 19 | 20 | .c.o: 21 | $(CC) $(CFLAGS) -c -o $@ $< 22 | 23 | OBJS = profiler.o \ 24 | bound.o \ 25 | chkdiv.o \ 26 | chkdt.o \ 27 | common_mpi.o \ 28 | correc.o debug.o \ 29 | fft.o \ 30 | fftw.o \ 31 | fillps.o \ 32 | initflow.o \ 33 | initgrid.o \ 34 | initmpi.o \ 35 | initsolver.o \ 36 | load.o \ 37 | output.o \ 38 | apps/$(APP)/param.o \ 39 | rk.o \ 40 | source.o \ 41 | sanity.o \ 42 | 2decomp/decomp_2d.o \ 43 | 2decomp/io.o \ 44 | types.o 45 | 46 | ########################################################### 47 | ######## TO CUSTOMIZE BASED ON APPLICATION PHYSICS ######## 48 | ########################################################### 49 | 50 | 51 | ifeq ($(USE_FAST_KERNELS),1) 52 | DFLAGS += -D_FAST_KERNELS_1 53 | DFLAGS += -D_FAST_KERNELS_2 54 | DFLAGS += -D_FAST_KERNELS_3 55 | DFLAGS += -D_FAST_KERNELS_4 56 | endif 57 | 58 | ifeq ($(TURB_FORCING),1) 59 | DFLAGS += -D_TURB_FORCING 60 | endif 61 | 62 | ifeq ($(DO_POSTPROC),1) 63 | OBJS += postprocessing/post.o 64 | ifeq ($(USE_VOF),1) 65 | OBJS += postprocessing/tagging.o 66 | endif 67 | DFLAGS += -D_DO_POSTPROC 68 | endif 69 | 70 | ifeq ($(CONSTANT_COEFFS_POISSON),1) 71 | DFLAGS += -D_CONSTANT_COEFFS_POISSON 72 | OBJS += 
solver_gpu.o solver_cpu.o 73 | else 74 | OBJS += solver_vc.o 75 | LDFLAGS += -lHYPRE 76 | endif 77 | 78 | ifeq ($(HEAT_TRANSFER),1) 79 | DFLAGS += -D_HEAT_TRANSFER 80 | ifeq ($(BOUSSINESQ),1) 81 | DFLAGS += -D_BOUSSINESQ 82 | endif 83 | OBJS += gradls.o moms.o rks.o cmpt_divth.o 84 | endif 85 | 86 | ifeq ($(USE_VOF),1) 87 | DFLAGS += -D_USE_VOF 88 | OBJS += vof.o mom.o funcs.o 89 | else 90 | OBJS += mom.o funcs.o 91 | endif 92 | 93 | ifeq ($(USE_NVTX),1) 94 | DFLAGS += -D_USE_NVTX 95 | LDFLAGS += $(NVTX_LIB) 96 | endif 97 | 98 | ifeq ($(BENCHMARK_NO_IO),1) 99 | DFLAGS += -D_BENCHMARK_NO_IO 100 | endif 101 | 102 | 103 | ########################################################### 104 | 105 | TARGET = flutas 106 | 107 | all: $(TARGET).$(APP) 108 | ln -sf $(TARGET).$(APP) $(TARGET) 109 | 110 | main.o : apps/$(APP)/$(MAIN_SRC) $(OBJS) 111 | $(FC) $(FFLAGS) $(DFLAGS) $(CPP_FLAGS) -I$(POST_PATH) -c -o main.o $< 112 | 113 | $(TARGET).$(APP): $(OBJS) main.o 114 | $(FC) $(FFLAGS) $(DFLAGS) $(OBJS) $(LDFLAGS) main.o -o $(TARGET).$(APP) 115 | 116 | clean-obj: 117 | rm -rf *.o *.mod *dSYM && rm -rf 2decomp/*.{mod,d,o} && rm -rf postprocessing/*.{mod,d,o} && rm -rf apps/$(APP)/*.{mod,d,o} 118 | 119 | clean: clean-obj 120 | rm -rf $(TARGET)* 121 | 122 | include make.deps 123 | -------------------------------------------------------------------------------- /src/apps/single_phase/app.single_phase: -------------------------------------------------------------------------------- 1 | # Physics switches 2 | CONSTANT_COEFFS_POISSON ?= 1 3 | USE_VOF ?= 0 4 | VOF_DBG ?= 0 5 | HEAT_TRANSFER ?= 0 6 | DO_POSTPROC ?= 0 7 | TURB_FORCING ?= 0 8 | BOUSSINESQ ?= 0 9 | 10 | MAIN_SRC=main__single_phase.f90 11 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/allocation_x.h90: -------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(1)), & 2 | v_avg_g(ng(1)), & 3 | 
w_avg_g(ng(1)), & 4 | u_sqr_g(ng(1)), & 5 | v_sqr_g(ng(1)), & 6 | w_sqr_g(ng(1)), & 7 | tmp_avg_g(ng(1)), & 8 | tmp_sqr_g(ng(1)), & 9 | utmp_avg_g(ng(1)), & 10 | vtmp_avg_g(ng(1)), & 11 | wtmp_avg_g(ng(1)), & 12 | utmp_sqr_g(ng(1)), & 13 | vtmp_sqr_g(ng(1)), & 14 | wtmp_sqr_g(ng(1)), & 15 | uv_avg_g(ng(1)), & 16 | vw_avg_g(ng(1)), & 17 | wu_avg_g(ng(1)), & 18 | uv_sqr_g(ng(1)), & 19 | vw_sqr_g(ng(1)), & 20 | wu_sqr_g(ng(1)), & 21 | vorx_avg_g(ng(1)), & 22 | vorx_sqr_g(ng(1))) 23 | 24 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/allocation_y.h90: -------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(2)), & 2 | v_avg_g(ng(2)), & 3 | w_avg_g(ng(2)), & 4 | u_sqr_g(ng(2)), & 5 | v_sqr_g(ng(2)), & 6 | w_sqr_g(ng(2)), & 7 | tmp_avg_g(ng(2)), & 8 | tmp_sqr_g(ng(2)), & 9 | utmp_avg_g(ng(2)), & 10 | vtmp_avg_g(ng(2)), & 11 | wtmp_avg_g(ng(2)), & 12 | utmp_sqr_g(ng(2)), & 13 | vtmp_sqr_g(ng(2)), & 14 | wtmp_sqr_g(ng(2)), & 15 | uv_avg_g(ng(2)), & 16 | vw_avg_g(ng(2)), & 17 | wu_avg_g(ng(2)), & 18 | uv_sqr_g(ng(2)), & 19 | vw_sqr_g(ng(2)), & 20 | wu_sqr_g(ng(2)), & 21 | vorx_avg_g(ng(2)), & 22 | vorx_sqr_g(ng(2))) 23 | 24 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/allocation_z.h90: -------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(3)), & 2 | v_avg_g(ng(3)), & 3 | w_avg_g(ng(3)), & 4 | u_sqr_g(ng(3)), & 5 | v_sqr_g(ng(3)), & 6 | w_sqr_g(ng(3)), & 7 | tmp_avg_g(ng(3)), & 8 | tmp_sqr_g(ng(3)), & 9 | utmp_avg_g(ng(3)), & 10 | vtmp_avg_g(ng(3)), & 11 | wtmp_avg_g(ng(3)), & 12 | utmp_sqr_g(ng(3)), & 13 | vtmp_sqr_g(ng(3)), & 14 | wtmp_sqr_g(ng(3)), & 15 | uv_avg_g(ng(3)), & 16 | vw_avg_g(ng(3)), & 17 | wu_avg_g(ng(3)), & 18 | uv_sqr_g(ng(3)), & 19 | vw_sqr_g(ng(3)), & 20 | wu_sqr_g(ng(3)), & 21 | vorx_avg_g(ng(3)), 
& 22 | vorx_sqr_g(ng(3))) 23 | 24 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/out1d.h90: -------------------------------------------------------------------------------- 1 | ! out1d(fname,n,idir,z,dzlzi,p) 2 | ! 3 | ! writes the profile of a variable averaged 4 | ! over two domain directions (see output.f90) 5 | ! 6 | ! fname -> name of the file 7 | ! n -> size of the input array 8 | ! idir -> direction of the profile 9 | ! z -> z coordinate (grid is non-uniform in z) 10 | ! dzlzi -> dz/lz weight of a grid cell for averaging over z 11 | ! p -> 3D input scalar field 12 | ! 13 | ! modify the calls below as desired 14 | ! 15 | call out1d(trim(datadir)//'umean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,u) 16 | call out1d(trim(datadir)//'vmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,v) 17 | call out1d(trim(datadir)//'wmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zf_g,dzc/lz,w) 18 | call out1d(trim(datadir)//'umean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,u) 19 | call out1d(trim(datadir)//'vmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,v) 20 | call out1d(trim(datadir)//'wmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,w) 21 | call out1d(trim(datadir)//'umean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,u) 22 | call out1d(trim(datadir)//'vmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,v) 23 | call out1d(trim(datadir)//'wmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,w) 24 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/out2d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_2d(datadir,fname_bin,fname_log,varname,inorm,nslice,ng,time,istep,p) 3 | ! 4 | ! 
saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (each different plane that is saved should 10 | ! correspond to a different log file) 11 | ! varname -> name of the variable that is saved 12 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 13 | ! three components of the vector field 14 | ! inorm -> plane is perpendicular to direction inorm (1, 2, or 3) 15 | ! islice -> plane is of constant index islice in direction inorm 16 | ! ng -> array with the global number of points in each direction 17 | ! time -> physical time 18 | ! istep -> time step number 19 | ! p -> 3D input scalar field 20 | ! 21 | ! modify the calls below as desired 22 | ! 23 | call write_visu_2d(datadir,'vex_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_X', & 24 | 1,ng(1)/2,ng,time,istep, & 25 | u(1:n(1),1:n(2),1:n(3))) 26 | call write_visu_2d(datadir,'vey_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Y', & 27 | 1,ng(1)/2,ng,time,istep, & 28 | v(1:n(1),1:n(2),1:n(3))) 29 | call write_visu_2d(datadir,'vez_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Z', & 30 | 1,ng(1)/2,ng,time,istep, & 31 | w(1:n(1),1:n(2),1:n(3))) 32 | call write_visu_2d(datadir,'pre_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Pressure_P', & 33 | 1,ng(1)/2,ng,time,istep, & 34 | p(1:n(1),1:n(2),1:n(3))) 35 | !call write_visu_2d(datadir,'tmp_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Temperature', & 36 | ! 1,ng(1)/2,ng,time,istep, & 37 | ! 
tmp(1:n(1),1:n(2),1:n(3))) 38 | 39 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/out3d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_3d(datadir,fname_bin,fname_log,varname,nmin,nmax,nskip,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (can be the same for a time series of data with the same grid) 10 | ! varname -> name of the variable that is saved 11 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 12 | ! three components of the vector field 13 | ! nmin -> first element of the field that is saved in each direction, e.g. (/1,1,1/) 14 | ! nmax -> last element of the field that is saved in each direction, e.g. (/ng(1),ng(2),ng(3)/) 15 | ! nskip -> step size with which the grid points are saved, e.g. (/1,1,1/) if the whole array is saved 16 | ! time -> physical time 17 | ! istep -> time step number 18 | ! p -> 3D input scalar field 19 | ! 20 | ! modify the calls below as desired 21 | ! 
22 | call write_visu_3d(datadir,'vex_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_X', & 23 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 24 | u(1:n(1),1:n(2),1:n(3))) 25 | call write_visu_3d(datadir,'vey_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Y', & 26 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 27 | v(1:n(1),1:n(2),1:n(3))) 28 | call write_visu_3d(datadir,'vez_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Z', & 29 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 30 | w(1:n(1),1:n(2),1:n(3))) 31 | call write_visu_3d(datadir,'pre_fld_'//fldnum//'.bin','log_visu_3d.out','Pressure_P', & 32 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 33 | p(1:n(1),1:n(2),1:n(3))) 34 | !call write_visu_3d(datadir,'tmp_fld_'//fldnum//'.bin','log_visu_3d.out','Temperature', & 35 | ! (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 36 | ! tmp(1:n(1),1:n(2),1:n(3))) 37 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/time_averaging_gas.h90: -------------------------------------------------------------------------------- 1 | !!!!!!!!!!!!!!!! 2 | !GAS 3 | !!!!!!!!!!!!!!!! 4 | call time_sp_avg( avg_dir,do_avg,do_favre,(/1,0,0/),'data/post/time_averaging/u_avg_fld_' //fldnum//'_g.out', & 5 | n,ng,istep,i_av,iout1d,nh_d,nh_u, & 6 | rho,u, & 7 | u_avg_g,u_sqr_g,u_vol_avg_g,u_vol_sqr_g ) 8 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,1,0/),'data/post/time_averaging/v_avg_fld_' //fldnum//'_g.out', & 9 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 10 | rho,v, & 11 | v_avg_g,v_sqr_g,v_vol_avg_g,v_vol_sqr_g ) 12 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,1/),'data/post/time_averaging/w_avg_fld_' //fldnum//'_g.out', & 13 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 14 | rho,w, & 15 | w_avg_g,w_sqr_g,w_vol_avg_g,w_vol_sqr_g ) 16 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 17 | !GAS <(uv)^2> <(vw)^2> <(wu)^2> 18 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
19 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/uv_avg_fld_' //fldnum//'_g.out', & 20 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 21 | rho,uv, & 22 | uv_avg_g,uv_sqr_g,uv_vol_avg_g,uv_vol_sqr_g ) 23 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vw_avg_fld_' //fldnum//'_g.out', & 24 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 25 | rho,vw, & 26 | vw_avg_g,vw_sqr_g,vw_vol_avg_g,vw_vol_sqr_g ) 27 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wu_avg_fld_' //fldnum//'_g.out', & 28 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 29 | rho,wu, & 30 | wu_avg_g,wu_sqr_g,wu_vol_avg_g,wu_vol_sqr_g ) 31 | !!!!!!!!!!!!!!!! 32 | !GAS 33 | !!!!!!!!!!!!!!!! 34 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vorx_avg_fld_' //fldnum//'_g.out', & 35 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 36 | rho,vorx, & 37 | vorx_avg_g,vorx_sqr_g,vorx_vol_avg_g,vorx_vol_sqr_g ) 38 | -------------------------------------------------------------------------------- /src/apps/single_phase/postp.single_phase/time_averaging_heat_transfer.h90: -------------------------------------------------------------------------------- 1 | !!!!!!!!!!!!!!!! 2 | !GAS 3 | !!!!!!!!!!!!!!!! 4 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/tmp_avg_fld_' //fldnum//'_g.out', & 5 | n,ng,istep,i_av,iout1d,nh_d,nh_t, & 6 | rho,tmp, & 7 | tmp_avg_g,tmp_sqr_g,tmp_vol_avg_g,tmp_vol_sqr_g ) 8 | !!!!!!!!!!!!!!!! 9 | !GAS <(uT)^2> <(vT)^2> <(wT)^2> 10 | !!!!!!!!!!!!!!!! 
11 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/utmp_avg_fld_' //fldnum//'_g.out', & 12 | n,ng,istep,i_av,iout1d,nh_d,0, & 13 | rho,utmp, & 14 | utmp_avg_g,utmp_sqr_g,utmp_vol_avg_g,utmp_vol_sqr_g ) 15 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vtmp_avg_fld_' //fldnum//'_g.out', & 16 | n,ng,istep,i_av,iout1d,nh_d,0, & 17 | rho,vtmp, & 18 | vtmp_avg_g,vtmp_sqr_g,vtmp_vol_avg_g,vtmp_vol_sqr_g ) 19 | call time_sp_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wtmp_avg_fld_' //fldnum//'_g.out', & 20 | n,ng,istep,i_av,iout1d,nh_d,0, & 21 | rho,wtmp, & 22 | wtmp_avg_g,wtmp_sqr_g,wtmp_vol_avg_g,wtmp_vol_sqr_g ) 23 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/app.two_phase_ht: -------------------------------------------------------------------------------- 1 | # Physics switches 2 | CONSTANT_COEFFS_POISSON ?= 1 3 | USE_VOF ?= 1 4 | VOF_DBG ?= 0 5 | HEAT_TRANSFER ?= 1 6 | DO_POSTPROC ?= 1 7 | TURB_FORCING ?= 0 8 | BOUSSINESQ ?= 1 9 | 10 | MAIN_SRC=main__two_phase_ht.f90 11 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/allocation_x.h90: -------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(1)), & 2 | v_avg_g(ng(1)), & 3 | w_avg_g(ng(1)), & 4 | u_avg_l(ng(1)), & 5 | v_avg_l(ng(1)), & 6 | w_avg_l(ng(1)), & 7 | u_sqr_g(ng(1)), & 8 | v_sqr_g(ng(1)), & 9 | w_sqr_g(ng(1)), & 10 | u_sqr_l(ng(1)), & 11 | v_sqr_l(ng(1)), & 12 | w_sqr_l(ng(1)), & 13 | tmp_avg_g(ng(1)), & 14 | tmp_sqr_g(ng(1)), & 15 | tmp_avg_l(ng(1)), & 16 | tmp_sqr_l(ng(1)), & 17 | utmp_avg_g(ng(1)), & 18 | vtmp_avg_g(ng(1)), & 19 | wtmp_avg_g(ng(1)), & 20 | utmp_avg_l(ng(1)), & 21 | vtmp_avg_l(ng(1)), & 22 | wtmp_avg_l(ng(1)), & 23 | utmp_sqr_g(ng(1)), & 24 | vtmp_sqr_g(ng(1)), & 25 | wtmp_sqr_g(ng(1)), & 26 | utmp_sqr_l(ng(1)), & 27 | 
vtmp_sqr_l(ng(1)), & 28 | wtmp_sqr_l(ng(1)), & 29 | uv_avg_g(ng(1)), & 30 | vw_avg_g(ng(1)), & 31 | wu_avg_g(ng(1)), & 32 | uv_avg_l(ng(1)), & 33 | vw_avg_l(ng(1)), & 34 | wu_avg_l(ng(1)), & 35 | uv_sqr_g(ng(1)), & 36 | vw_sqr_g(ng(1)), & 37 | wu_sqr_g(ng(1)), & 38 | uv_sqr_l(ng(1)), & 39 | vw_sqr_l(ng(1)), & 40 | wu_sqr_l(ng(1)), & 41 | void_avg(ng(1)), & 42 | void_sqr(ng(1)), & 43 | vorx_avg_g(ng(1)), & 44 | vorx_avg_l(ng(1)), & 45 | vorx_sqr_g(ng(1)), & 46 | vorx_sqr_l(ng(1))) 47 | 48 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/allocation_y.h90: -------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(2)), & 2 | v_avg_g(ng(2)), & 3 | w_avg_g(ng(2)), & 4 | u_avg_l(ng(2)), & 5 | v_avg_l(ng(2)), & 6 | w_avg_l(ng(2)), & 7 | u_sqr_g(ng(2)), & 8 | v_sqr_g(ng(2)), & 9 | w_sqr_g(ng(2)), & 10 | u_sqr_l(ng(2)), & 11 | v_sqr_l(ng(2)), & 12 | w_sqr_l(ng(2)), & 13 | tmp_avg_g(ng(2)), & 14 | tmp_sqr_g(ng(2)), & 15 | tmp_avg_l(ng(2)), & 16 | tmp_sqr_l(ng(2)), & 17 | utmp_avg_g(ng(2)), & 18 | vtmp_avg_g(ng(2)), & 19 | wtmp_avg_g(ng(2)), & 20 | utmp_avg_l(ng(2)), & 21 | vtmp_avg_l(ng(2)), & 22 | wtmp_avg_l(ng(2)), & 23 | utmp_sqr_g(ng(2)), & 24 | vtmp_sqr_g(ng(2)), & 25 | wtmp_sqr_g(ng(2)), & 26 | utmp_sqr_l(ng(2)), & 27 | vtmp_sqr_l(ng(2)), & 28 | wtmp_sqr_l(ng(2)), & 29 | uv_avg_g(ng(2)), & 30 | vw_avg_g(ng(2)), & 31 | wu_avg_g(ng(2)), & 32 | uv_avg_l(ng(2)), & 33 | vw_avg_l(ng(2)), & 34 | wu_avg_l(ng(2)), & 35 | uv_sqr_g(ng(2)), & 36 | vw_sqr_g(ng(2)), & 37 | wu_sqr_g(ng(2)), & 38 | uv_sqr_l(ng(2)), & 39 | vw_sqr_l(ng(2)), & 40 | wu_sqr_l(ng(2)), & 41 | void_avg(ng(2)), & 42 | void_sqr(ng(2)), & 43 | vorx_avg_g(ng(2)), & 44 | vorx_avg_l(ng(2)), & 45 | vorx_sqr_g(ng(2)), & 46 | vorx_sqr_l(ng(2))) 47 | 48 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/allocation_z.h90: 
-------------------------------------------------------------------------------- 1 | allocate ( u_avg_g(ng(3)), & 2 | v_avg_g(ng(3)), & 3 | w_avg_g(ng(3)), & 4 | u_avg_l(ng(3)), & 5 | v_avg_l(ng(3)), & 6 | w_avg_l(ng(3)), & 7 | u_sqr_g(ng(3)), & 8 | v_sqr_g(ng(3)), & 9 | w_sqr_g(ng(3)), & 10 | u_sqr_l(ng(3)), & 11 | v_sqr_l(ng(3)), & 12 | w_sqr_l(ng(3)), & 13 | tmp_avg_g(ng(3)), & 14 | tmp_sqr_g(ng(3)), & 15 | tmp_avg_l(ng(3)), & 16 | tmp_sqr_l(ng(3)), & 17 | utmp_avg_g(ng(3)), & 18 | vtmp_avg_g(ng(3)), & 19 | wtmp_avg_g(ng(3)), & 20 | utmp_avg_l(ng(3)), & 21 | vtmp_avg_l(ng(3)), & 22 | wtmp_avg_l(ng(3)), & 23 | utmp_sqr_g(ng(3)), & 24 | vtmp_sqr_g(ng(3)), & 25 | wtmp_sqr_g(ng(3)), & 26 | utmp_sqr_l(ng(3)), & 27 | vtmp_sqr_l(ng(3)), & 28 | wtmp_sqr_l(ng(3)), & 29 | uv_avg_g(ng(3)), & 30 | vw_avg_g(ng(3)), & 31 | wu_avg_g(ng(3)), & 32 | uv_avg_l(ng(3)), & 33 | vw_avg_l(ng(3)), & 34 | wu_avg_l(ng(3)), & 35 | uv_sqr_g(ng(3)), & 36 | vw_sqr_g(ng(3)), & 37 | wu_sqr_g(ng(3)), & 38 | uv_sqr_l(ng(3)), & 39 | vw_sqr_l(ng(3)), & 40 | wu_sqr_l(ng(3)), & 41 | void_avg(ng(3)), & 42 | void_sqr(ng(3)), & 43 | vorx_avg_g(ng(3)), & 44 | vorx_avg_l(ng(3)), & 45 | vorx_sqr_g(ng(3)), & 46 | vorx_sqr_l(ng(3))) 47 | 48 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/out1d.h90: -------------------------------------------------------------------------------- 1 | ! out1d(fname,n,idir,z,dzlzi,p) 2 | ! 3 | ! writes the profile of a variable averaged 4 | ! over two domain directions (see output.f90) 5 | ! 6 | ! fname -> name of the file 7 | ! n -> size of the input array 8 | ! idir -> direction of the profile 9 | ! z -> z coordinate (grid is non-uniform in z) 10 | ! dzlzi -> dz/lz weight of a grid cell for averaging over z 11 | ! p -> 3D input scalar field 12 | ! 13 | ! modify the calls below as desired 14 | ! 
15 | !call out1d(trim(datadir)//'umean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,u) 16 | !call out1d(trim(datadir)//'vmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,v) 17 | !call out1d(trim(datadir)//'wmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zf_g,dzc/lz,w) 18 | !call out1d(trim(datadir)//'umean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,u) 19 | !call out1d(trim(datadir)//'vmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,v) 20 | !call out1d(trim(datadir)//'wmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,w) 21 | !call out1d(trim(datadir)//'umean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,u) 22 | !call out1d(trim(datadir)//'vmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,v) 23 | !call out1d(trim(datadir)//'wmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,w) 24 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/out2d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_2d(datadir,fname_bin,fname_log,varname,inorm,nslice,ng,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (each different plane that is saved should 10 | ! correspond to a different log file) 11 | ! varname -> name of the variable that is saved 12 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 13 | ! three components of the vector field 14 | ! inorm -> plane is perpendicular to direction inorm (1, 2, or 3) 15 | ! islice -> plane is of constant index islice in direction inorm 16 | ! 
ng -> array with the global number of points in each direction 17 | ! time -> physical time 18 | ! istep -> time step number 19 | ! p -> 3D input scalar field 20 | ! 21 | ! modify the calls below as desired 22 | ! 23 | call write_visu_2d(datadir,'vex_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_X', & 24 | 1,ng(1)/2,ng,time,istep, & 25 | u(1:n(1),1:n(2),1:n(3))) 26 | call write_visu_2d(datadir,'vey_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Y', & 27 | 1,ng(1)/2,ng,time,istep, & 28 | v(1:n(1),1:n(2),1:n(3))) 29 | call write_visu_2d(datadir,'vez_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Z', & 30 | 1,ng(1)/2,ng,time,istep, & 31 | w(1:n(1),1:n(2),1:n(3))) 32 | call write_visu_2d(datadir,'pre_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Pressure_P', & 33 | 1,ng(1)/2,ng,time,istep, & 34 | p(1:n(1),1:n(2),1:n(3))) 35 | call write_visu_2d(datadir,'tmp_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Temperature', & 36 | 1,ng(1)/2,ng,time,istep, & 37 | tmp(1:n(1),1:n(2),1:n(3))) 38 | call write_visu_2d(datadir,'vof_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','VoF', & 39 | 1,ng(1)/2,ng,time,istep, & 40 | psi(1:n(1),1:n(2),1:n(3))) 41 | 42 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/out3d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_3d(datadir,fname_bin,fname_log,varname,nmin,nmax,nskip,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (can be the same for a time series of data with the same grid) 10 | ! 
varname -> name of the variable that is saved 11 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 12 | ! three components of the vector field 13 | ! nmin -> first element of the field that is saved in each direction, e.g. (/1,1,1/) 14 | ! nmax -> last element of the field that is saved in each direction, e.g. (/ng(1),ng(2),ng(3)/) 15 | ! nskip -> step size with which the grid points are saved, e.g. (/1,1,1/) if the whole array is saved 16 | ! time -> physical time 17 | ! istep -> time step number 18 | ! p -> 3D input scalar field 19 | ! 20 | ! modify the calls below as desired 21 | ! 22 | call write_visu_3d(datadir,'vex_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_X', & 23 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 24 | u(1:n(1),1:n(2),1:n(3))) 25 | call write_visu_3d(datadir,'vey_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Y', & 26 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 27 | v(1:n(1),1:n(2),1:n(3))) 28 | call write_visu_3d(datadir,'vez_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Z', & 29 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 30 | w(1:n(1),1:n(2),1:n(3))) 31 | call write_visu_3d(datadir,'pre_fld_'//fldnum//'.bin','log_visu_3d.out','Pressure_P', & 32 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 33 | p(1:n(1),1:n(2),1:n(3))) 34 | call write_visu_3d(datadir,'tmp_fld_'//fldnum//'.bin','log_visu_3d.out','Temperature', & 35 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 36 | tmp(1:n(1),1:n(2),1:n(3))) 37 | call write_visu_3d(datadir,'vof_fld_'//fldnum//'.bin','log_visu_3d.out','VoF', & 38 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 39 | psi(1:n(1),1:n(2),1:n(3))) 40 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/time_averaging_gas.h90: -------------------------------------------------------------------------------- 1 | !!!!!!!!!!!!!!!! 
2 | !GAS 3 | !!!!!!!!!!!!!!!! 4 | call time_tw_avg( avg_dir,do_avg,do_favre,(/1,0,0/),'data/post/time_averaging/u_avg_fld_' //fldnum//'_g.out', & 5 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 6 | (1.d0-psi),rho,u, & 7 | u_avg_g,u_sqr_g,u_vol_avg_g,u_vol_sqr_g ) 8 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,1,0/),'data/post/time_averaging/v_avg_fld_' //fldnum//'_g.out', & 9 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 10 | (1.d0-psi),rho,v, & 11 | v_avg_g,v_sqr_g,v_vol_avg_g,v_vol_sqr_g ) 12 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,1/),'data/post/time_averaging/w_avg_fld_' //fldnum//'_g.out', & 13 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 14 | (1.d0-psi),rho,w, & 15 | w_avg_g,w_sqr_g,w_vol_avg_g,w_vol_sqr_g ) 16 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 17 | !GAS <(uv)^2> <(vw)^2> <(wu)^2> 18 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 19 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/uv_avg_fld_' //fldnum//'_g.out', & 20 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 21 | (1.d0-psi),rho,uv, & 22 | uv_avg_g,uv_sqr_g,uv_vol_avg_g,uv_vol_sqr_g ) 23 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vw_avg_fld_' //fldnum//'_g.out', & 24 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 25 | (1.d0-psi),rho,vw, & 26 | vw_avg_g,vw_sqr_g,vw_vol_avg_g,vw_vol_sqr_g ) 27 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wu_avg_fld_' //fldnum//'_g.out', & 28 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 29 | (1.d0-psi),rho,wu, & 30 | wu_avg_g,wu_sqr_g,wu_vol_avg_g,wu_vol_sqr_g ) 31 | !!!!!!!!!!!!!!!! 32 | !GAS 33 | !!!!!!!!!!!!!!!! 
34 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vorx_avg_fld_' //fldnum//'_g.out', & 35 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 36 | (1.d0-psi),rho,vorx, & 37 | vorx_avg_g,vorx_sqr_g,vorx_vol_avg_g,vorx_vol_sqr_g ) 38 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/time_averaging_heat_transfer.h90: -------------------------------------------------------------------------------- 1 | !!!!!!!!!!!!!!!! 2 | !LIQUID 3 | !!!!!!!!!!!!!!!! 4 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/tmp_avg_fld_' //fldnum//'.out', & 5 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_t, & 6 | psi,rho,tmp, & 7 | tmp_avg_l,tmp_sqr_l,tmp_vol_avg_l,tmp_vol_sqr_l ) 8 | !!!!!!!!!!!!!!!! 9 | !LIQUID <(uT)^2> <(vT)^2> <(wT)^2> 10 | !!!!!!!!!!!!!!!! 11 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/utmp_avg_fld_' //fldnum//'.out', & 12 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 13 | psi,rho,utmp, & 14 | utmp_avg_l,utmp_sqr_l,utmp_vol_avg_l,utmp_vol_sqr_l ) 15 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vtmp_avg_fld_' //fldnum//'.out', & 16 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 17 | psi,rho,vtmp, & 18 | vtmp_avg_l,vtmp_sqr_l,vtmp_vol_avg_l,vtmp_vol_sqr_l ) 19 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wtmp_avg_fld_' //fldnum//'.out', & 20 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 21 | psi,rho,wtmp, & 22 | wtmp_avg_l,wtmp_sqr_l,wtmp_vol_avg_l,wtmp_vol_sqr_l ) 23 | !!!!!!!!!!!!!!!! 24 | !GAS 25 | !!!!!!!!!!!!!!!! 26 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/tmp_avg_fld_' //fldnum//'_g.out', & 27 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_t, & 28 | (1.d0-psi),rho,tmp, & 29 | tmp_avg_g,tmp_sqr_g,tmp_vol_avg_g,tmp_vol_sqr_g ) 30 | !!!!!!!!!!!!!!!! 31 | !GAS <(uT)^2> <(vT)^2> <(wT)^2> 32 | !!!!!!!!!!!!!!!! 
33 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/utmp_avg_fld_' //fldnum//'_g.out', & 34 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 35 | (1.d0-psi),rho,utmp, & 36 | utmp_avg_g,utmp_sqr_g,utmp_vol_avg_g,utmp_vol_sqr_g ) 37 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vtmp_avg_fld_' //fldnum//'_g.out', & 38 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 39 | (1.d0-psi),rho,vtmp, & 40 | vtmp_avg_g,vtmp_sqr_g,vtmp_vol_avg_g,vtmp_vol_sqr_g ) 41 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wtmp_avg_fld_' //fldnum//'_g.out', & 42 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 43 | (1.d0-psi),rho,wtmp, & 44 | wtmp_avg_g,wtmp_sqr_g,wtmp_vol_avg_g,wtmp_vol_sqr_g ) 45 | -------------------------------------------------------------------------------- /src/apps/two_phase_ht/postp.two_phase_ht/time_averaging_liquid.h90: -------------------------------------------------------------------------------- 1 | !!!!!!!!!!!!!!!! 2 | !VOID FRACTION <1-psi> <(1-psi)^2> 3 | !!!!!!!!!!!!!!!! 4 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/void_avg_fld_' //fldnum//'.out', & 5 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_v, & 6 | (0.d0*psi),rho,(1.d0 - psi),& 7 | void_avg,void_sqr,void_vol_avg,void_vol_sqr ) 8 | !!!!!!!!!!!!!!!! 9 | !LIQUID 10 | !!!!!!!!!!!!!!!! 
11 | call time_tw_avg( avg_dir,do_avg,do_favre,(/1,0,0/),'data/post/time_averaging/u_avg_fld_' //fldnum//'.out', & 12 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 13 | psi,rho,u, & 14 | u_avg_l,u_sqr_l,u_vol_avg_l,u_vol_sqr_l ) 15 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,1,0/),'data/post/time_averaging/v_avg_fld_' //fldnum//'.out', & 16 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 17 | psi,rho,v, & 18 | v_avg_l,v_sqr_l,v_vol_avg_l,v_vol_sqr_l ) 19 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,1/),'data/post/time_averaging/w_avg_fld_' //fldnum//'.out', & 20 | n,ng,istep,i_av,iout1d,nh_d,nh_v,nh_u, & 21 | psi,rho,w, & 22 | w_avg_l,w_sqr_l,w_vol_avg_l,w_vol_sqr_l ) 23 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 24 | !LIQUID <(uv)^2> <(vw)^2> <(wu)^2> 25 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 26 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/uv_avg_fld_' //fldnum//'.out', & 27 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 28 | psi,rho,uv, & 29 | uv_avg_l,uv_sqr_l,uv_vol_avg_l,uv_vol_sqr_l ) 30 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vw_avg_fld_' //fldnum//'.out', & 31 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 32 | psi,rho,vw, & 33 | vw_avg_l,vw_sqr_l,vw_vol_avg_l,vw_vol_sqr_l ) 34 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/wu_avg_fld_' //fldnum//'.out', & 35 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 36 | psi,rho,wu, & 37 | wu_avg_l,wu_sqr_l,wu_vol_avg_l,wu_vol_sqr_l ) 38 | !!!!!!!!!!!!!!!! 39 | !LIQUID 40 | !!!!!!!!!!!!!!!! 
41 | call time_tw_avg( avg_dir,do_avg,do_favre,(/0,0,0/),'data/post/time_averaging/vorx_avg_fld_' //fldnum//'.out', & 42 | n,ng,istep,i_av,iout1d,nh_d,nh_v,0, & 43 | psi,rho,vorx, & 44 | vorx_avg_l,vorx_sqr_l,vorx_vol_avg_l,vorx_vol_sqr_l ) 45 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot/app.two_phase_inc_isot: -------------------------------------------------------------------------------- 1 | # Physics switches 2 | CONSTANT_COEFFS_POISSON ?= 1 3 | USE_VOF ?= 1 4 | VOF_DBG ?= 0 5 | HEAT_TRANSFER ?= 0 6 | DO_POSTPROC ?= 0 7 | TURB_FORCING ?= 0 8 | 9 | MAIN_SRC=main__two_phase_inc_isot.f90 10 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot/postp.two_phase_inc_isot/dropcheck.h90: -------------------------------------------------------------------------------- 1 | if(mod(istep,iout0d).eq.0) then 2 | ! 3 | ! compute the bubble/droplet trajectory and velocity 4 | ! 5 | xd = 0.0d0 6 | yd = 0.0d0 7 | zd = 0.0d0 8 | ut = 0.0d0 9 | vt = 0.0d0 10 | wt = 0.0d0 11 | ! 12 | do k=1,n(3) 13 | do j=1,n(2) 14 | do i=1,n(1) 15 | ! 16 | if(psi(i,j,k).gt.1.0e-8) then 17 | ! 18 | !zcd = (k -0.5d0)*dl(3) !- pos(3) 19 | !ycd = (j+coord(2)*n(2)-0.5d0)*dl(2) !- pos(2) 20 | !xcd = (i+coord(1)*n(1)-0.5d0)*dl(1) !- pos(1) 21 | zcd = (k+ijk_start(3)-0.5d0)*dl(3) !- pos(3) 22 | ycd = (j+ijk_start(2)-0.5d0)*dl(2) !- pos(2) 23 | xcd = (i+ijk_start(1)-0.5d0)*dl(1) !- pos(1) 24 | ! 25 | xd = xd + dl(1)*dl(2)*dl(3)*xcd*(psi(i,j,k)) 26 | yd = yd + dl(1)*dl(2)*dl(3)*ycd*(psi(i,j,k)) 27 | zd = zd + dl(1)*dl(2)*dl(3)*zcd*(psi(i,j,k)) 28 | ut = ut + dl(1)*dl(2)*dl(3)*(0.5d0*(u(i,j,k)+u(i-1,j,k)))*(psi(i,j,k)) ! along x 29 | vt = vt + dl(1)*dl(2)*dl(3)*(0.5d0*(v(i,j,k)+v(i,j-1,k)))*(psi(i,j,k)) ! along y 30 | wt = wt + dl(1)*dl(2)*dl(3)*(0.5d0*(w(i,j,k)+w(i,j,k-1)))*(psi(i,j,k)) ! along z 31 | ! 32 | endif 33 | ! 
34 | enddo 35 | enddo 36 | enddo 37 | vol = sum(psi(1:n(1),1:n(2),1:n(3)))*dl(1)*dl(2)*dl(3) ! volume of the droplet 38 | call mpi_allreduce(MPI_IN_PLACE,xd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 39 | call mpi_allreduce(MPI_IN_PLACE,yd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 40 | call mpi_allreduce(MPI_IN_PLACE,zd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 41 | call mpi_allreduce(MPI_IN_PLACE,vol,1,mpi_real8,mpi_sum,comm_cart,ierr) 42 | call mpi_allreduce(MPI_IN_PLACE,ut ,1,mpi_real8,mpi_sum,comm_cart,ierr) 43 | call mpi_allreduce(MPI_IN_PLACE,vt ,1,mpi_real8,mpi_sum,comm_cart,ierr) 44 | call mpi_allreduce(MPI_IN_PLACE,wt ,1,mpi_real8,mpi_sum,comm_cart,ierr) 45 | xd = xd/vol 46 | yd = yd/vol 47 | zd = zd/vol 48 | ut = ut/vol 49 | vt = vt/vol 50 | wt = wt/vol 51 | if(myid.eq.0) then 52 | !call out0d('data/pos_vt.out',7,(/time/(lref/uref),xd/lx,yd/ly,zd/lz,ut/uref,vt/uref,wt/uref/)) 53 | !call out0d('data/pos_vt.out',7,(/time/(lref/uref),xd/lref,yd/lref,zd/lref,ut/uref,vt/uref,wt/uref/)) 54 | call out0d('data/pos_vt.out',7,(/time,xd,yd,zd,ut,vt,wt/)) 55 | endif 56 | ! 57 | endif 58 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot/postp.two_phase_inc_isot/out1d.h90: -------------------------------------------------------------------------------- 1 | ! out1d(fname,n,idir,z,dzlzi,p) 2 | ! 3 | ! writes the profile of a variable averaged 4 | ! over two domain directions (see output.f90) 5 | ! 6 | ! fname -> name of the file 7 | ! n -> size of the input array 8 | ! idir -> direction of the profile 9 | ! z -> z coordinate (grid is non-uniform in z) 10 | ! dzlzi -> dz/lz weight of a grid cell for averaging over z 11 | ! p -> 3D input scalar field 12 | ! 13 | ! modify the calls below as desired 14 | ! 
15 | !call out1d(trim(datadir)//'umean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,u) 16 | !call out1d(trim(datadir)//'vmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,v) 17 | !call out1d(trim(datadir)//'wmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zf_g,dzc/lz,w) 18 | !call out1d(trim(datadir)//'umean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,u) 19 | !call out1d(trim(datadir)//'vmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,v) 20 | !call out1d(trim(datadir)//'wmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,w) 21 | !call out1d(trim(datadir)//'umean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,u) 22 | !call out1d(trim(datadir)//'vmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,v) 23 | !call out1d(trim(datadir)//'wmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,w) 24 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot/postp.two_phase_inc_isot/out2d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_2d(datadir,fname_bin,fname_log,varname,inorm,nslice,ng,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (each different plane that is saved should 10 | ! correspond to a different log file) 11 | ! varname -> name of the variable that is saved 12 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 13 | ! three components of the vector field 14 | ! inorm -> plane is perpendicular to direction inorm (1, 2, or 3) 15 | ! islice -> plane is of constant index islice in direction inorm 16 | ! 
ng -> array with the global number of points in each direction 17 | ! time -> physical time 18 | ! istep -> time step number 19 | ! p -> 3D input scalar field 20 | ! 21 | ! modify the calls below as desired 22 | ! 23 | call write_visu_2d(datadir,'u_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_X', & 24 | 2,ng(2)/2,ng,time,istep, & 25 | u(1:n(1),1:n(2),1:n(3))) 26 | call write_visu_2d(datadir,'v_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Y', & 27 | 2,ng(2)/2,ng,time,istep, & 28 | v(1:n(1),1:n(2),1:n(3))) 29 | call write_visu_2d(datadir,'w_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Z', & 30 | 2,ng(2)/2,ng,time,istep, & 31 | w(1:n(1),1:n(2),1:n(3))) 32 | call write_visu_2d(datadir,'p_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Pressure_P', & 33 | 2,ng(2)/2,ng,time,istep, & 34 | p(1:n(1),1:n(2),1:n(3))) 35 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot/postp.two_phase_inc_isot/out3d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_3d(datadir,fname_bin,fname_log,varname,nmin,nmax,nskip,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (can be the same for a time series of data with the same grid) 10 | ! varname -> name of the variable that is saved 11 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 12 | ! three components of the vector field 13 | ! nmin -> first element of the field that is saved in each direction, e.g. (/1,1,1/) 14 | ! nmax -> last element of the field that is saved in each direction, e.g. (/ng(1),ng(2),ng(3)/) 15 | ! 
nskip -> step size with which the grid points are saved, e.g. (/1,1,1/) if the whole array is saved 16 | ! time -> physical time 17 | ! istep -> time step number 18 | ! p -> 3D input scalar field 19 | ! 20 | ! modify the calls below as desired 21 | ! 22 | call write_visu_3d(datadir,'vex_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_X', & 23 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 24 | u(1:n(1),1:n(2),1:n(3))) 25 | call write_visu_3d(datadir,'vey_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Y', & 26 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 27 | v(1:n(1),1:n(2),1:n(3))) 28 | call write_visu_3d(datadir,'vez_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Z', & 29 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 30 | w(1:n(1),1:n(2),1:n(3))) 31 | call write_visu_3d(datadir,'pre_fld_'//fldnum//'.bin','log_visu_3d.out','Pressure_P', & 32 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 33 | p(1:n(1),1:n(2),1:n(3))) 34 | call write_visu_3d(datadir,'vof_fld_'//fldnum//'.bin','log_visu_3d.out','VoF', & 35 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 36 | psi(1:n(1),1:n(2),1:n(3))) 37 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot_turb/app.two_phase_inc_isot_turb: -------------------------------------------------------------------------------- 1 | # Physics switches 2 | CONSTANT_COEFFS_POISSON ?= 1 3 | USE_VOF ?= 1 4 | VOF_DBG ?= 0 5 | HEAT_TRANSFER ?= 0 6 | DO_POSTPROC ?= 1 7 | TURB_FORCING ?= 1 8 | 9 | MAIN_SRC=main__two_phase_inc_isot_turb.f90 10 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot_turb/postp.two_phase_inc_isot_turb/dropcheck.h90: -------------------------------------------------------------------------------- 1 | if(mod(istep,iout0d).eq.0) then 2 | ! 3 | ! compute the bubble/droplet trajectory and velocity 4 | ! 
5 | xd = 0.0d0 6 | yd = 0.0d0 7 | zd = 0.0d0 8 | ut = 0.0d0 9 | vt = 0.0d0 10 | wt = 0.0d0 11 | ! 12 | do k=1,n(3) 13 | do j=1,n(2) 14 | do i=1,n(1) 15 | ! 16 | if(psi(i,j,k).gt.1.0e-8) then 17 | ! 18 | !zcd = (k -0.5d0)*dl(3) !- pos(3) 19 | !ycd = (j+coord(2)*n(2)-0.5d0)*dl(2) !- pos(2) 20 | !xcd = (i+coord(1)*n(1)-0.5d0)*dl(1) !- pos(1) 21 | zcd = (k+ijk_start(3)-0.5d0)*dl(3) !- pos(3) 22 | ycd = (j+ijk_start(2)-0.5d0)*dl(2) !- pos(2) 23 | xcd = (i+ijk_start(1)-0.5d0)*dl(1) !- pos(1) 24 | ! 25 | xd = xd + dl(1)*dl(2)*dl(3)*xcd*(psi(i,j,k)) 26 | yd = yd + dl(1)*dl(2)*dl(3)*ycd*(psi(i,j,k)) 27 | zd = zd + dl(1)*dl(2)*dl(3)*zcd*(psi(i,j,k)) 28 | ut = ut + dl(1)*dl(2)*dl(3)*(0.5d0*(u(i,j,k)+u(i-1,j,k)))*(psi(i,j,k)) ! along x 29 | vt = vt + dl(1)*dl(2)*dl(3)*(0.5d0*(v(i,j,k)+v(i,j-1,k)))*(psi(i,j,k)) ! along y 30 | wt = wt + dl(1)*dl(2)*dl(3)*(0.5d0*(w(i,j,k)+w(i,j,k-1)))*(psi(i,j,k)) ! along z 31 | ! 32 | endif 33 | ! 34 | enddo 35 | enddo 36 | enddo 37 | vol = sum(psi(1:n(1),1:n(2),1:n(3)))*dl(1)*dl(2)*dl(3) ! 
volume of the droplet 38 | call mpi_allreduce(MPI_IN_PLACE,xd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 39 | call mpi_allreduce(MPI_IN_PLACE,yd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 40 | call mpi_allreduce(MPI_IN_PLACE,zd ,1,mpi_real8,mpi_sum,comm_cart,ierr) 41 | call mpi_allreduce(MPI_IN_PLACE,vol,1,mpi_real8,mpi_sum,comm_cart,ierr) 42 | call mpi_allreduce(MPI_IN_PLACE,ut ,1,mpi_real8,mpi_sum,comm_cart,ierr) 43 | call mpi_allreduce(MPI_IN_PLACE,vt ,1,mpi_real8,mpi_sum,comm_cart,ierr) 44 | call mpi_allreduce(MPI_IN_PLACE,wt ,1,mpi_real8,mpi_sum,comm_cart,ierr) 45 | xd = xd/vol 46 | yd = yd/vol 47 | zd = zd/vol 48 | ut = ut/vol 49 | vt = vt/vol 50 | wt = wt/vol 51 | if(myid.eq.0) then 52 | !call out0d('data/pos_vt.out',7,(/time/(lref/uref),xd/lx,yd/ly,zd/lz,ut/uref,vt/uref,wt/uref/)) 53 | !call out0d('data/pos_vt.out',7,(/time/(lref/uref),xd/lref,yd/lref,zd/lref,ut/uref,vt/uref,wt/uref/)) 54 | call out0d('data/pos_vt.out',7,(/time,xd,yd,zd,ut,vt,wt/)) 55 | endif 56 | ! 57 | endif 58 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot_turb/postp.two_phase_inc_isot_turb/out1d.h90: -------------------------------------------------------------------------------- 1 | ! out1d(fname,n,idir,z,dzlzi,p) 2 | ! 3 | ! writes the profile of a variable averaged 4 | ! over two domain directions (see output.f90) 5 | ! 6 | ! fname -> name of the file 7 | ! n -> size of the input array 8 | ! idir -> direction of the profile 9 | ! z -> z coordinate (grid is non-uniform in z) 10 | ! dzlzi -> dz/lz weight of a grid cell for averaging over z 11 | ! p -> 3D input scalar field 12 | ! 13 | ! modify the calls below as desired 14 | ! 
15 | !call out1d(trim(datadir)//'umean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,u) 16 | !call out1d(trim(datadir)//'vmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zc_g,dzf/lz,v) 17 | !call out1d(trim(datadir)//'wmean_z_fld_'//fldnum//'.out',n,dims,dl,3,nh_d,nh_u,zf_g,dzc/lz,w) 18 | !call out1d(trim(datadir)//'umean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,u) 19 | !call out1d(trim(datadir)//'vmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,v) 20 | !call out1d(trim(datadir)//'wmean_y_fld_'//fldnum//'.out',n,dims,dl,2,nh_d,nh_u,zc_g,dzf/lz,w) 21 | !call out1d(trim(datadir)//'umean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,u) 22 | !call out1d(trim(datadir)//'vmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,v) 23 | !call out1d(trim(datadir)//'wmean_x_fld_'//fldnum//'.out',n,dims,dl,1,nh_d,nh_u,zc_g,dzf/lz,w) 24 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot_turb/postp.two_phase_inc_isot_turb/out2d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_2d(datadir,fname_bin,fname_log,varname,inorm,nslice,ng,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (each different plane that is saved should 10 | ! correspond to a different log file) 11 | ! varname -> name of the variable that is saved 12 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 13 | ! three components of the vector field 14 | ! inorm -> plane is perpendicular to direction inorm (1, 2, or 3) 15 | ! islice -> plane is of constant index islice in direction inorm 16 | ! 
ng -> array with the global number of points in each direction 17 | ! time -> physical time 18 | ! istep -> time step number 19 | ! p -> 3D input scalar field 20 | ! 21 | ! modify the calls below as desired 22 | ! 23 | call write_visu_2d(datadir,'u_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_X', & 24 | 2,ng(2)/2,ng,time,istep, & 25 | u(1:n(1),1:n(2),1:n(3))) 26 | call write_visu_2d(datadir,'v_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Y', & 27 | 2,ng(2)/2,ng,time,istep, & 28 | v(1:n(1),1:n(2),1:n(3))) 29 | call write_visu_2d(datadir,'w_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Velocity_Z', & 30 | 2,ng(2)/2,ng,time,istep, & 31 | w(1:n(1),1:n(2),1:n(3))) 32 | call write_visu_2d(datadir,'p_slice_fld_'//fldnum//'.bin','log_visu_2d_slice_1.out','Pressure_P', & 33 | 2,ng(2)/2,ng,time,istep, & 34 | p(1:n(1),1:n(2),1:n(3))) 35 | -------------------------------------------------------------------------------- /src/apps/two_phase_inc_isot_turb/postp.two_phase_inc_isot_turb/out3d.h90: -------------------------------------------------------------------------------- 1 | ! 2 | ! write_visu_3d(datadir,fname_bin,fname_log,varname,nmin,nmax,nskip,time,istep,p) 3 | ! 4 | ! saves field data into a binary file and appends information about the data to a file 5 | ! the log file can be used to generate a xdmf file for visualization of field data 6 | ! 7 | ! datadir -> name of the directory where the data is saved 8 | ! fname_bin -> name of the output binary file 9 | ! fname_log -> name of the log file (can be the same for a time series of data with the same grid) 10 | ! varname -> name of the variable that is saved 11 | ! to create a vector, append _X _Y and _Z to the variable name, denoting the 12 | ! three components of the vector field 13 | ! nmin -> first element of the field that is saved in each direction, e.g. (/1,1,1/) 14 | ! nmax -> last element of the field that is saved in each direction, e.g. (/ng(1),ng(2),ng(3)/) 15 | ! 
nskip -> step size with which the grid points are saved, e.g. (/1,1,1/) if the whole array is saved 16 | ! time -> physical time 17 | ! istep -> time step number 18 | ! p -> 3D input scalar field 19 | ! 20 | ! modify the calls below as desired 21 | ! 22 | call write_visu_3d(datadir,'vex_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_X', & 23 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 24 | u(1:n(1),1:n(2),1:n(3))) 25 | call write_visu_3d(datadir,'vey_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Y', & 26 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 27 | v(1:n(1),1:n(2),1:n(3))) 28 | call write_visu_3d(datadir,'vez_fld_'//fldnum//'.bin','log_visu_3d.out','Velocity_Z', & 29 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 30 | w(1:n(1),1:n(2),1:n(3))) 31 | call write_visu_3d(datadir,'pre_fld_'//fldnum//'.bin','log_visu_3d.out','Pressure_P', & 32 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 33 | p(1:n(1),1:n(2),1:n(3))) 34 | call write_visu_3d(datadir,'vof_fld_'//fldnum//'.bin','log_visu_3d.out','VoF', & 35 | (/1,1,1/),(/ng(1),ng(2),ng(3)/),(/1,1,1/),time,istep, & 36 | psi(1:n(1),1:n(2),1:n(3))) 37 | -------------------------------------------------------------------------------- /src/chkdiv.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_chkdiv 5 | ! 6 | use mpi 7 | use mod_common_mpi, only: myid,ierr 8 | use mod_types 9 | !@cuf use cudafor 10 | ! 11 | implicit none 12 | ! 13 | private 14 | public :: chkdiv 15 | ! 16 | contains 17 | ! 18 | subroutine chkdiv(nx,ny,nz,dxi,dyi,dzi,nh_d,nh_u,dzfi,u,v,w,divtot,divmax) 19 | ! 20 | ! checks the divergence of the velocity field 21 | ! 22 | implicit none 23 | ! 
24 | integer , intent(in ) :: nx,ny,nz 25 | real(rp), intent(in ) :: dxi,dyi,dzi 26 | integer , intent(in ) :: nh_d,nh_u 27 | real(rp), intent(in ), dimension(1-nh_d:) :: dzfi 28 | real(rp), intent(in ), dimension(1-nh_u:,1-nh_u:,1-nh_u:) :: u,v,w 29 | real(rp), intent(out) :: divtot,divmax 30 | ! 31 | real(rp) :: div 32 | !@cuf attributes(managed) :: u,v,w,dzfi 33 | integer :: i,j,k 34 | ! 35 | divtot = 0._rp 36 | divmax = 0._rp 37 | #if defined(_OPENACC) 38 | !$acc parallel loop collapse(3) reduction(max:divmax) reduction(+:divtot) 39 | #else 40 | !$OMP PARALLEL DO DEFAULT(none) & 41 | !$OMP SHARED(nx,ny,nz,u,v,w,dxi,dyi,dzfi) & 42 | !$OMP PRIVATE(i,j,k,div) & 43 | !$OMP REDUCTION(+:divtot) & 44 | !$OMP REDUCTION(max:divmax) 45 | #endif 46 | do k=1,nz 47 | do j=1,ny 48 | do i=1,nx 49 | ! 50 | div = (w(i,j,k)-w(i,j,k-1))*dzfi(k) + & 51 | (v(i,j,k)-v(i,j-1,k))*dyi + & 52 | (u(i,j,k)-u(i-1,j,k))*dxi 53 | divmax = max(divmax,abs(div)) 54 | divtot = divtot + div 55 | ! 56 | enddo 57 | enddo 58 | enddo 59 | #if defined(_OPENACC) 60 | !$acc end parallel loop 61 | #else 62 | !$OMP END PARALLEL DO 63 | #endif 64 | call mpi_allreduce(MPI_IN_PLACE,divtot,1,MPI_REAL_RP,MPI_SUM,MPI_COMM_WORLD,ierr) 65 | call mpi_allreduce(MPI_IN_PLACE,divmax,1,MPI_REAL_RP,MPI_MAX,MPI_COMM_WORLD,ierr) 66 | if(myid.eq.0) print*, 'Total divergence = ', divtot, '| Maximum divergence = ', divmax 67 | ! 68 | return 69 | end subroutine chkdiv 70 | ! 71 | end module mod_chkdiv 72 | -------------------------------------------------------------------------------- /src/common_mpi.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_common_mpi 5 | ! 6 | use mpi 7 | use mod_types, only: rp 8 | ! 9 | implicit none 10 |
11 | integer :: myid 12 | integer :: comm_cart,ierr 13 | integer :: status(MPI_STATUS_SIZE) 14 | #if defined(_OPENACC) 15 | real(rp), allocatable, dimension(:,:) :: xsl_buf, xrl_buf, xsr_buf, xrr_buf, & 16 | ysr_buf, yrr_buf, ysl_buf, yrl_buf, & 17 | zsr_buf, zrr_buf, zsl_buf, zrl_buf 18 | !@cuf integer :: mydev 19 | ! 20 | #if defined(_GPU_MPI) 21 | attributes(device) :: xsl_buf, xrl_buf, xsr_buf, xrr_buf, & 22 | ysr_buf, yrr_buf, ysl_buf, yrl_buf, & 23 | zsr_buf, zrr_buf, zsl_buf, zrl_buf 24 | #else 25 | attributes(managed) :: xsl_buf, xrl_buf, xsr_buf, xrr_buf, & 26 | ysr_buf, yrr_buf, ysl_buf, yrl_buf, & 27 | zsr_buf, zrr_buf, zsl_buf, zrl_buf 28 | #endif 29 | #endif 30 | ! 31 | integer, dimension(3) :: ijk_start,ijk_start_x,ijk_start_y,ijk_start_z,n_x,n_y,n_z 32 | integer :: left,right,front,back,top,bottom,ipencil 33 | ! 34 | end module mod_common_mpi 35 | -------------------------------------------------------------------------------- /src/correc.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_correc 5 | ! 6 | use mod_types , only: rp 7 | !@cuf use cudafor 8 | ! 9 | implicit none 10 | ! 11 | private 12 | public :: correc 13 | ! 14 | contains 15 | ! 16 | subroutine correc(nx,ny,nz,nh_d,nh_u,dxi,dyi,dzi,dzci,dt,rho0,p,u,v,w,rho) 17 | ! 18 | ! corrects the velocity so that the prescribed divergence is imposed 19 | ! 20 | implicit none 21 | ! 22 | integer , intent(in ) :: nx,ny,nz 23 | integer , intent(in ) :: nh_d,nh_u 24 | real(rp), intent(in ) :: dxi,dyi,dzi 25 | real(rp), intent(in ), dimension(1-nh_d:) :: dzci 26 | real(rp), intent(in ) :: dt,rho0 27 | real(rp), intent(in ), dimension(0:,0:,0:) :: p 28 | real(rp), intent(inout), dimension(1-nh_u:,1-nh_u:,1-nh_u:) :: u,v,w 29 | real(rp), intent(in ), dimension(0:,0:,0:) :: rho 30 | ! 
31 | real(rp) :: factori,factorj 32 | real(rp) :: rhox,rhoy,rhoz 33 | real(rp) :: rho0i 34 | integer :: i,j,k,ip,jp,kp 35 | !@cuf attributes(managed) :: u, v, w, rho, p, dzci 36 | ! 37 | rho0i = 1._rp/rho0 38 | ! 39 | factori = dt*dxi 40 | factorj = dt*dyi 41 | ! 42 | #if defined(_OPENACC) 43 | !$acc kernels 44 | #else 45 | !$OMP PARALLEL DO DEFAULT(none) & 46 | !$OMP SHARED(nx,ny,nz,factori,factorj,dzci,dt,rho0i,p,u,v,w,rho) & 47 | !$OMP PRIVATE(i,j,k,ip,jp,kp,rhox,rhoy,rhoz) 48 | #endif 49 | do k=1,nz 50 | do j=1,ny 51 | do i=1,nx 52 | ! 53 | ip = i+1 54 | jp = j+1 55 | kp = k+1 56 | ! 57 | #if defined(_CONSTANT_COEFFS_POISSON) 58 | u(i,j,k) = u(i,j,k) - factori*( p( ip,j,k)-p( i,j,k) )*rho0i 59 | v(i,j,k) = v(i,j,k) - factorj*( p( i,jp,k)-p( i,j,k) )*rho0i 60 | w(i,j,k) = w(i,j,k) - dt*dzci(k)*( p( i,j,kp)-p( i,j,k) )*rho0i 61 | #else 62 | rhox = 0.5_rp*(rho(ip,j,k)+rho(i,j,k)) 63 | rhoy = 0.5_rp*(rho(i,jp,k)+rho(i,j,k)) 64 | rhoz = 0.5_rp*(rho(i,j,kp)+rho(i,j,k)) 65 | ! 66 | u(i,j,k) = u(i,j,k) - factori*( p( ip,j,k)-p( i,j,k) )/rhox 67 | v(i,j,k) = v(i,j,k) - factorj*( p( i,jp,k)-p( i,j,k) )/rhoy 68 | w(i,j,k) = w(i,j,k) - dt*dzci(k)*( p( i,j,kp)-p( i,j,k) )/rhoz 69 | #endif 70 | ! 71 | enddo 72 | enddo 73 | enddo 74 | #if defined(_OPENACC) 75 | !$acc end kernels 76 | #else 77 | !$OMP END PARALLEL DO 78 | #endif 79 | ! 80 | return 81 | end subroutine correc 82 | ! 83 | end module mod_correc 84 | -------------------------------------------------------------------------------- /src/data/clean.sh: -------------------------------------------------------------------------------- 1 | rm -rf *.out 2 | rm -rf *.old 3 | rm -rf *.bin 4 | rm -rf post 5 | -------------------------------------------------------------------------------- /src/debug.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_debug 5 | !
6 | use mpi 7 | use mod_common_mpi, only: myid,ierr 8 | use mod_types 9 | !@cuf use cudafor 10 | ! 11 | implicit none 12 | ! 13 | private 14 | public :: cmpt_mean 15 | ! 16 | contains 17 | ! 18 | subroutine cmpt_mean(nx,ny,nz,nh_d,nh_p,dx,dy,dzf,lx,ly,lz,p,mean) 19 | ! 20 | ! compute the mean value of an observable over the entire domain 21 | ! 22 | implicit none 23 | ! 24 | integer , intent(in ) :: nx,ny,nz 25 | integer , intent(in ) :: nh_d,nh_p 26 | real(rp), intent(in ) :: dx,dy 27 | real(rp), intent(in ), dimension(1-nh_d:) :: dzf 28 | real(rp), intent(in ) :: lx,ly,lz 29 | real(rp), intent(in ), dimension(1-nh_p:,1-nh_p:,1-nh_p:) :: p 30 | real(rp), intent(out) :: mean 31 | ! 32 | !@cuf attributes(managed) :: p, dzf 33 | integer :: i,j,k 34 | ! 35 | mean = 0._rp 36 | ! 37 | #if defined(_OPENACC) 38 | !$acc parallel loop collapse(3) reduction(+:mean) 39 | #else 40 | !$OMP PARALLEL DO DEFAULT(none) & 41 | !$OMP SHARED(nx,ny,nz,dx,dy,p,dzf) & 42 | !$OMP PRIVATE(i,j,k) & 43 | !$OMP REDUCTION(+:mean) 44 | #endif 45 | do k=1,nz 46 | do j=1,ny 47 | do i=1,nx 48 | mean = mean + p(i,j,k)*dx*dy*dzf(k) 49 | enddo 50 | enddo 51 | enddo 52 | #if defined(_OPENACC) 53 | !$acc end parallel loop 54 | #else 55 | !$OMP END PARALLEL DO 56 | #endif 57 | ! 58 | call mpi_allreduce(MPI_IN_PLACE,mean,1,MPI_REAL_RP,MPI_SUM,MPI_COMM_WORLD,ierr) 59 | mean = mean/(1._rp*lx*ly*lz) 60 | ! 61 | return 62 | end subroutine cmpt_mean 63 | ! 64 | end module mod_debug 65 | -------------------------------------------------------------------------------- /src/fftw.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_fftw_param 5 | use mod_types 6 | use, intrinsic :: iso_c_binding 7 | #if defined(_OPENACC) 8 | use cufft 9 | #endif 10 | ! 11 | type, bind(C) :: fftw_iodim 12 | integer(C_INT) n, is, os 13 | end type fftw_iodim 14 | !
15 | interface 16 | type(C_PTR) function fftw_plan_guru_r2r(rank,dims, & 17 | howmany_rank,howmany_dims,in,out,kind,flags) & 18 | #if defined(_SINGLE_PRECISION) 19 | bind(C, name='fftwf_plan_guru_r2r') 20 | #else 21 | bind(C, name='fftw_plan_guru_r2r') 22 | #endif 23 | import 24 | integer(C_INT), value :: rank 25 | type(fftw_iodim), dimension(*), intent(in) :: dims 26 | integer(C_INT), value :: howmany_rank 27 | type(fftw_iodim), dimension(*), intent(in) :: howmany_dims 28 | #if defined(_SINGLE_PRECISION) 29 | real(C_FLOAT ), dimension(*), intent(out) :: in,out 30 | #else 31 | real(C_DOUBLE), dimension(*), intent(out) :: in,out 32 | #endif 33 | integer(C_INT) :: kind 34 | integer(C_INT), value :: flags 35 | end function fftw_plan_guru_r2r 36 | end interface 37 | ! 38 | integer :: FFTW_PATIENT,FFTW_ESTIMATE 39 | parameter (FFTW_PATIENT=32) 40 | parameter (FFTW_ESTIMATE=64) 41 | integer FFTW_R2HC 42 | parameter (FFTW_R2HC=0) 43 | integer FFTW_HC2R 44 | parameter (FFTW_HC2R=1) 45 | ! 46 | integer FFTW_REDFT00 47 | parameter (FFTW_REDFT00=3) 48 | integer FFTW_REDFT01 49 | parameter (FFTW_REDFT01=4) 50 | integer FFTW_REDFT10 51 | parameter (FFTW_REDFT10=5) 52 | integer FFTW_REDFT11 53 | parameter (FFTW_REDFT11=6) 54 | integer FFTW_RODFT00 55 | parameter (FFTW_RODFT00=7) 56 | integer FFTW_RODFT01 57 | parameter (FFTW_RODFT01=8) 58 | integer FFTW_RODFT10 59 | parameter (FFTW_RODFT10=9) 60 | integer FFTW_RODFT11 61 | parameter (FFTW_RODFT11=10) 62 | ! 63 | type(C_PTR) :: fwd_guruplan_y,bwd_guruplan_y 64 | type(C_PTR) :: fwd_guruplan_z,bwd_guruplan_z 65 | logical :: planned=.false.
66 | 67 | #if defined(_OPENACC) 68 | integer :: batch 69 | integer :: cufft_plan_fwd_x, cufft_plan_bwd_x 70 | integer :: cufft_plan_fwd_y, cufft_plan_bwd_y 71 | complex(rp),device,allocatable,dimension(:) :: cufft_workspace 72 | integer :: CUFFT_FWD_TYPE,CUFFT_BWD_TYPE 73 | #if defined(_SINGLE_PRECISION) 74 | parameter(CUFFT_FWD_TYPE = CUFFT_R2C) 75 | parameter(CUFFT_BWD_TYPE = CUFFT_C2R) 76 | #else 77 | parameter(CUFFT_FWD_TYPE = CUFFT_D2Z) 78 | parameter(CUFFT_BWD_TYPE = CUFFT_Z2D) 79 | #endif 80 | #endif 81 | 82 | end module mod_fftw_param 83 | -------------------------------------------------------------------------------- /src/fillps.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_fillps 5 | ! 6 | use mod_types, only: rp 7 | !@cuf use cudafor 8 | ! 9 | implicit none 10 | ! 11 | private 12 | public :: fillps 13 | ! 14 | contains 15 | ! 16 | subroutine fillps(nx,ny,nz,nh_d,nh_u,dxi,dyi,dzi,dzfi,dti,rho0,u,v,w,p) 17 | ! 18 | implicit none 19 | ! 20 | integer , intent(in ) :: nx,ny,nz 21 | integer , intent(in ) :: nh_d,nh_u 22 | real(rp), intent(in ) :: dxi,dyi,dzi 23 | real(rp), intent(in ), dimension(1-nh_d:) :: dzfi 24 | real(rp), intent(in ) :: dti,rho0 25 | real(rp), intent(in ), dimension(1-nh_u:,1-nh_u:,1-nh_u:) :: u,v,w 26 | real(rp), intent(out), dimension(0:,0:,0:) :: p 27 | ! 28 | real(rp) :: dtidxi,dtidyi 29 | integer :: i,j,k,im,jm,km 30 | !@cuf attributes(managed) :: p, u, v, w, dzfi 31 | ! 32 | dtidxi = dti*dxi 33 | dtidyi = dti*dyi 34 | ! 35 | #if defined(_OPENACC) 36 | !$acc kernels 37 | #else 38 | !$OMP PARALLEL DO DEFAULT(none) & 39 | !$OMP SHARED(nx,ny,nz,rho0,p,u,v,w,dtidxi,dtidyi,dti,dzfi) & 40 | !$OMP PRIVATE(i,j,k,im,jm,km) 41 | #endif 42 | do k=1,nz 43 | do j=1,ny 44 | do i=1,nx 45 | ! 46 | im = i-1 47 | jm = j-1 48 | km = k-1 49 |
50 | p(i,j,k) = & 51 | ((w(i,j,k)-w(i,j,km))*dti*dzfi(k) + & 52 | (v(i,j,k)-v(i,jm,k))*dtidyi + & 53 | (u(i,j,k)-u(im,j,k))*dtidxi ) 54 | ! 55 | #if defined(_CONSTANT_COEFFS_POISSON) 56 | p(i,j,k) = p(i,j,k)*rho0 57 | #endif 58 | ! 59 | enddo 60 | enddo 61 | enddo 62 | #if defined(_OPENACC) 63 | !$acc end kernels 64 | #else 65 | !$OMP END PARALLEL DO 66 | #endif 67 | ! 68 | return 69 | end subroutine fillps 70 | ! 71 | end module mod_fillps 72 | -------------------------------------------------------------------------------- /src/funcs.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_funcs 5 | ! 6 | use mod_param, only: pi 7 | use mod_types, only: rp 8 | ! 9 | implicit none 10 | ! 11 | private 12 | public ssign,heaviside,dirac,efun,interp_d,interp_g 13 | ! 14 | contains 15 | ! 16 | function ssign(delta,phi) 17 | ! 18 | ! smooth sign function 19 | ! 20 | implicit none 21 | ! 22 | real(rp), intent(in) :: delta,phi 23 | real(rp) :: ssign 24 | ! 25 | ssign = sign(1._rp,phi) 26 | if(abs(phi).le.delta) then 27 | ssign = phi/(sqrt(phi**2+delta**2)) 28 | endif 29 | ! 30 | return 31 | end function ssign 32 | ! 33 | function heaviside(r,eps) 34 | ! 35 | ! smooth step function based 36 | ! on cosine/sine 37 | ! 38 | implicit none 39 | ! 40 | real(rp), intent(in) :: r,eps 41 | real(rp) :: heaviside 42 | ! 43 | if(r.lt.-eps) then 44 | heaviside = 0._rp 45 | elseif(r.lt.eps) then 46 | heaviside = 0.5_rp + 0.5_rp*r/eps + 0.5_rp/pi*sin(pi*r/eps) 47 | else 48 | heaviside = 1._rp 49 | endif 50 | ! 51 | return 52 | end function heaviside 53 | ! 54 | function dirac(r,eps) 55 | ! 56 | ! smooth impulse function 57 | ! (note: the derivative of heaviside function w.r.t. eps) 58 | ! 59 | implicit none 60 | ! 61 | real(rp), intent(in) :: r,eps 62 | real(rp) :: dirac 63 | ! 
64 | if(abs(r).ge.eps) then 65 | dirac = 0._rp 66 | else 67 | dirac = 0.5_rp/eps + 0.5_rp/eps*cos(pi*r/eps) 68 | endif 69 | ! 70 | return 71 | end function dirac 72 | ! 73 | function efun(r,eps) 74 | ! 75 | ! smooth step function based 76 | ! on the error function 77 | ! 78 | implicit none 79 | ! 80 | real(rp), intent(in) :: r,eps 81 | real(rp) :: efun 82 | ! 83 | efun = 0.5_rp*( 1._rp+erf(r/eps) ) 84 | ! 85 | return 86 | end function efun 87 | ! 88 | function dir_efun(r,eps) 89 | ! 90 | ! smooth impulse function based 91 | ! on the error function 92 | ! (note: the derivative of error function w.r.t. eps) 93 | ! 94 | implicit none 95 | ! 96 | real(rp), intent(in) :: r,eps 97 | real(rp) :: dir_efun 98 | ! 99 | dir_efun = (1._rp/(eps*pi))*exp(-(r/eps)**2._rp) 100 | ! 101 | return 102 | end function dir_efun 103 | ! 104 | function interp_d(vec,vel,q) result(flux) ! divergence form 105 | ! 106 | ! QUICK discretization (by Leonard) in divergence form 107 | ! (see, e.g.: Numerical simulation of incompressible flows, pag. 112) 108 | ! 109 | implicit none 110 | ! 111 | real(rp), intent(in), dimension(-2:2) :: vec 112 | real(rp), intent(in) :: vel ! the transporting velocity (in d(uu)/dx is u, in d(uv)/dy is v) 113 | integer , intent(in) :: q ! (q=-1 if f_i, q=0 if f_i+1 with f is the flux) 114 | ! 115 | real(rp) :: flux 116 | ! 117 | !flux = 0.5_rp*vel*(vec(0+q)+vec(1+q)) + 0.5_rp*abs(vel)*(+vec(0+q)-vec(1+q)) 118 | ! 119 | flux = 0.0625_rp*(vel)*(-vec(-1+q)+9.0_rp*vec(0+q)+9.0_rp*vec(1+q)-vec(2+q)) & 120 | + 0.0625_rp*abs(vel)*(-vec(-1+q)+3.0_rp*vec(0+q)-3.0_rp*vec(1+q)+vec(2+q)) 121 | ! 122 | return 123 | end function interp_d 124 | ! 125 | function interp_g(vec,vel,dli) result(vel_grad) ! gradient form 126 | ! 127 | ! II order (Kawamura and Kuwahara equation) discretization in gradient form 128 | ! (see, e.g.: Numerical simulation of incompressible flows, pag. 113) 129 | ! 130 | implicit none 131 | ! 
132 | real(rp), intent(in), dimension(-2:2) :: vec 133 | real(rp), intent(in) :: vel ! the transporting velocity (in ud(f)/dx is u, in vd(f)/dy is v) 134 | real(rp), intent(in) :: dli 135 | ! 136 | real(rp) :: vel_grad 137 | ! 138 | !vel_grad = 0.5_rp*(vel+abs(vel))*(vec(0)-vec(-1))*dli + 0.5_rp*(vel-abs(vel))*(vec(1)-vec(0))*dli 139 | ! 140 | vel_grad = (vel)*(vec(-2)-8.0_rp*vec(-1) +8.0_rp*vec(+1)-vec(+2))*(1.0_rp/12.0_rp)*dli & 141 | + 3.0_rp*abs(vel)*(vec(-2)-4.0_rp*vec(-1)+6.0_rp*vec(+0)-4.0_rp*vec(+1)+vec(+2))*(1.0_rp/12.0_rp)*dli 142 | ! 143 | return 144 | end function interp_g 145 | ! 146 | end module mod_funcs 147 | -------------------------------------------------------------------------------- /src/initgrid.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_initgrid 5 | ! 6 | use mod_param, only: pi 7 | use mod_types 8 | ! 9 | implicit none 10 | ! 11 | private 12 | public :: initgrid 13 | ! 14 | contains 15 | ! 16 | ! [NOTE] This could be done on GPU but is it worth it? 17 | subroutine initgrid(inivel,n,gr,lz,nh_d,dzc,dzf,zc,zf) 18 | ! 19 | ! initializes the non-uniform grid in z 20 | ! 21 | implicit none 22 | ! 23 | character(len=3), intent(in ) :: inivel 24 | integer , intent(in ) :: n 25 | real(rp) , intent(in ) :: gr,lz 26 | integer , intent(in ) :: nh_d 27 | real(rp) , intent(out), dimension(1-nh_d:n+nh_d) :: dzc,dzf,zc,zf 28 | ! 29 | real(rp) :: z0 30 | integer :: k 31 | procedure (), pointer :: gridpoint => null() 32 | select case(inivel) 33 | case('zer','log','poi','cou') 34 | gridpoint => gridpoint_cluster_two_end 35 | case('hcl','hcp') 36 | gridpoint => gridpoint_cluster_one_end 37 | case default 38 | gridpoint => gridpoint_cluster_two_end 39 | end select 40 | ! 41 | ! step 1) determine coordinates of cell faces zf 42 | ! 
43 | do k=1,n 44 | z0 = (k-0.0_rp)/(1.0_rp*n) 45 | call gridpoint(gr,z0,zf(k)) 46 | zf(k) = zf(k)*lz 47 | enddo 48 | zf(0) = 0.0_rp 49 | ! 50 | ! step 2) determine grid spacing between faces dzf 51 | ! 52 | do k=1,n 53 | dzf(k) = zf(k)-zf(k-1) 54 | enddo 55 | dzf(0 ) = dzf(1) 56 | dzf(n+1) = dzf(n) 57 | ! 58 | ! step 3) determine grid spacing between centers dzc 59 | ! 60 | do k=0,n 61 | dzc(k) = 0.5_rp*(dzf(k)+dzf(k+1)) 62 | enddo 63 | dzc(n+1) = dzc(n) 64 | ! 65 | ! step 4) compute coordinates of cell centers zc and faces zf 66 | ! 67 | zc(0) = -dzc(0)/2.0_rp 68 | zf(0) = 0.0_rp 69 | do k=1,n+1 70 | zc(k) = zc(k-1) + dzc(k-1) 71 | zf(k) = zf(k-1) + dzf(k) 72 | enddo 73 | ! 74 | ! step 5) extension to 0,-1,... and n+1,n+2,... for dzf and dzc 75 | ! NOTE(review): for k=n+1 the index 2*n-k-1 yields dzf(n-2) and overwrites the dzf(n+1)=dzf(n) set in step 2; a symmetric reflection about zf(n) would read dzf(2*n-k+1) -- confirm the intended mirror before relying on stretched-grid ghost layers 76 | do k=1-nh_d,0 77 | dzf(k) = dzf(-k+1) 78 | dzc(k) = dzc(-k ) 79 | enddo 80 | do k=n+1,n+nh_d 81 | dzf(k) = dzf(2*n-k-1) 82 | dzc(k) = dzc(2*n-k ) 83 | enddo 84 | ! 85 | ! step 6) extension to 0,-1,... and n+1,n+2,... for zf and zc 86 | ! NOTE(review): ghost centers are built from zf (e.g. zc(k)=zf(k+1)-dzc(k+1)), which for k=0 disagrees with the zc(0)=-dzc(0)/2 set in step 4 even on a uniform grid; marching from zc itself (zc(k)=zc(k+1)-dzc(k), zc(k)=zc(k-1)+dzc(k-1)) would be the usual form -- verify against the intended staggering 87 | do k=0,1-nh_d,-1 88 | zf(k) = zf(k+1)-dzf(k ) 89 | zc(k) = zf(k+1)-dzc(k+1) 90 | enddo 91 | do k=n+1,n+nh_d 92 | zf(k) = zf(k-1)+dzf(k ) 93 | zc(k) = zf(k-1)+dzc(k-1) 94 | enddo 95 | ! 96 | return 97 | end subroutine initgrid 98 | ! 99 | ! grid stretching functions 100 | ! see e.g., Fluid Flow Phenomena -- A Numerical Toolkit, by P. Orlandi 101 | ! 102 | subroutine gridpoint_cluster_two_end(alpha,z0,z) 103 | ! 104 | ! clustered at the two sides 105 | ! 106 | implicit none 107 | ! 108 | real(rp), intent(in ) :: alpha,z0 109 | real(rp), intent(out) :: z 110 | ! 111 | if(alpha.ne.0._rp) then 112 | z = 0.5_rp*(1._rp+tanh((z0-0.5_rp)*alpha)/tanh(alpha/2.0_rp)) 113 | else 114 | z = z0 115 | endif 116 | ! 117 | return 118 | end subroutine gridpoint_cluster_two_end 119 | ! 120 | subroutine gridpoint_cluster_one_end(alpha,z0,z) 121 | ! 122 | ! clustered at the lower side 123 | ! 124 | implicit none 125 | ! 126 | real(rp), intent(in ) :: alpha,z0 127 | real(rp), intent(out) :: z 128 | !
129 | if(alpha.ne.0.0_rp) then 130 | z = 1.0_rp*(1.0_rp+tanh((z0-1.0_rp)*alpha)/tanh(alpha/1.0_rp)) 131 | else 132 | z = z0 133 | endif 134 | ! 135 | return 136 | end subroutine gridpoint_cluster_one_end 137 | ! 138 | subroutine gridpoint_cluster_middle(alpha,z0,z) 139 | ! 140 | ! clustered in the middle 141 | ! 142 | implicit none 143 | ! 144 | real(rp), intent(in ) :: alpha,z0 145 | real(rp), intent(out) :: z 146 | ! 147 | if(alpha.ne.0.0_rp) then 148 | if( z0.le.0.5_rp) then 149 | z = 0.5_rp*(1.0_rp-1.0_rp+tanh(2.0_rp*alpha*(z0-0.0_rp))/tanh(alpha)) 150 | elseif(z0.gt.0.5) then 151 | z = 0.5_rp*(1.0_rp+1.0_rp+tanh(2.0_rp*alpha*(z0-1.0_rp))/tanh(alpha)) 152 | endif 153 | else 154 | z = z0 155 | endif 156 | ! 157 | return 158 | end subroutine gridpoint_cluster_middle 159 | ! 160 | end module mod_initgrid 161 | -------------------------------------------------------------------------------- /src/make.deps: -------------------------------------------------------------------------------- 1 | # postprocessing 2 | postprocessing/fftw_spectra.o : common_mpi.o 3 | postprocessing/fftw_spectra.o : types.o 4 | postprocessing/post.o : common_mpi.o 5 | postprocessing/post.o : apps/$(APP)/param.o 6 | postprocessing/post.o : types.o 7 | postprocessing/postMain.o : bound.o 8 | postprocessing/postMain.o : common_mpi.o 9 | postprocessing/postMain.o : initgrid.o 10 | postprocessing/postMain.o : initmpi.o 11 | postprocessing/postMain.o : load.o 12 | postprocessing/postMain.o : output.o 13 | postprocessing/postMain.o : apps/$(APP)/param.o 14 | postprocessing/postMain.o : sanity.o 15 | postprocessing/postMain.o : solver_gpu.o 16 | postprocessing/postMain.o : types.o 17 | postprocessing/postMain.o : vof.o 18 | postprocessing/postMain.o : 2decomp/decomp_2d.o 19 | postprocessing/postMain.o : post.o 20 | postprocessing/postMain.o : spectra.o 21 | postprocessing/postMain.o : tagging.o 22 | postprocessing/post_other.o : bound.o 23 | postprocessing/post_other.o : common_mpi.o 24 | 
postprocessing/post_other.o : apps/$(APP)/param.o 25 | postprocessing/post_other.o : types.o 26 | postprocessing/post_zpencil.o : bound.o 27 | postprocessing/post_zpencil.o : common_mpi.o 28 | postprocessing/post_zpencil.o : apps/$(APP)/param.o 29 | postprocessing/post_zpencil.o : types.o 30 | postprocessing/spectra.o : common_mpi.o 31 | postprocessing/spectra.o : apps/$(APP)/param.o 32 | postprocessing/spectra.o : types.o 33 | postprocessing/spectra.o : 2decomp/decomp_2d.o 34 | postprocessing/spectra.o : fftw_spectra.o 35 | postprocessing/tagging.o : bound.o 36 | postprocessing/tagging.o : common_mpi.o 37 | postprocessing/tagging.o : apps/$(APP)/param.o 38 | postprocessing/tagging.o : types.o 39 | 40 | # 2decomp 41 | 2decomp/decomp_2d.o : profiler.o 42 | 2decomp/io.o : 2decomp/decomp_2d.o 43 | 44 | # src 45 | bound.o : common_mpi.o 46 | bound.o : profiler.o 47 | bound.o : types.o 48 | chkdiv.o : common_mpi.o 49 | chkdiv.o : types.o 50 | chkdt.o : common_mpi.o 51 | chkdt.o : apps/$(APP)/param.o 52 | chkdt.o : types.o 53 | cmpt_divth.o : common_mpi.o 54 | cmpt_divth.o : apps/$(APP)/param.o 55 | cmpt_divth.o : types.o 56 | common_mpi.o : types.o 57 | correc.o : common_mpi.o 58 | correc.o : types.o 59 | debug.o : common_mpi.o 60 | debug.o : types.o 61 | fft.o : common_mpi.o 62 | fft.o : fftw.o 63 | fft.o : apps/$(APP)/param.o 64 | fft.o : types.o 65 | fftw.o : types.o 66 | fillps.o : types.o 67 | funcs.o : apps/$(APP)/param.o 68 | funcs.o : types.o 69 | gradls.o : types.o 70 | initflow.o : common_mpi.o 71 | initflow.o : apps/$(APP)/param.o 72 | initflow.o : sanity.o 73 | initflow.o : types.o 74 | initgrid.o : apps/$(APP)/param.o 75 | initgrid.o : types.o 76 | initmpi.o : 2decomp/decomp_2d.o 77 | initmpi.o : common_mpi.o 78 | initmpi.o : types.o 79 | initsolver.o : 2decomp/decomp_2d.o 80 | initsolver.o : common_mpi.o 81 | initsolver.o : fft.o 82 | initsolver.o : apps/$(APP)/param.o 83 | initsolver.o : types.o 84 | load.o : 2decomp/decomp_2d.o 85 | load.o : 2decomp/io.o 
86 | load.o : common_mpi.o 87 | load.o : apps/$(APP)/param.o 88 | load.o : sanity.o 89 | load.o : types.o 90 | mom.o : funcs.o 91 | mom.o : apps/$(APP)/param.o 92 | mom.o : types.o 93 | moms.o : common_mpi.o 94 | moms.o : gradls.o 95 | moms.o : profiler.o 96 | moms.o : apps/$(APP)/param.o 97 | moms.o : types.o 98 | output.o : 2decomp/io.o 99 | output.o : common_mpi.o 100 | output.o : types.o 101 | apps/$(APP)/param.o : types.o 102 | rk.o : mom.o 103 | rk.o : types.o 104 | rk.o : sanity.o 105 | rks.o : common_mpi.o 106 | rks.o : moms.o 107 | rks.o : apps/$(APP)/param.o 108 | rks.o : types.o 109 | sanity.o : 2decomp/decomp_2d.o 110 | sanity.o : bound.o 111 | sanity.o : chkdiv.o 112 | sanity.o : common_mpi.o 113 | sanity.o : correc.o 114 | sanity.o : fft.o 115 | sanity.o : fillps.o 116 | sanity.o : initmpi.o 117 | sanity.o : initsolver.o 118 | sanity.o : apps/$(APP)/param.o 119 | sanity.o : solver_gpu.o 120 | sanity.o : solver_cpu.o 121 | sanity.o : apps/$(APP)/param.o 122 | sanity.o : types.o 123 | solver_gpu.o : 2decomp/decomp_2d.o 124 | solver_gpu.o : common_mpi.o 125 | solver_gpu.o : fft.o 126 | solver_gpu.o : fftw.o 127 | solver_gpu.o : types.o 128 | solver_cpu.o : 2decomp/decomp_2d.o 129 | solver_cpu.o : common_mpi.o 130 | solver_cpu.o : fft.o 131 | solver_cpu.o : profiler.o 132 | solver_cpu.o : types.o 133 | source.o : common_mpi.o 134 | source.o : apps/$(APP)/param.o 135 | source.o : sanity.o 136 | source.o : types.o 137 | vof.o : bound.o 138 | vof.o : common_mpi.o 139 | vof.o : profiler.o 140 | vof.o : apps/$(APP)/param.o 141 | vof.o : sanity.o 142 | vof.o : types.o 143 | -------------------------------------------------------------------------------- /src/postprocessing/readme.txt: -------------------------------------------------------------------------------- 1 | The directory "postprocessing" contains all the postprocessing subroutines. 
2 | -------------------------------------------------------------------------------- /src/rks.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 4 | module mod_rks 5 | ! 6 | use mod_common_mpi 7 | #if defined(_USE_VOF) 8 | use mod_moms, only: momtad_tw 9 | #else 10 | use mod_moms, only: momtad_sp 11 | #endif 12 | use mod_types 13 | !@cuf use cudafor 14 | ! 15 | implicit none 16 | ! 17 | private 18 | public :: rk_sca 19 | ! 20 | contains 21 | ! 22 | subroutine rk_sca(f_t1,f_t2,nx,ny,nz,dxi,dyi,dzi,nh_d,nh_u,nh_t,rho,cpp,kappa,dzci,dzfi, & 23 | u,v,w,tmp,dtmpdtrk,dtmpdtrkold) 24 | ! 25 | ! subroutine to compute the temperature (or generic scalar) 26 | ! 27 | ! Note: --> suitable for both 2nd order Adams-Bashforth and 3rd low-storage Runge-Kutta; 28 | ! --> source terms are included in the main program; 29 | ! 30 | implicit none 31 | ! 32 | real(rp), intent(in ) :: f_t1,f_t2 33 | integer, intent(in ) :: nx,ny,nz 34 | real(rp), intent(in ) :: dxi,dyi,dzi 35 | integer , intent(in ) :: nh_d,nh_u,nh_t 36 | real(rp), intent(in ), dimension( 0:, 0:, 0:) :: rho,cpp,kappa 37 | real(rp), intent(in ), dimension(1-nh_d:) :: dzci,dzfi 38 | real(rp), intent(in ), dimension(1-nh_u:,1-nh_u:,1-nh_u:) :: u,v,w 39 | real(rp), intent(inout), dimension(1-nh_t:,1-nh_t:,1-nh_t:) :: tmp 40 | real(rp), intent(out ), dimension( :, :, :) :: dtmpdtrk 41 | real(rp), intent(inout), dimension( :, :, :) :: dtmpdtrkold 42 | ! 43 | integer :: i,j,k 44 | ! 45 | !@cuf attributes(managed) :: rho, cpp, kappa, u, v, w, tmp, dtmpdtrk, dtmpdtrkold 46 | !@cuf attributes(managed) :: dzci, dzfi 47 | ! 48 | #if defined(_USE_VOF) 49 | call momtad_tw(nx,ny,nz,dxi,dyi,dzi,nh_d,nh_u,nh_t,kappa,cpp,rho, & 50 | dzci,dzfi,tmp,u,v,w,dtmpdtrk) 51 | #else 52 | call momtad_sp(nx,ny,nz,dxi,dyi,dzi,nh_d,nh_u,nh_t, & 53 | dzci,dzfi,tmp,u,v,w,dtmpdtrk) 54 | #endif 55 | ! 
56 | #if defined(_OPENACC) 57 | !$acc kernels 58 | #else 59 | !$OMP PARALLEL DO DEFAULT(none) & 60 | !$OMP PRIVATE(i,j,k) & 61 | !$OMP SHARED(nx,ny,nz,f_t1,f_t2,tmp,dtmpdtrk,dtmpdtrkold) & 62 | !$OMP SHARED(dzci,dzfi) 63 | #endif 64 | do k=1,nz 65 | do j=1,ny 66 | do i=1,nx 67 | ! 68 | tmp(i,j,k) = tmp(i,j,k) + f_t1*dtmpdtrk(i,j,k) + f_t2*dtmpdtrkold(i,j,k) 69 | ! 70 | enddo 71 | enddo 72 | enddo 73 | #if defined(_OPENACC) 74 | !$acc end kernels 75 | #else 76 | !$OMP END PARALLEL DO 77 | #endif 78 | ! 79 | #if defined(_OPENACC) 80 | !$acc kernels 81 | #else 82 | !$OMP PARALLEL DO DEFAULT(none) & 83 | !$OMP PRIVATE(i,j,k) & 84 | !$OMP SHARED(nx,ny,nz,dtmpdtrk,dtmpdtrkold) 85 | #endif 86 | do k=1,nz 87 | do j=1,ny 88 | do i=1,nx 89 | ! 90 | dtmpdtrkold(i,j,k) = dtmpdtrk(i,j,k) 91 | ! 92 | enddo 93 | enddo 94 | enddo 95 | #if defined(_OPENACC) 96 | !$acc end kernels 97 | #else 98 | !$OMP END PARALLEL DO 99 | #endif 100 | ! 101 | return 102 | end subroutine rk_sca 103 | ! 104 | end module mod_rks 105 | -------------------------------------------------------------------------------- /src/targets/target.generic-cray: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # CRAY (cpu only) 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := g++ 7 | FC := ftn 8 | ifeq ($(DO_DBG),1) 9 | DBG := -g -G0 10 | else 11 | OPT := -O3 12 | endif 13 | PRE := -fallow-argument-mismatch # this option seems required to successfully compile 2decomp on CRAY machines 14 | 15 | # Take all the compiler flags together 16 | FFLAGS := $(OPT) $(DBG) $(PRE) 17 | DFLAGS := -D_TIMING -D_EPA2A -D_DECOMP_X #-D_TWOD 18 | DFLAGS += -D_OVERWRITE -D_EVEN # FFLAGS_2DECOMP 19 | LDFLAGS := 20 | 21 | # Architecture switches 22 | USE_NVTX = 0 23 | 24 | # Required for FFTW 25 | FFTW_HOME := /data/nicolos/numerical_libraries/fftw-3.3.10 26 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 27 | 28 | # Required for NVTX 29 | # NVTX_LIB += 30 |
-------------------------------------------------------------------------------- /src/targets/target.generic-gnu: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # GNU (cpu only) 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := g++ 7 | FC := mpif90 8 | ifeq ($(DO_DBG),1) 9 | DBG := -O0 -g -fbacktrace -ffpe-trap=invalid -Wall -Wextra -pedantic -fcheck=all -finit-real=snan 10 | else 11 | OPT := -O3 -march=native #-ffast-math # "-ffast-math" leads to code failure in some GNU versions. If it happens, simply use "OPT := -O3 -march=native" or "OPT := -O3" 12 | endif 13 | PRE := #-fdefault-real-8 -fdefault-double-8 14 | 15 | # Take all the compiler flags together 16 | FFLAGS := $(OPT) $(DBG) $(PRE) 17 | DFLAGS := -D_TIMING -D_EPA2A -D_DECOMP_X #-D_TWOD 18 | DFLAGS += -D_OVERWRITE -D_EVEN # FFLAGS_2DECOMP 19 | LDFLAGS := 20 | 21 | # Architecture switches 22 | USE_NVTX = 0 23 | 24 | # Required for FFTW 25 | FFTW_HOME := /data/nicolos/numerical_libraries/fftw-3.3.10 26 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 27 | 28 | # Required for NVTX 29 | # NVTX_LIB += 30 | -------------------------------------------------------------------------------- /src/targets/target.generic-gpu: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # NVFORTRAN (cpu/gpu version) 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := nvc++ 7 | FC := mpif90 8 | ifeq ($(DO_DBG),1) 9 | DBG := -Minform=inform -Mchkstk -g -O0 -traceback -Mbounds 10 | else 11 | OPT := -Minfo=accel -fast 12 | endif 13 | PRE := #-r8 14 | 15 | # Take all the compiler flags together 16 | FFLAGS := $(OPT) $(DBG) $(PRE) 17 | DFLAGS := -D_TIMING -D_EPA2A -D_DECOMP_X #-D_TWOD 18 | DFLAGS += #-D_OVERWRITE -D_EVEN # FFLAGS_2DECOMP 19 | LDFLAGS := 20 | 21 | # Architecture switches 22 | # 23 | # NOTE: GPU compilation is enabled with FFLAGS and DFLAGS (see below) 24 | # 25 | USE_NVTX = 0 26 | 27 | # Required for FFTW 28 | 
FFTW_HOME := /data/nicolos/numerical_libraries/fftw-3.3.10 29 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 30 | 31 | FFLAGS += -cuda -acc -gpu=cc70,cuda11.0 -cudalib=cufft,curand 32 | DFLAGS += -D_EPHC -D_GPU_MPI 33 | 34 | # Required for NVTX 35 | NVTX_LIB += -lnvToolsExt 36 | -------------------------------------------------------------------------------- /src/targets/target.generic-intel: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # INTEL (cpu only) 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := g++ 7 | FC := mpiifort 8 | ifeq ($(DO_DBG),1) 9 | DBG := -O0 -g -traceback -fpe0 -check bounds -check uninit #-check all -debug all 10 | else 11 | OPT := -O3 -ipo -xHost 12 | endif 13 | PRE := #-r8 14 | 15 | # Take all the compiler flags together 16 | FFLAGS := $(OPT) $(DBG) $(PRE) 17 | DFLAGS := -D_TIMING -D_EPA2A -D_DECOMP_X #-D_TWOD 18 | DFLAGS += -D_OVERWRITE -D_EVEN # FFLAGS_2DECOMP 19 | LDFLAGS := 20 | 21 | # Architecture switches 22 | USE_NVTX = 0 23 | 24 | # Required for FFTW 25 | FFTW_HOME := /data/nicolos/numerical_libraries/fftw-3.3.10 26 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 27 | 28 | # Required for NVTX 29 | # NVTX_LIB += 30 | -------------------------------------------------------------------------------- /src/targets/target.generic-nvf: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # NVFORTRAN (cpu only) 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := nvc++ 7 | FC := mpif90 8 | ifeq ($(DO_DBG),1) 9 | DBG := -O0 -g -traceback -Mstandard -Minform=inform -Mbackslash -Mbounds -Mchkptr -Mchkstk 10 | else 11 | OPT := -Minfo=accel -fast 12 | endif 13 | PRE := #-r8 14 | 15 | # Take all the compiler flags together 16 | FFLAGS := $(OPT) $(DBG) $(PRE) 17 | DFLAGS := -D_TIMING -D_EPA2A -D_DECOMP_X #-D_TWOD 18 | DFLAGS += -D_OVERWRITE -D_EVEN # FFLAGS_2DECOMP 19 | LDFLAGS := 20 | 21 | # Architecture switches 22 | USE_NVTX = 0 23 | 24 | # Required 
for FFTW 25 | FFTW_HOME := /data/nicolos/numerical_libraries/fftw-3.3.10 26 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 27 | 28 | # Required for NVTX 29 | # NVTX_LIB += 30 | -------------------------------------------------------------------------------- /src/targets/target.raplab-cpu_gnu: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # GNU CPU version 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := g++ 7 | FC := mpif90 8 | OPT := -O3 -ffast-math -march=native 9 | DBG := 10 | PRE := #-fdefault-real-8 -fdefault-double-8 11 | 12 | # Take all the compiler flags together 13 | FFLAGS := $(OPT) $(PRE) 14 | #FFLAGS := $(DBG) $(PRE) 15 | DFLAGS = -D_TIMING 16 | DFLAGS += -D_EPA2A 17 | DFLAGS += -D_DECOMP_X 18 | #DFLAGS += -D_OVERWRITE # FFLAGS_2DECOMP 19 | #DFLAGS += -D_EVEN # FFLAGS_2DECOMP 20 | LDFLAGS := 21 | 22 | # Architecture switches 23 | USE_NVTX ?= 0 24 | 25 | # Required for FFTW 26 | FFTW_HOME := /home/sw-hackathons/opt/spack/linux-ubuntu18.04-broadwell/gcc-9.3.0/fftw-3.3.8-2s34e4b37yi4nlmfix2fccdftl4xd36i 27 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 28 | 29 | # Required for NVTX 30 | NVTX_LIB += -lnvToolsExt 31 | -------------------------------------------------------------------------------- /src/targets/target.raplab-cpu_nvhpc: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # GNU cpu version 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := nvc++ 7 | FC := mpif90 8 | OPT := -Minfo=accel -fast 9 | DBG := 10 | PRE := -r8 11 | 12 | # Take all the compiler flags together 13 | FFLAGS := $(OPT) $(PRE) 14 | #FFLAGS := $(DBG) $(PRE) 15 | DFLAGS = -D_TIMING 16 | DFLAGS += -D_EPA2A 17 | DFLAGS += -D_DECOMP_X 18 | #DFLAGS += -D_OVERWRITE # FFLAGS_2DECOMP 19 | #DFLAGS += -D_EVEN # FFLAGS_2DECOMP 20 | LDFLAGS := 21 | 22 | # Architecture switches 23 | USE_NVTX ?= 0 24 | 25 | # Required for FFTW 26 | FFTW_HOME := 
/home/sw-hackathons/opt/spack/linux-ubuntu18.04-broadwell/nvhpc-21.9/fftw-3.3.9-crw2ro6zlfzbme4mvqrpq7xo7lsdrinb 27 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 28 | 29 | # Required for NVTX 30 | NVTX_LIB += -lnvToolsExt 31 | -------------------------------------------------------------------------------- /src/targets/target.raplab-gpu: -------------------------------------------------------------------------------- 1 | # Compiler definitions 2 | # 3 | # NVFORTRAN GPU version 4 | # 5 | CPP_FLAGS := -cpp 6 | CC := nvc++ 7 | FC := mpif90 8 | OPT := -Minfo=accel -fast 9 | DBG := 10 | PRE := -r8 11 | 12 | # Take all the compiler flags together 13 | FFLAGS := $(OPT) $(PRE) 14 | #FFLAGS := $(DBG) $(PRE) 15 | DFLAGS = -D_TIMING 16 | DFLAGS += -D_EPA2A 17 | DFLAGS += -D_DECOMP_X 18 | DFLAGS += -D_BENCHMARK_NO_IO 19 | #DFLAGS += -D_OVERWRITE # FFLAGS_2DECOMP 20 | #DFLAGS += -D_EVEN # FFLAGS_2DECOMP 21 | LDFLAGS := 22 | 23 | # Architecture switches 24 | # NOTE: GPU compilation is enabled with FFLAGS and DFLAGS (see below) 25 | USE_NVTX ?= 1 26 | 27 | # Required for FFTW 28 | FFTW_HOME := /home/sw-hackathons/opt/spack/linux-ubuntu18.04-broadwell/nvhpc-21.11/fftw-3.3.9-2r7hclsmc76lwod4ms6466po7d4xbxot 29 | LDFLAGS += -L${FFTW_HOME}/lib -lfftw3 30 | 31 | FFLAGS += -cuda -acc -gpu=cc70,cuda11.0,lineinfo -cudalib=cufft,curand -Minfo=accel 32 | DFLAGS += -D_EPHC 33 | DFLAGS += -D_GPU_MPI 34 | #DFLAGS += -D_USE_NVTX_FFT 35 | 36 | # Required for NVTX 37 | NVTX_LIB += -lnvToolsExt 38 | -------------------------------------------------------------------------------- /src/types.f90: -------------------------------------------------------------------------------- 1 | ! 2 | ! SPDX-License-Identifier: MIT 3 | ! 
4 | module mod_types 5 | use mpi 6 | #if defined(_SINGLE_PRECISION) 7 | integer, parameter, public :: rp = KIND(1.0) 8 | integer, parameter, public :: MPI_REAL_RP = MPI_REAL 9 | #else 10 | integer, parameter, public :: rp = KIND(0.0D0) 11 | integer, parameter, public :: MPI_REAL_RP = MPI_DOUBLE_PRECISION 12 | #endif 13 | end module mod_types 14 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/fram/compiler.out: -------------------------------------------------------------------------------- 1 | mpiifort 2 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/fram/load_fram.sh: -------------------------------------------------------------------------------- 1 | #DIR="$(cd "$(dirname "$0")" && pwd)" 2 | #export CXX=CC 3 | #export CC=cc 4 | export FC=mpiifort 5 | 6 | #module load buildenv-intel/2018a-eb 7 | #module load FFTW/3.3.6-nsc1 8 | module restore system # Restore loaded modules to the default 9 | module load intel/2018b 10 | 11 | make clean 12 | make 13 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/fram/sbatch_fram.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=NN9561K 3 | #SBATCH --job-name=we0p02 4 | #SBATCH --time=06-23:30:00 5 | #SBATCH --nodes=32 6 | #SBATCH --ntasks-per-node=32 7 | 8 | set -o errexit 9 | set -o nounset 10 | 11 | module --quiet purge 12 | module load intel/2018b 13 | 14 | #cd /cluster/work/users/nicolos/hst/low_temp_1/hst_rk_we0p02/ 15 | cd /cluster/work/users/"your_name"/ 16 | 17 | mpirun -np 1024 ./flutas > my_output_file.txt 2>&1 18 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/marconi100/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=4 3 | #SBATCH 
--ntasks-per-node=4 4 | #SBATCH --ntasks-per-socket=2 5 | #SBATCH --cpus-per-task=32 6 | #SBATCH --gres=gpu:4 7 | #######SBATCH --mem=230000MB 8 | #SBATCH --time 00:05:00 9 | #SBATCH -A pra22_5578 10 | #SBATCH -p m100_usr_prod 11 | #######SBATCH --qos=m100_qos_bprod 12 | #SBATCH --job-name=FluTAS_job_test_1 13 | #SBATCH --error=log.%j.err 14 | #SBATCH --output=log.%j.out 15 | 16 | #SBATCH --mail-type=ALL 17 | #######SBATCH --mail-user=marco.crialesiesposito@gmail.com 18 | #SBATCH --mail-user=demou@mech.kth.se 19 | 20 | module purge 21 | module load autoload profile/global 22 | module load fftw/3.3.8--spectrum_mpi--10.3.1--binary 23 | module load hpc-sdk/2020--binary 24 | 25 | #export OMP_NUM_THREADS=1 26 | #export OMP_PROC_BIND=true 27 | #export NO_STOP_MESSAGE=yes 28 | #export PGI_ACC_TIME=1 29 | #export PGI_ACC_NOTIFY=2 30 | #export CUDA_VISIBLE_DEVICES="$SLURM_LOCALID % $SLURM_GPUS_PER_NODE" 31 | LD_LIBRARY_PATH="/cineca/prod/opt/compilers/cuda/10.0/none/extras/CUPTI/lib64/:$LD_LIBRARY_PATH" 32 | 33 | mpirun -np 16 --map-by socket:PE=8 --rank-by core --report-bindings ./flutas 34 | 35 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/marconi100/run_interactively: -------------------------------------------------------------------------------- 1 | srun --nodes=1 --ntasks-per-node=32 --time=08:00:00 --partition m100_usr_prod --gres=gpu:4 --cpus-per-task=4 --pty /bin/bash 2 | 3 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/marconi100/run_profiling.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=4 3 | #SBATCH --ntasks-per-node=4 4 | #SBATCH --ntasks-per-socket=2 5 | #SBATCH --cpus-per-task=32 6 | #SBATCH --gres=gpu:4 7 | ##SBATCH --mem=230000MB 8 | #SBATCH --time 00:05:00 9 | #SBATCH -A iscrc_cansgpu 10 | #SBATCH -p m100_usr_prod 11 | ##SBATCH --qos=m100_qos_bprod 12 | #SBATCH 
--job-name=FluTAS_job_test_1 13 | #SBATCH --error=log.%j-%n.err 14 | #SBATCH --output=log.%j-%np.out 15 | 16 | #SBATCH --mail-type=ALL 17 | #SBATCH --mail-user=marco.crialesiesposito@gmail.com 18 | 19 | module purge 20 | module load autoload profile/global 21 | module load fftw/3.3.8--spectrum_mpi--10.3.1--binary 22 | module load hpc-sdk/2020--binary 23 | 24 | #export OMP_NUM_THREADS=1 25 | #export OMP_PROC_BIND=true 26 | #export NO_STOP_MESSAGE=yes 27 | #export PGI_ACC_TIME=1 28 | #export PGI_ACC_NOTIFY=2 29 | #export CUDA_VISIBLE_DEVICES="$SLURM_LOCALID % $SLURM_GPUS_PER_NODE" 30 | LD_LIBRARY_PATH="/cineca/prod/opt/compilers/cuda/10.0/none/extras/CUPTI/lib64/:$LD_LIBRARY_PATH" 31 | 32 | ####mpirun -np 16 --map-by socket:PE=8 --rank-by core --report-bindings ./flutas 33 | 34 | mpirun -np 16 --map-by socket:PE=8 --rank-by core --mca btl ^openib --report-bindings ./wrap_nsys.sh ./flutas 2>&1 | tee out 35 | -------------------------------------------------------------------------------- /utils/load_and_sbatch/marconi100/toMake: -------------------------------------------------------------------------------- 1 | module purge 2 | module load autoload profile/global 3 | module load fftw/3.3.8--spectrum_mpi--10.3.1--binary 4 | module load hpc-sdk/2020--binary 5 | 6 | make -f Makefile.mc -j4 7 | -------------------------------------------------------------------------------- /utils/plot2d/param.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | datadir = 'data/' 4 | figsdir = 'figs/' 5 | exto = '.pdf' 6 | filenamei = datadir + 'fld_u_slice_fld_0629500.bin' 7 | filenameo = figsdir + 'visu' + exto 8 | # 9 | n2 = 128 10 | n1 = 128 11 | l2 = 1.0 12 | l1 = 1.0 13 | dx1 = l1/float(n1) 14 | dx2 = l2/float(n2) 15 | # 16 | fgtitle = r'Slice at $y/h = 0.5$' 17 | cbtitle = r'$u/U_b$' 18 | x1title = r'$x/h$' 19 | x2title = r'$z/h$' 20 | 
-------------------------------------------------------------------------------- /utils/plot2d/plot_2d_flow_slice.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from pltparams import * 3 | from param import * 4 | import pylab 5 | # 6 | # read data 7 | # 8 | f = open(filenamei,'rb') 9 | fld = np.fromfile(f,dtype='float64') 10 | fld = np.reshape(fld,(n2,n1),order='C') 11 | f.close() 12 | # 13 | # initialize figure 14 | # 15 | initfig(ar = l2/l1) 16 | fig, ax, formatter = newfig() 17 | #ax.set_axis_off() 18 | #ax.get_xaxis().set_visible(False) 19 | #ax.get_yaxis().set_visible(False) 20 | # 21 | # plot data 22 | # 23 | x1 = np.linspace(0.+dx1/2.,l1-dx1/2.,n1) 24 | x2 = np.linspace(0.+dx2/2.,l2-dx2/2.,n2) 25 | cs1 = ax.contourf(x1, x2, fld, 26 | cmap=plt.cm.jet) 27 | cbar = fig.colorbar(cs1, orientation='vertical') 28 | # 29 | # format figrue 30 | # 31 | ax.set_title(fgtitle) 32 | cbar.ax.set_title(cbtitle) 33 | ax.set_xlabel(x1title) 34 | ax.set_ylabel(x2title) 35 | # 36 | # save figure 37 | # 38 | fig.tight_layout(pad=0.15) 39 | plt.show() 40 | fig.savefig(filenameo) 41 | -------------------------------------------------------------------------------- /utils/plot2d/pltparams.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | import numpy as np 4 | import matplotlib 5 | import matplotlib.pyplot as plt 6 | # 7 | def initfig(width = 600.0, ar = (np.sqrt(5)-1.0)/2.0, scl = 0.95): 8 | fig_width_pt = width # \showthe\columnwidth 9 | inches_per_pt = 1.0/72.27 # pt to in 10 | aspect_ratio = ar # aspect ratio 11 | fig_scale = scl # scale factor 12 | fig_width = fig_width_pt*inches_per_pt*fig_scale # width in in 13 | fig_height = fig_width*aspect_ratio # height in in 14 | fig_size = [fig_width,fig_height] # final dimensions 15 | params = {'backend' : 'ps', 16 | 'font.family' : 'serif', 17 | 'font.size' : 10, 18 | 'axes.labelsize' : 10, 19 | 
'legend.fontsize': 8, 20 | 'xtick.labelsize': 8, 21 | 'ytick.labelsize': 8, 22 | 'text.usetex' : True, 23 | 'figure.figsize' : fig_size} 24 | plt.rcParams.update(params) 25 | # 26 | def format(x, pos): 27 | 'The two args are the value and tick position' 28 | return '%1.2g' % (x) 29 | # 30 | def newfig(): 31 | plt.cla() 32 | plt.clf() 33 | plt.close() 34 | fig = plt.figure() 35 | ax = fig.add_subplot(111) 36 | formatter = matplotlib.ticker.FuncFormatter(format) 37 | return fig, ax, formatter 38 | -------------------------------------------------------------------------------- /utils/plot2d/readme.txt: -------------------------------------------------------------------------------- 1 | plot_2d_flow_slice.py is a python script that reads an output file from FluTAS containing a planar section of a 3D scalar field (unformatted FORTRAN binary file). It is meant to give a simple example of how to read and plot planar data. The corresponding parameters should be changed in param.py. 2 | -------------------------------------------------------------------------------- /utils/postprocessing/clean.sh: -------------------------------------------------------------------------------- 1 | rm -rf *.out 2 | -------------------------------------------------------------------------------- /utils/preprocessing/randomDroplet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # to use, simply type on the terminal: python3 randomDroplet.py 5 | # 6 | # Import the modules 7 | # 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from mpl_toolkits.mplot3d import Axes3D 11 | # 12 | # These inputs are just examples. 
13 | # Tune them according to your case 14 | # 15 | lx = 4.0 # domain dimension along x 16 | ly = 2.0 # domain dimension along y 17 | lz = 4.0 # domain dimension along z 18 | nx = 1024 # points along x 19 | ny = 512 # points along y 20 | nz = 1024 # points along x 21 | ppd = 32 # grid points per diameter (of the dispersed phase) 22 | alpha = 0.05 # gas/liquid volume fraction 23 | doWrite = 1 # write or not the results in bub.in 24 | dp_dist = 1.2 # normalized intra-droplets distance 25 | # 26 | # Preliminary calculations 27 | # 28 | dx = lx/nx 29 | dy = ly/ny 30 | dz = lz/nz 31 | d = ppd*dx 32 | V = lx*ly*lz 33 | Vb = 4/3.*np.pi*(d/2)**3 34 | nb = int(np.floor(alpha*V/Vb)) 35 | # 36 | # Generation of the random dispersed 37 | # phase distribution 38 | # 39 | xb = np.zeros(nb) 40 | zb = np.zeros(nb) 41 | yb = np.zeros(nb) 42 | domchk = np.zeros(6) 43 | for i in range(nb): 44 | #print(i) 45 | pen_chk = True 46 | while pen_chk: 47 | # 48 | xb[i] = np.random.rand()*lx 49 | yb[i] = np.random.rand()*ly 50 | zb[i] = np.random.rand()*lz 51 | chk = np.zeros(i) 52 | # 53 | domchk[0] = xb[i]-d/2 < 0 54 | domchk[1] = yb[i]-d/2 < 0 55 | domchk[2] = zb[i]-d/2 < 0 56 | domchk[3] = xb[i]+d/2 > lx 57 | domchk[4] = yb[i]+d/2 > ly 58 | domchk[5] = zb[i]+d/2 > lz 59 | # 60 | if np.sum(domchk)==0: 61 | for j in range(i): 62 | chk[j] = ((xb[j]-xb[i])**2 + 63 | (yb[j]-yb[i])**2 + 64 | (zb[j]-zb[i])**2)**0.5 < d*dp_dist 65 | # 66 | if np.sum(chk)>=1: 67 | pen_chk = True 68 | else: 69 | pen_chk = False 70 | # 71 | fig = plt.figure() 72 | ax = fig.add_subplot(111, projection='3d') 73 | ax.scatter(xb,yb,zb,s=Vb*1e3) 74 | # 75 | # print the bub.in files 76 | # 77 | if doWrite: 78 | f = open("bub.in","w") 79 | for i in range(nb): 80 | f.write("%.15f %.15f %.15f %.15f \n" %(xb[i],yb[i],zb[i],d/2)) 81 | f.close() 82 | 83 | 84 | -------------------------------------------------------------------------------- /utils/profilers/run.sh: 
#!/bin/bash
#
# Profile rank 0 of an MPI run with NVIDIA Nsight Compute (ncu),
# while the remaining ranks execute the application unprofiled.
#
# Usage: mpirun -np N ./wrap_ncu.sh <kernel-regex> <executable> [args...]
#   $1    : regex selecting the kernels to profile
#   $2... : the application command line

TAG_TIMESTAMP=$(date +"%G%m%d-%H%M%S")

if [[ $OMPI_COMM_WORLD_RANK == 0 ]]; then
  # quote the expansions: the original unquoted $* / ${*:2} would re-split
  # any argument containing spaces before handing it to ncu
  ncu --set full --kernel-regex "${1}" -o "report--${1}--${TAG_TIMESTAMP}" --target-processes all "${@:2}"
else
  "$@"
fi
%
% taken from https://github.com/p-costa/CaNS/ by P. Costa
%
% Read a single 3-D scalar field written by FluTAS, together with the
% grid information stored in geometry.out / grid.bin.
%
% setting up some parameters
%
precision = 'double'; % precision of the real-valued data
r0 = [0.,0.,0.];      % domain origin
non_uniform_grid = true;
%
% read geometry file (1st row: number of grid points, 2nd row: domain size)
%
geofile = 'geometry.out';
data = dlmread(geofile);
ng = data(1,:);
l  = data(2,:);
dl = l./ng;
%
% read and generate grid
%
xp = linspace(r0(1)+dl(1)/2.,r0(1)+l(1),ng(1)); % centered x grid
yp = linspace(r0(2)+dl(2)/2.,r0(2)+l(2),ng(2)); % centered y grid
zp = linspace(r0(3)+dl(3)/2.,r0(3)+l(3),ng(3)); % centered z grid
xu = xp + dl(1)/2.; % staggered x grid
yv = yp + dl(2)/2.; % staggered y grid
zw = zp + dl(3)/2.; % staggered z grid
if(non_uniform_grid)
  f = fopen('grid.bin');
  grid_z = fread(f,[ng(3),4],precision);
  fclose(f);
  zp = r0(3) + grid_z(:,3)'; % centered z grid
  zw = r0(3) + grid_z(:,4)'; % staggered z grid
end
%
% read checkpoint binary file
% (the 's' option makes input return the answer as a string; the original
%  script mixed python syntax -- `or "1"` and n[0] indexing -- into MATLAB)
%
filenamei = input('Name of the binary file written by FluTAS (e.g. vex_fld_0000000.bin) [vex_fld_0000000.bin]: ','s');
if isempty(filenamei)
  filenamei = 'vex_fld_0000000.bin'; % default file name
end
iskipx = input('Data saved every (ix, iy, iz) points. Value of ix? [1]: ');
if isempty(iskipx)
  iskipx = 1;
end
iskipy = input('Data saved every (ix, iy, iz) points. Value of iy? [1]: ');
if isempty(iskipy)
  iskipy = 1;
end
iskipz = input('Data saved every (ix, iy, iz) points. Value of iz? [1]: ');
if isempty(iskipz)
  iskipz = 1;
end
iskip = [iskipx,iskipy,iskipz];
n = round(ng./iskip);
%
% the stored field is already subsampled, so size the read with n, not ng
% (the original fread/reshape used ng, which fails whenever iskip > 1)
%
f = fopen(filenamei);
data = reshape(fread(f,n(1)*n(2)*n(3),precision),[n(1),n(2),n(3)]);
fclose(f);
#
# https://github.com/p-costa/CaNS/ by P. Costa
#
#!/usr/bin/env python
def read_single_field_binary(filenamei,iskip):
    """Read a 3-D scalar field written by FluTAS.

    filenamei : name of the unformatted binary file (Fortran order)
    iskip     : 3-element array; the field was saved every iskip points
    Returns (data, xp, yp, zp, xu, yv, zw): the field plus the centered
    and staggered grids, subsampled consistently with iskip.
    Reads "geometry.out" (and "grid.bin" for the non-uniform z grid)
    from the current directory.
    """
    import numpy as np
    #
    # setting up some parameters
    #
    iprecision = 8                # precision of the real-valued data
    r0 = np.array([0.,0.,0.])     # domain origin
    non_uniform_grid = True
    precision = 'float32' if iprecision == 4 else 'float64'
    #
    # read geometry file (row 0: grid points, row 1: domain size)
    #
    geo = np.loadtxt("geometry.out", comments = "!", max_rows = 2)
    ng = geo[0,:].astype('int')
    l  = geo[1,:]
    dl = l/(1.*ng)
    #
    # cell-centered grids along each direction, then the staggered ones
    #
    xp, yp, zp = (np.arange(r0[i]+dl[i]/2., r0[i]+l[i], dl[i]) for i in range(3))
    xu = xp + dl[0]/2.
    yv = yp + dl[1]/2.
    zw = zp + dl[2]/2.
    if non_uniform_grid:
        # grid.bin holds ng[2] rows x 4 columns in Fortran order;
        # columns 2/3 are the centered/staggered z coordinates
        with open('grid.bin','rb') as f:
            grid_z = np.fromfile(f,dtype=precision)
        grid_z = np.reshape(grid_z,(ng[2],4),order='F')
        zp = r0[2] + grid_z[:,2]
        zw = r0[2] + grid_z[:,3]
    #
    # read the (already subsampled) binary field
    #
    n = (ng[:]/iskip[:]).astype(int)
    data = np.zeros([n[0],n[1],n[2]])
    raw = np.fromfile(filenamei,dtype=precision)
    data[:,:,:] = np.reshape(raw,(n[0],n[1],n[2]),order='F')
    #
    # subsample the grids to match the stored field
    #
    xp = xp[0:ng[0]:iskip[0]]
    yp = yp[0:ng[1]:iskip[1]]
    zp = zp[0:ng[2]:iskip[2]]
    xu = xu[0:ng[0]:iskip[0]]
    yv = yv[0:ng[1]:iskip[1]]
    zw = zw[0:ng[2]:iskip[2]]
    return data,xp,yp,zp,xu,yv,zw
if __name__ == "__main__":
    import numpy as np
    filenamei = input("Name of the binary file written by FluTAS (e.g. vex_fld_0000000.bin)]: ")
    iskipx = input("Data saved every (ix, iy, iz) points. Value of ix? [1]: ") or "1"
    iskipy = input("Data saved every (ix, iy, iz) points. Value of iy? [1]: ") or "1"
    iskipz = input("Data saved every (ix, iy, iz) points. Value of iz? [1]: ") or "1"
    iskip = np.array([iskipx,iskipy,iskipz]).astype(int)
    read_single_field_binary(filenamei,iskip)
module mod_template
  !
  ! Template module illustrating the coding conventions adopted in FluTAS:
  ! explicit imports, private-by-default visibility, documented dummy
  ! arguments. Replace the placeholder names (a1, a2, ...) before use.
  !
  use mod_sub1, only: what_to_import
  ! NOTE(review): the working-precision kind 'rp' must also be imported
  ! here (e.g. from the project's types/param module) -- confirm.
  !
  implicit none
  !
  private ! what is not public is private by default, no need to declare.
  public :: sub_a,sub_b
  !
  contains
  !
  subroutine sub_a(a1,a2,a3,nh_1,nh_2,a4,a5,a6,a7,a8)
    !
    ! Few lines of comments (please add them)
    !
    implicit none
    !
    character(len=*), intent(in ) :: a1
    integer , intent(in ), dimension(3) :: a2
    integer , intent(in ) :: a3
    integer , intent(in ) :: nh_1,nh_2
    real(rp) , intent(in ), dimension(3) :: a4
    logical , intent(in ) :: a5
    real(rp) , intent(in ), dimension(1-nh_1:,1-nh_1:,1-nh_1:) :: a6
    real(rp) , intent(in ), dimension(1-nh_2:,1-nh_2:,1-nh_2:) :: a7
    real(rp) , intent(out), dimension( 0:, 0:, 0:) :: a8
    !
    ! local work array: a local (non-dummy) variable cannot be declared
    ! with an assumed shape, so it is allocatable here
    ! (the template had "dimension(0:,0:,0:)", which does not compile)
    real(rp), allocatable, dimension(:,:,:) :: b1
    !
    ! Body of the subroutine
    !
    return
  end subroutine sub_a
  !
  subroutine sub_b(nx,ny,nz,a1,a2,a3,a4,nh_1,nh_2,a5,a6,a7)
    !
    ! Few lines of comments (please add them)
    !
    implicit none
    !
    integer , intent(in ) :: nx,ny,nz
    integer , intent(in ), dimension(3) :: a1
    real(rp), intent(in ), dimension(3) :: a2
    integer , intent(in ) :: a3
    logical , intent(in ) :: a4
    integer , intent(in ) :: nh_1,nh_2
    real(rp), intent(in ), dimension(1-nh_1:,1-nh_1:,1-nh_1:) :: a5
    real(rp), intent(in ), dimension(1-nh_2:,1-nh_2:,1-nh_2:) :: a6
    real(rp), intent(out), dimension( 0:, 0:, 0:) :: a7
    !
    real(rp) :: b1
    real(rp) :: b2
    integer :: b3
    integer :: i,j,k ! loop indices: must be declared under implicit none
    !
    ! Body of the subroutine
    !
    do k=1,nz
      do j=1,ny
        do i=1,nx
          !
          ! ...
          !
        enddo
      enddo
    enddo
    !
    return
  end subroutine sub_b
  !
end module mod_template
'''
This file performs the automatic testing for the cases in
utils/testing/templateTest: compile the app selected in test.stp,
copy the executable into the template folder and run it there.
'''

import numpy as np
import os

def readstp(fn):
    """
    Parse a .stp settings file made of plain python assignments
    (e.g. "conf = 'two_phase_inc_isot'") and return them as a dict.

    The original implementation exec'd each line into the caller's
    globals via globals().update(locals()); collecting the names in a
    private namespace avoids that side effect.
    """
    settings = {}
    with open(fn, 'r') as f:
        for line in f:
            exec(line, {}, settings)
    return settings

if __name__ == "__main__":
    # define execution variables
    wfold = '../../'
    compiler = 'PGI'
    tfold = 'utils/testing/templateTest/'
    doDebugFlag = True
    cdir = os.getcwd()

    cfg = readstp(wfold+tfold+'test.stp')
    conf = cfg['conf']

    # honour the variables defined above: the original hard-coded
    # COMP=PGI and DBG_FLAG=0, silently ignoring compiler/doDebugFlag
    dbg = 1 if doDebugFlag else 0
    os.chdir('../../src')
    os.system('cp '+conf+'/* .')
    os.system('make -f Makefile.mc clean; '
              'make -f Makefile.mc COMP=%s DBG_FLAG=%d' % (compiler, dbg))
    os.system('cp flutas '+cdir+'/templateTest')
    os.chdir(cdir+'/templateTest')
    os.system('mpirun -np 4 flutas; pwd')
    os.chdir(cdir)
numthreadsmax 29 | -------------------------------------------------------------------------------- /utils/testing/templateTest/test.stp: -------------------------------------------------------------------------------- 1 | conf = 'two_phase_inc_isot' 2 | dbg = True 3 | -------------------------------------------------------------------------------- /utils/testing/templateTest/vof.in: -------------------------------------------------------------------------------- 1 | 100. 1000. 1. 10. ! rho1, rho2, mu1,mu2 2 | bub ! inivof 3 | 1 ! nbub 4 | 0.5 0.5 0.5 0.25 ! xc, yc, zc, r 5 | N N N N N N ! cbcvof 6 | 0. 0. 0. 0. 0. 0. ! bcvof 7 | 24.5 ! sigma 8 | F 0 ! late_init,i_late_init 9 | --------------------------------------------------------------------------------