├── 2D ├── Advection │ ├── 2D_Advection_parallell.py │ ├── 2D_Advection_parallell_turbulence.py │ ├── 2D_Advection_parallell_turbulence_IDUN.py │ ├── 2D_Advection_second_upwind.py │ ├── 2d_advection_diff_central_no_parallell.py │ ├── Variance_computation.py │ └── read_field.py ├── Conventional_formulation │ ├── 2DNS_plot.py │ ├── 2DNS_spectral.py │ └── 2d_spectral_plot.py └── vorticity_formulation │ ├── 2DSpectral_vorticity.py │ ├── 2DSpectral_vorticity_MPI.py │ ├── 2DSpectral_vorticity_MPI_IDUN.py │ ├── 2D_mpi_example.py │ └── 2D_vorticity_mpi_scaling_script.py ├── 3D ├── MASTER │ ├── 3D_advection_mpi.py │ ├── 3D_dns_pencil.py │ ├── 3D_dns_pencil_particle_and_advection.py │ ├── 3D_dns_speedTest_Vilje.py │ ├── 3d_dns_SLAB.py │ ├── MPI_func │ │ ├── __init__.py │ │ └── mpibase.py │ ├── Particle_mpi.py │ ├── Post_processing │ │ ├── Compute_variance.py │ │ ├── Variance_computation.py │ │ ├── __init__.py │ │ └── spectrum_plotting.py │ ├── __init__.py │ ├── advection_for_dns_solver.py │ ├── math_dir │ │ └── setup.py │ └── post_processing.py ├── Project │ ├── 3DNS_spectral.py │ └── Plotting │ │ ├── 3DNS_dynamic_plot.py │ │ ├── basic_units.py │ │ ├── dns_plot.py │ │ └── radians_plot.py └── __init__.py ├── README.md ├── README.tex.md ├── Texts ├── MASTER_2020_Halvorsen.pdf ├── Molten_salt_reactor___TEP4545.pdf ├── Project_NTNU_2019.pdf └── Studies of turbulent diffusion through Direct Numerical Simulation.pdf ├── animation_folder ├── 2D │ ├── 256_256_5e-4nu-1e-3dt.gif │ ├── VorticityAnimation.gif │ ├── animationVelocitynu5e-4N64dt1e-2tend200.gif │ ├── fieldspread.gif │ ├── fieldspread2.gif │ ├── nice.gif │ └── nice2.gif └── 3D │ ├── 256spectrum_fixedaxis.gif │ ├── 512_70_xy.gif │ ├── 512_70_xz.gif │ ├── 512_70_yz.gif │ ├── 512isotropic_green.gif │ ├── Particle_6550.gif │ ├── TG3D_128.gif │ ├── TG3D_64.gif │ ├── animation64_160k.gif │ ├── dissipation.png │ ├── iso256Nice.gif │ ├── iso512_niceEnergy.gif │ ├── particlegif_t_10.gif │ ├── spectrum512.gif │ ├── spectrum512_43.gif │ └── spectrum512_niceEnergy.gif └── tex ├── 0b73f1dfec99f4698e9399208969619d.svg └── 25a967376d35e3217b572b3aaf7fc6e0.svg /2D/Advection/2D_Advection_parallell.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | from mpi4py import MPI 4 | from tqdm import tqdm 5 | import matplotlib 6 | 7 | #matplotlib.use('Agg') 8 | import matplotlib.pyplot as plt 9 | from scipy.stats import multivariate_normal 10 | 11 | # Set the colormap 12 | plt.rcParams['image.cmap'] = 'BrBG' 13 | 14 | # Basic parameters 15 | N= 128 16 | dt = 0.01 17 | tend = 10 18 | 19 | 20 | D = 0.008# Diffusion constant 21 | L = 2*np.pi # 2pi = one period 22 | dx = L/N 23 | dy = dx # equidistant 24 | dx2 = dx ** 2 25 | dy2 = dy ** 2 26 | r = dt * D / (dx ** 2) 27 | assert (0 < 2 * r <= 0.5),('N too high. 
Reduce time step to uphold stability') 28 | timesteps = int(np.ceil(tend/dt)) 29 | image_interval = timesteps*0.01 # Write frequency for png files 30 | 31 | x = np.arange(0, N, 1) * L / N 32 | y = np.arange(0, N, 1) * L / N 33 | [X, Y] = np.meshgrid(x, y) 34 | 35 | 36 | fig, axs = plt.subplots(2) 37 | fig.suptitle('Title here') 38 | 39 | 40 | # For stability, this is the largest interval possible 41 | # for the size of the time-step: 42 | # dt = dx2*dy2 / ( 2*D*(dx2+dy2) ) 43 | 44 | 45 | # MPI globals 46 | comm = MPI.COMM_WORLD 47 | rank = comm.Get_rank() 48 | size = comm.Get_size() 49 | 50 | # Up/down neighbouring MPI ranks 51 | up = rank - 1 52 | if up < 0: 53 | up = size-1 54 | down = rank + 1 55 | if down > size - 1: 56 | down = 0 57 | 58 | 59 | def evolve(sol_new, sol,u,v, D, dt, dx2, dy2): 60 | """Explicit time evolution. 61 | u: new temperature field 62 | u_previous: previous field 63 | a: diffusion constant 64 | dt: time step 65 | dx2: grid spacing squared, i.e. dx^2 66 | dy2: -- "" -- , i.e. dy^2""" 67 | # LEFT boundary 68 | 69 | sol_new[1:-1, 0] = sol[1:-1, 0] \ 70 | + (dt*u[1:-1,0]) / (2*dx) * (sol[:-2, 0] - sol[2:, 0]) \ 71 | + (dt*v[1:-1,0]) / (2*dx) * (sol[1:-1, -1] - sol[1:-1, 1]) \ 72 | + r * (sol[2:, 0] - 2 * sol[1:-1, 0] + sol[:-2, 0]) \ 73 | + r * (sol[1:-1, 1] - 2 * sol[1:-1, 0] + sol[1:-1, -1]) 74 | #INNER points 75 | sol_new[1:-1, 1:-1] = sol[1:-1, 1:-1] \ 76 | + (dt*u[1:-1,1:-1]) / (2*dx) * (sol[:-2,1:-1] - sol[2:,1:-1]) \ 77 | + (dt*v[1:-1,1:-1]) / (2*dx) * (sol[1:-1,:-2] - sol[1:-1,2:]) \ 78 | + r * (sol[2:, 1:-1] - 2 * sol[1:-1, 1:-1] + sol[:-2, 1:-1]) \ 79 | + r * (sol[1:-1, 2:] - 2 * sol[1:-1, 1:-1] + sol[1:-1, :-2]) 80 | #RIGHT boundary 81 | sol_new[1:-1, -1] = sol[1:-1, -1] \ 82 | + (dt*u[1:-1,-1]) / (2*dx) * (sol[:-2, -1] - sol[2:, -1]) \ 83 | + (dt*v[1:-1,-1]) / (2*dx) * (sol[1:-1, -2] - sol[1:-1, 0]) \ 84 | + r * (sol[2:, -1] - 2 * sol[1:-1, -1] + sol[:-2, -1]) \ 85 | + r * (sol[1:-1, 0] - 2 * sol[1:-1, -1] + sol[1:-1, -2]) 86 | 87 | #Update next time step 88 | sol[:] = sol_new[:] 89 | #sol[:] = sol[:]/(np.sum(sol[:])) #cheat with mass conservation. 
Assume uniform loss over each cell 90 | 91 | def init_fields(X,Y): 92 | # Read the initial temperature field from file 93 | #field = np.loadtxt(filename) 94 | #field0 = field.copy() # Array for field of previous time step 95 | pos = np.dstack((X, Y)) 96 | mu = np.array([2, 3]) 97 | cov = np.array([[.05, .010], [.010, .05]]) 98 | rv = multivariate_normal(mu, cov) 99 | S = rv.pdf(pos) 100 | field = S.copy() / (np.sum(S)) 101 | field0 = field.copy() 102 | 103 | 104 | #A = 0.1 105 | #omega = 1 106 | #epsilon = 0.25 107 | #u = -np.pi*A*np.sin(np.pi*(1)*X)*np.cos(np.pi*Y) 108 | #v = -np.pi*A*np.cos(np.pi*(1)*X)*np.sin(np.pi*Y)*(1-2*epsilon) 109 | 110 | u = np.sin(1*X) * np.cos(1*Y)*0.1 111 | v = np.cos(1*X) * np.cos(1*Y)*0.1 112 | # u = np.random.rand(N, N)*1 113 | # v = np.random.rand(N, N)*1 114 | #u = np.ones((N,N))*1 115 | #v = np.ones((N,N))*1 116 | cu = dt * np.max(u) / dx 117 | cv = dt * np.max(v) / dx 118 | assert (((cu ** 2) / r) + ((cv ** 2) / r) <= 2), ('dt might be too high or diffusion constant might be too low') 119 | 120 | return field, field0, u,v 121 | 122 | 123 | def write_field(field, step): 124 | plt.gca().clear() 125 | plt.imshow(field,cmap='jet') 126 | plt.axis('off') 127 | plt.savefig('heat_{0:03d}.png'.format(step)) 128 | 129 | 130 | def exchange(field): 131 | # send down, receive from up 132 | sbuf = field[-2, :] 133 | rbuf = field[0, :] 134 | comm.Sendrecv(sbuf, dest=down, recvbuf=rbuf, source=up) 135 | # send up, receive from down 136 | sbuf = field[1, :] 137 | rbuf = field[-1, :] 138 | comm.Sendrecv(sbuf, dest=up, recvbuf=rbuf, source=down) 139 | 140 | 141 | def iterate(field,u,v, local_field, local_field0,local_u,local_v, timesteps, image_interval): 142 | step = 1 143 | pbar = tqdm(total=int(timesteps)) 144 | for m in range(1, timesteps + 1): 145 | exchange(local_field0) 146 | evolve(local_field, local_field0,local_u,local_v ,D, dt, dx2, dy2) 147 | step += 1 148 | pbar.update(1) 149 | if m % image_interval == 0: 150 | comm.Gather(local_field[1:-1, :], field, root=0) 151 | comm.Gather(local_u[1:-1, :], u, root=0) 152 | comm.Gather(local_v[1:-1, :], v, root=0) 153 | if rank == 0: 154 | #write_field(field, m) 155 | #plt.imshow(field.T,cmap='jet') 156 | axs[0].imshow(field, cmap='jet') # ,vmax=1,vmin=0) 157 | axs[1].imshow((v), cmap='jet') 158 | #plt.show() 159 | plt.pause(0.05) 160 | 161 | 162 | 163 | def main(): 164 | # Read and scatter the initial temperature field 165 | if rank == 0: 166 | field, field0,u,v = init_fields(X,Y) 167 | shape = field.shape 168 | dtype = field.dtype 169 | comm.bcast(shape, 0) # broadcast dimensions 170 | comm.bcast(dtype, 0) # broadcast data type 171 | else: 172 | field = None 173 | u = None 174 | v = None 175 | shape = comm.bcast(None, 0) 176 | dtype = comm.bcast(None, 0) 177 | if shape[0] % size: 178 | raise ValueError('Number of rows in the field (' \ 179 | + str(shape[0]) + ') needs to be divisible by the number ' \ 180 | + 'of MPI tasks (' + str(size) + ').') 181 | n = int(shape[0] / size) # number of rows for each MPI task 182 | m = shape[1] # number of columns in the field 183 | buff = np.zeros((n, m), dtype) 184 | comm.Scatter(field, buff, 0) # scatter the data 185 | local_field = np.zeros((n + 2, m), dtype) # need two ghost rows! 186 | local_field[1:-1, :] = buff # copy data to non-ghost rows 187 | local_field0 = np.zeros_like(local_field) # array for previous time step 188 | 189 | comm.Scatter(u, buff, 0) # scatter the data 190 | local_u = np.zeros((n + 2, m), dtype) # need two ghost rows! 
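    # Note on the local array layout: row 0 and row n+1 are ghost rows that hold
    # copies of the neighbouring ranks' boundary rows (refreshed by exchange()
    # every step), while rows 1..n hold this rank's own block of the global
    # field/velocity arrays distributed with comm.Scatter.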
191 | local_u[1:-1, :] = buff # copy data to non-ghost rows 192 | 193 | comm.Scatter(v, buff, 0) # scatter the data 194 | local_v = np.zeros((n + 2, m), dtype) # need two ghost rows! 195 | local_v[1:-1, :] = buff # copy data to non-ghost rows 196 | 197 | 198 | # Fix outer boundary ghost layers to account for aperiodicity? 199 | ''' 200 | if True: 201 | if rank == 0: 202 | local_field[0, :] =local_field[1, :] 203 | if rank == size - 1: 204 | local_field[-1, :] = local_field[-2, :] 205 | ''' 206 | local_field0[:] = local_field[:] 207 | 208 | # Plot/save initial field 209 | #if rank == 0: 210 | #write_field(field, 0) 211 | # Iterate 212 | t0 = time.time() 213 | iterate(field,u,v, local_field, local_field0,local_u,local_v, timesteps, image_interval) 214 | t1 = time.time() 215 | # Plot/save final field 216 | comm.Gather(local_field[1:-1, :], field, root=0) 217 | if rank == 0: 218 | write_field(field, timesteps) 219 | print("Running time: {0}".format(t1 - t0)) 220 | 221 | 222 | if __name__ == '__main__': 223 | main() 224 | -------------------------------------------------------------------------------- /2D/Advection/2D_Advection_parallell_turbulence.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | from mpi4py import MPI 4 | from tqdm import tqdm 5 | import matplotlib 6 | from pathlib import Path 7 | 8 | # matplotlib.use('Agg') 9 | import matplotlib.pyplot as plt 10 | from scipy.stats import multivariate_normal 11 | path = Path('/home/danieloh/PycharmProjects/Project_Turbulence_Modelling/2D/Advection') 12 | 13 | # Set the colormap 14 | plt.rcParams['image.cmap'] = 'BrBG' 15 | 16 | # Basic parameters 17 | N = 64 18 | dt = 1e-2 19 | tend = 50 20 | 21 | D = 0.008 # Diffusion constant 22 | L = 2 * np.pi # 2pi = one period 23 | dx = L / N 24 | dy = dx # equidistant 25 | dx2 = dx ** 2 26 | dy2 = dy ** 2 27 | r = dt * D / (dx ** 2) 28 | assert (0 < 2 * r <= 0.5), ('N too high. Reduce time step to uphold stability') 29 | timesteps = int(np.ceil(tend / dt)) 30 | load_every = 0.01*timesteps 31 | breaker = load_every 32 | image_interval = timesteps * 0.05 # Write frequency for png files 33 | field_store_counter=0 34 | 35 | filenames_u = ['datafiles/u/u_vel_t_0.npy'] 36 | filenames_v = ['datafiles/v/v_vel_t_0.npy'] 37 | 38 | for i in range(1,int((timesteps/load_every)+1)): 39 | filenames_u.append('datafiles/u/u_vel_t_'+str(i)+'.npy') 40 | filenames_v.append('datafiles/v/v_vel_t_'+str(i)+'.npy') 41 | 42 | 43 | 44 | x = np.arange(0, N, 1) * L / N 45 | y = np.arange(0, N, 1) * L / N 46 | [X, Y] = np.meshgrid(x, y) 47 | 48 | fig, axs = plt.subplots(2) 49 | fig.suptitle('Title here') 50 | 51 | # For stability, this is the largest interval possible 52 | # for the size of the time-step: 53 | # dt = dx2*dy2 / ( 2*D*(dx2+dy2) ) 54 | 55 | 56 | # MPI globals 57 | comm = MPI.COMM_WORLD 58 | rank = comm.Get_rank() 59 | size = comm.Get_size() 60 | 61 | # Up/down neighbouring MPI ranks 62 | up = rank - 1 63 | if up < 0: 64 | up = size - 1 65 | down = rank + 1 66 | if down > size - 1: 67 | down = 0 68 | 69 | 70 | def evolve(sol_new, sol, u, v, D, dt, dx2, dy2): 71 | """Explicit time evolution. 72 | u: new temperature field 73 | u_previous: previous field 74 | a: diffusion constant 75 | dt: time step 76 | dx2: grid spacing squared, i.e. dx^2 77 | dy2: -- "" -- , i.e. 
dy^2""" 78 | # LEFT boundary 79 | 80 | sol_new[1:-1, 0] = sol[1:-1, 0] \ 81 | + (dt * u[1:-1, 0]) / (2 * dx) * (sol[:-2, 0] - sol[2:, 0]) \ 82 | + (dt * v[1:-1, 0]) / (2 * dx) * (sol[1:-1, -1] - sol[1:-1, 1]) \ 83 | + r * (sol[2:, 0] - 2 * sol[1:-1, 0] + sol[:-2, 0]) \ 84 | + r * (sol[1:-1, 1] - 2 * sol[1:-1, 0] + sol[1:-1, -1]) 85 | # INNER points 86 | sol_new[1:-1, 1:-1] = sol[1:-1, 1:-1] \ 87 | + (dt * u[1:-1, 1:-1]) / (2 * dx) * (sol[:-2, 1:-1] - sol[2:, 1:-1]) \ 88 | + (dt * v[1:-1, 1:-1]) / (2 * dx) * (sol[1:-1, :-2] - sol[1:-1, 2:]) \ 89 | + r * (sol[2:, 1:-1] - 2 * sol[1:-1, 1:-1] + sol[:-2, 1:-1]) \ 90 | + r * (sol[1:-1, 2:] - 2 * sol[1:-1, 1:-1] + sol[1:-1, :-2]) 91 | # RIGHT boundary 92 | sol_new[1:-1, -1] = sol[1:-1, -1] \ 93 | + (dt * u[1:-1, -1]) / (2 * dx) * (sol[:-2, -1] - sol[2:, -1]) \ 94 | + (dt * v[1:-1, -1]) / (2 * dx) * (sol[1:-1, -2] - sol[1:-1, 0]) \ 95 | + r * (sol[2:, -1] - 2 * sol[1:-1, -1] + sol[:-2, -1]) \ 96 | + r * (sol[1:-1, 0] - 2 * sol[1:-1, -1] + sol[1:-1, -2]) 97 | 98 | # Update next time step 99 | sol[:] = sol_new[:] 100 | # sol[:] = sol[:]/(np.sum(sol[:])) #cheat with mass conservation. Assume uniform loss over each cell 101 | 102 | 103 | 104 | 105 | 106 | def init_fields(X, Y): 107 | # Read the initial temperature field from file 108 | # field = np.loadtxt(filename) 109 | # field0 = field.copy() # Array for field of previous time step 110 | pos = np.dstack((X, Y)) 111 | mu = np.array([2, 3]) 112 | cov = np.array([[.05, .010], [.010, .05]]) 113 | rv = multivariate_normal(mu, cov) 114 | S = rv.pdf(pos) 115 | field = S.copy() / (np.sum(S)) 116 | field0 = field.copy() 117 | 118 | # A = 0.1 119 | # omega = 1 120 | # epsilon = 0.25 121 | # u = -np.pi*A*np.sin(np.pi*(1)*X)*np.cos(np.pi*Y) 122 | # v = -np.pi*A*np.cos(np.pi*(1)*X)*np.sin(np.pi*Y)*(1-2*epsilon) 123 | 124 | u = np.sin(1 * X) * np.cos(1 * Y) * 0.1 125 | v = np.cos(1 * X) * np.cos(1 * Y) * 0.1 126 | # u = np.random.rand(N, N)*1 127 | # v = np.random.rand(N, N)*1 128 | # u = np.ones((N,N))*1 129 | # v = np.ones((N,N))*1 130 | cu = dt * np.max(u) / dx 131 | cv = dt * np.max(v) / dx 132 | assert (((cu ** 2) / r) + ((cv ** 2) / r) <= 2), ('dt might be too high or diffusion constant might be too low') 133 | 134 | return field, field0, u, v 135 | 136 | 137 | def write_field(field, step): 138 | plt.gca().clear() 139 | plt.imshow(field, cmap='jet') 140 | plt.axis('off') 141 | plt.savefig('heat_{0:03d}.png'.format(step)) 142 | 143 | 144 | def exchange(field): 145 | # send down, receive from up 146 | sbuf = field[-2, :] 147 | rbuf = field[0, :] 148 | comm.Sendrecv(sbuf, dest=down, recvbuf=rbuf, source=up) 149 | # send up, receive from down 150 | sbuf = field[1, :] 151 | rbuf = field[-1, :] 152 | comm.Sendrecv(sbuf, dest=up, recvbuf=rbuf, source=down) 153 | 154 | 155 | def iterate(field, local_field, local_field0, timesteps, image_interval,field_store_counter): 156 | step = 1 157 | pbar = tqdm(total=int(timesteps)) 158 | counter=0 159 | indexnr = 1 160 | for t in range(0, timesteps ): 161 | if (t == 0): 162 | u_init = np.load(str(path.parent)+'/vorticity_formulation/'+filenames_u[0]) 163 | v_init = np.load(str(path.parent)+'/vorticity_formulation/'+filenames_v[0]) 164 | shape = u_init.shape 165 | dtype = u_init.dtype 166 | comm.bcast(shape, 0) # broadcast dimensions 167 | comm.bcast(dtype, 0) # broadcast data type 168 | n = int(shape[0] / size) # number of rows for each MPI task 169 | m = shape[1] # number of columns in the field 170 | buff = np.zeros((n, m), dtype) 171 | 172 | comm.Scatter(u_init, buff, 0) 
# scatter the data 173 | local_u = np.zeros((n + 2, m), dtype) # need two ghost rows! 174 | local_u[1:-1, :] = buff # copy data to non-ghost rows 175 | 176 | comm.Scatter(v_init, buff, 0) # scatter the data 177 | local_v = np.zeros((n + 2, m), dtype) # need two ghost rows! 178 | local_v[1:-1, :] = buff # copy data to non-ghost rows 179 | 180 | exchange(local_field0) 181 | exchange(local_u) 182 | exchange(local_v) 183 | evolve(local_field, local_field0, local_u, local_v, D, dt, dx2, dy2) 184 | 185 | if (t != 0): 186 | if (t%breaker==1): 187 | u = np.load(str(path.parent)+'/vorticity_formulation/'+filenames_u[indexnr]) 188 | v = np.load(str(path.parent)+'/vorticity_formulation/'+filenames_v[indexnr]) 189 | indexnr +=1 190 | shape = u[0].shape 191 | dtype = u[0].dtype 192 | comm.bcast(shape, 0) # broadcast dimensions 193 | comm.bcast(dtype, 0) # broadcast data type 194 | n = int(shape[0] / size) # number of rows for each MPI task 195 | m = shape[1] # number of columns in the field 196 | buff = np.zeros((n, m), dtype) 197 | 198 | comm.Scatter(u[int(counter%breaker)], buff, 0) # scatter the data 199 | local_u = np.zeros((n + 2, m), dtype) # need two ghost rows! 200 | local_u[1:-1, :] = buff # copy data to non-ghost rows 201 | 202 | comm.Scatter(v[int(counter%breaker)], buff, 0) # scatter the data 203 | local_v = np.zeros((n + 2, m), dtype) # need two ghost rows! 204 | local_v[1:-1, :] = buff # copy data to non-ghost rows 205 | 206 | 207 | 208 | exchange(local_field0) 209 | exchange(local_u) 210 | exchange(local_v) 211 | evolve(local_field, local_field0, local_u, local_v, D, dt, dx2, dy2) 212 | step += 1 213 | counter +=1 214 | pbar.update(1) 215 | 216 | if (t %breaker == 0): 217 | comm.Gather(local_field[1:-1, :], field, root=0) 218 | if (rank == 0): 219 | print('hello') 220 | np.save('datafiles/concentration/field_' + str(round(t)), field) 221 | #field_store_counter += 1 222 | 223 | 224 | ''' 225 | if (t % image_interval == 0 and t!=0): 226 | comm.Gather(local_field[1:-1, :], field, root=0) 227 | comm.Gather(local_u[1:-1, :], u[int(counter%breaker)], root=0) 228 | comm.Gather(local_v[1:-1, :], v[int(counter%breaker)], root=0) 229 | if rank == 0: 230 | # write_field(field, m) 231 | # plt.imshow(field.T,cmap='jet') 232 | axs[0].imshow(field, cmap='jet') # ,vmax=1,vmin=0) 233 | axs[1].imshow((v[int(counter%breaker)]**2+u[int(counter%breaker)]**2), cmap='jet') 234 | # plt.show() 235 | plt.pause(0.05) 236 | 237 | ''' 238 | def main(): 239 | # Read and scatter the initial temperature field 240 | if rank == 0: 241 | field, field0, u, v = init_fields(X, Y) 242 | shape = field.shape 243 | dtype = field.dtype 244 | comm.bcast(shape, 0) # broadcast dimensions 245 | comm.bcast(dtype, 0) # broadcast data type 246 | else: 247 | field = None 248 | u = None 249 | v = None 250 | shape = comm.bcast(None, 0) 251 | dtype = comm.bcast(None, 0) 252 | if shape[0] % size: 253 | raise ValueError('Number of rows in the field (' \ 254 | + str(shape[0]) + ') needs to be divisible by the number ' \ 255 | + 'of MPI tasks (' + str(size) + ').') 256 | n = int(shape[0] / size) # number of rows for each MPI task 257 | m = shape[1] # number of columns in the field 258 | buff = np.zeros((n, m), dtype) 259 | comm.Scatter(field, buff, 0) # scatter the data 260 | local_field = np.zeros((n + 2, m), dtype) # need two ghost rows! 
261 | local_field[1:-1, :] = buff # copy data to non-ghost rows 262 | local_field0 = np.zeros_like(local_field) # array for previous time step 263 | local_field0[:] = local_field[:] 264 | 265 | t0 = time.time() 266 | iterate(field, local_field, local_field0, timesteps, image_interval,field_store_counter) 267 | t1 = time.time() 268 | # Plot/save final field 269 | comm.Gather(local_field[1:-1, :], field, root=0) 270 | if rank == 0: 271 | write_field(field, timesteps) 272 | print("Running time: {0}".format(t1 - t0)) 273 | 274 | 275 | if __name__ == '__main__': 276 | main() 277 | -------------------------------------------------------------------------------- /2D/Advection/2D_Advection_parallell_turbulence_IDUN.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | from mpi4py import MPI 4 | 5 | import matplotlib 6 | from pathlib import Path 7 | 8 | # matplotlib.use('Agg') 9 | import matplotlib.pyplot as plt 10 | from scipy.stats import multivariate_normal 11 | 12 | try: 13 | from tqdm import tqdm 14 | except ImportError: 15 | pass 16 | 17 | 18 | path = Path('/home/danieloh/PycharmProjects/Project_Turbulence_Modelling/2D/Advection') 19 | 20 | # Set the colormap 21 | plt.rcParams['image.cmap'] = 'BrBG' 22 | 23 | # Basic parameters 24 | N = 256 25 | dt = 1e-3 26 | tend = 100 27 | 28 | D = 0.0008 # Diffusion constant 29 | L = 2 * np.pi # 2pi = one period 30 | dx = L / N 31 | dy = dx # equidistant 32 | dx2 = dx ** 2 33 | dy2 = dy ** 2 34 | r = dt * D / (dx ** 2) 35 | assert (0 < 2 * r <= 0.5), ('N too high. Reduce time step to uphold stability') 36 | timesteps = int(np.ceil(tend / dt)) 37 | load_every = 0.01*timesteps 38 | breaker = load_every 39 | image_interval = timesteps * 0.05 # Write frequency for png files 40 | field_store_counter=0 41 | 42 | 43 | filenames_u = ['datafiles/u/u_vel_t_0.npy'] 44 | filenames_v = ['datafiles/v/v_vel_t_0.npy'] 45 | 46 | for i in range(1,int(timesteps/load_every)+1): 47 | filenames_u.append('datafiles/u/u_vel_t_'+str(i)+'.npy') 48 | filenames_v.append('datafiles/v/v_vel_t_'+str(i)+'.npy') 49 | 50 | 51 | print('finished loading file names') 52 | 53 | x = np.arange(0, N, 1) * L / N 54 | y = np.arange(0, N, 1) * L / N 55 | [X, Y] = np.meshgrid(x, y) 56 | 57 | fig, axs = plt.subplots(2) 58 | fig.suptitle('Title here') 59 | 60 | # For stability, this is the largest interval possible 61 | # for the size of the time-step: 62 | # dt = dx2*dy2 / ( 2*D*(dx2+dy2) ) 63 | 64 | 65 | # MPI globals 66 | comm = MPI.COMM_WORLD 67 | rank = comm.Get_rank() 68 | size = comm.Get_size() 69 | 70 | # Up/down neighbouring MPI ranks 71 | up = rank - 1 72 | if up < 0: 73 | up = size - 1 74 | down = rank + 1 75 | if down > size - 1: 76 | down = 0 77 | 78 | 79 | def evolve(sol_new, sol, u, v, D, dt, dx2, dy2): 80 | """Explicit time evolution. 81 | u: new temperature field 82 | u_previous: previous field 83 | a: diffusion constant 84 | dt: time step 85 | dx2: grid spacing squared, i.e. dx^2 86 | dy2: -- "" -- , i.e. 
dy^2""" 87 | # LEFT boundary 88 | 89 | sol_new[1:-1, 0] = sol[1:-1, 0] \ 90 | + (dt * u[1:-1, 0]) / (2 * dx) * (sol[:-2, 0] - sol[2:, 0]) \ 91 | + (dt * v[1:-1, 0]) / (2 * dx) * (sol[1:-1, -1] - sol[1:-1, 1]) \ 92 | + r * (sol[2:, 0] - 2 * sol[1:-1, 0] + sol[:-2, 0]) \ 93 | + r * (sol[1:-1, 1] - 2 * sol[1:-1, 0] + sol[1:-1, -1]) 94 | # INNER points 95 | sol_new[1:-1, 1:-1] = sol[1:-1, 1:-1] \ 96 | + (dt * u[1:-1, 1:-1]) / (2 * dx) * (sol[:-2, 1:-1] - sol[2:, 1:-1]) \ 97 | + (dt * v[1:-1, 1:-1]) / (2 * dx) * (sol[1:-1, :-2] - sol[1:-1, 2:]) \ 98 | + r * (sol[2:, 1:-1] - 2 * sol[1:-1, 1:-1] + sol[:-2, 1:-1]) \ 99 | + r * (sol[1:-1, 2:] - 2 * sol[1:-1, 1:-1] + sol[1:-1, :-2]) 100 | # RIGHT boundary 101 | sol_new[1:-1, -1] = sol[1:-1, -1] \ 102 | + (dt * u[1:-1, -1]) / (2 * dx) * (sol[:-2, -1] - sol[2:, -1]) \ 103 | + (dt * v[1:-1, -1]) / (2 * dx) * (sol[1:-1, -2] - sol[1:-1, 0]) \ 104 | + r * (sol[2:, -1] - 2 * sol[1:-1, -1] + sol[:-2, -1]) \ 105 | + r * (sol[1:-1, 0] - 2 * sol[1:-1, -1] + sol[1:-1, -2]) 106 | 107 | # Update next time step 108 | sol[:] = sol_new[:] 109 | # sol[:] = sol[:]/(np.sum(sol[:])) #cheat with mass conservation. Assume uniform loss over each cell 110 | 111 | 112 | 113 | 114 | 115 | def init_fields(X, Y): 116 | # Read the initial temperature field from file 117 | # field = np.loadtxt(filename) 118 | # field0 = field.copy() # Array for field of previous time step 119 | pos = np.dstack((X, Y)) 120 | mu = np.array([2, 3]) 121 | cov = np.array([[.05, .010], [.010, .05]]) 122 | rv = multivariate_normal(mu, cov) 123 | S = rv.pdf(pos) 124 | field = S.copy() / (np.sum(S)) 125 | field0 = field.copy() 126 | 127 | # A = 0.1 128 | # omega = 1 129 | # epsilon = 0.25 130 | # u = -np.pi*A*np.sin(np.pi*(1)*X)*np.cos(np.pi*Y) 131 | # v = -np.pi*A*np.cos(np.pi*(1)*X)*np.sin(np.pi*Y)*(1-2*epsilon) 132 | 133 | u = np.sin(1 * X) * np.cos(1 * Y) * 0.1 134 | v = np.cos(1 * X) * np.cos(1 * Y) * 0.1 135 | # u = np.random.rand(N, N)*1 136 | # v = np.random.rand(N, N)*1 137 | # u = np.ones((N,N))*1 138 | # v = np.ones((N,N))*1 139 | cu = dt * np.max(u) / dx 140 | cv = dt * np.max(v) / dx 141 | assert (((cu ** 2) / r) + ((cv ** 2) / r) <= 2), ('dt might be too high or diffusion constant might be too low') 142 | 143 | return field, field0, u, v 144 | 145 | 146 | def write_field(field, step): 147 | plt.gca().clear() 148 | plt.imshow(field, cmap='jet') 149 | plt.axis('off') 150 | plt.savefig('heat_{0:03d}.png'.format(step)) 151 | 152 | 153 | def exchange(field): 154 | # send down, receive from up 155 | sbuf = field[-2, :] 156 | rbuf = field[0, :] 157 | comm.Sendrecv(sbuf, dest=down, recvbuf=rbuf, source=up) 158 | # send up, receive from down 159 | sbuf = field[1, :] 160 | rbuf = field[-1, :] 161 | comm.Sendrecv(sbuf, dest=up, recvbuf=rbuf, source=down) 162 | 163 | 164 | def iterate(field, local_field, local_field0, timesteps, image_interval,field_store_counter): 165 | step = 1 166 | try: 167 | pbar = tqdm(total=int(timesteps)) 168 | except: 169 | pass 170 | counter=0 171 | indexnr = 1 172 | for t in range(0, timesteps ): 173 | if (t == 0): 174 | u_init = np.load(filenames_u[0]) 175 | v_init = np.load(filenames_v[0]) 176 | shape = u_init.shape 177 | dtype = u_init.dtype 178 | comm.bcast(shape, 0) # broadcast dimensions 179 | comm.bcast(dtype, 0) # broadcast data type 180 | n = int(shape[0] / size) # number of rows for each MPI task 181 | m = shape[1] # number of columns in the field 182 | buff = np.zeros((n, m), dtype) 183 | 184 | comm.Scatter(u_init, buff, 0) # scatter the data 185 | local_u = 
np.zeros((n + 2, m), dtype) # need two ghost rows! 186 | local_u[1:-1, :] = buff # copy data to non-ghost rows 187 | 188 | comm.Scatter(v_init, buff, 0) # scatter the data 189 | local_v = np.zeros((n + 2, m), dtype) # need two ghost rows! 190 | local_v[1:-1, :] = buff # copy data to non-ghost rows 191 | 192 | exchange(local_field0) 193 | exchange(local_u) 194 | exchange(local_v) 195 | evolve(local_field, local_field0, local_u, local_v, D, dt, dx2, dy2) 196 | 197 | if (t != 0): 198 | if (t%breaker==1): 199 | u = np.load(filenames_u[indexnr]) 200 | v = np.load(filenames_v[indexnr]) 201 | indexnr +=1 202 | shape = u[0].shape 203 | dtype = u[0].dtype 204 | comm.bcast(shape, 0) # broadcast dimensions 205 | comm.bcast(dtype, 0) # broadcast data type 206 | n = int(shape[0] / size) # number of rows for each MPI task 207 | m = shape[1] # number of columns in the field 208 | buff = np.zeros((n, m), dtype) 209 | 210 | comm.Scatter(u[int(counter%breaker)], buff, 0) # scatter the data 211 | local_u = np.zeros((n + 2, m), dtype) # need two ghost rows! 212 | local_u[1:-1, :] = buff # copy data to non-ghost rows 213 | 214 | comm.Scatter(v[int(counter%breaker)], buff, 0) # scatter the data 215 | local_v = np.zeros((n + 2, m), dtype) # need two ghost rows! 216 | local_v[1:-1, :] = buff # copy data to non-ghost rows 217 | 218 | 219 | 220 | exchange(local_field0) 221 | exchange(local_u) 222 | exchange(local_v) 223 | evolve(local_field, local_field0, local_u, local_v, D, dt, dx2, dy2) 224 | step += 1 225 | counter +=1 226 | try: 227 | pbar.update(1) 228 | except: 229 | pass 230 | 231 | if(t%breaker==0): 232 | comm.Gather(local_field[1:-1, :], field, root=0) 233 | if (rank==0): 234 | np.save('datafiles/concentrations/field_' + str(round(t)), field) 235 | 236 | ''' 237 | if (t % image_interval == 0 and t!=0): 238 | comm.Gather(local_field[1:-1, :], field, root=0) 239 | comm.Gather(local_u[1:-1, :], u[int(counter%breaker)], root=0) 240 | comm.Gather(local_v[1:-1, :], v[int(counter%breaker)], root=0) 241 | if rank == 0: 242 | # write_field(field, m) 243 | # plt.imshow(field.T,cmap='jet') 244 | axs[0].imshow(field, cmap='jet') # ,vmax=1,vmin=0) 245 | axs[1].imshow((v[int(counter%breaker)]**2+u[int(counter%breaker)]**2), cmap='jet') 246 | # plt.show() 247 | plt.pause(0.05) 248 | 249 | ''' 250 | 251 | def main(): 252 | # Read and scatter the initial temperature field 253 | if rank == 0: 254 | field, field0, u, v = init_fields(X, Y) 255 | shape = field.shape 256 | dtype = field.dtype 257 | comm.bcast(shape, 0) # broadcast dimensions 258 | comm.bcast(dtype, 0) # broadcast data type 259 | else: 260 | field = None 261 | u = None 262 | v = None 263 | shape = comm.bcast(None, 0) 264 | dtype = comm.bcast(None, 0) 265 | if shape[0] % size: 266 | raise ValueError('Number of rows in the field (' \ 267 | + str(shape[0]) + ') needs to be divisible by the number ' \ 268 | + 'of MPI tasks (' + str(size) + ').') 269 | n = int(shape[0] / size) # number of rows for each MPI task 270 | m = shape[1] # number of columns in the field 271 | buff = np.zeros((n, m), dtype) 272 | comm.Scatter(field, buff, 0) # scatter the data 273 | local_field = np.zeros((n + 2, m), dtype) # need two ghost rows! 
274 | local_field[1:-1, :] = buff # copy data to non-ghost rows 275 | local_field0 = np.zeros_like(local_field) # array for previous time step 276 | local_field0[:] = local_field[:] 277 | 278 | t0 = time.time() 279 | print('starting iterations') 280 | iterate(field, local_field, local_field0, timesteps, image_interval,field_store_counter) 281 | print('end iterations') 282 | t1 = time.time() 283 | 284 | if __name__ == '__main__': 285 | main() 286 | -------------------------------------------------------------------------------- /2D/Advection/2D_Advection_second_upwind.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from scipy.stats import multivariate_normal 4 | from numba import jit 5 | ########### 6 | # Read in velocity field files 7 | 8 | dt_list = np.load('dt_vector.npy') 9 | u_vel = np.load('u_vel.npy') 10 | v_vel = np.load('v_vel.npy') 11 | vorticity = np.load('vorticity.npy') 12 | print('-----------Loaded in text files-----') 13 | print('Vorticity shape: ', np.shape(vorticity)) 14 | L = np.pi 15 | N = int(np.sqrt(np.shape(u_vel[0, :]))[0]) 16 | dx = 2 * L / N 17 | dy = dx 18 | x = np.linspace(1 - N / 2, N / 2, N) * dx 19 | y = np.linspace(1 - N / 2, N / 2, N) * dx 20 | [X, Y] = np.meshgrid(x, y) 21 | 22 | time_levels = int(len(dt_list)) 23 | dt = dt_list[1] - dt_list[0] 24 | t_end = dt_list[-1] 25 | 26 | ## Reshape velocity data to 2D: 27 | print('U-velocity shape:', np.shape(u_vel)) 28 | 29 | u_vel = np.reshape(u_vel, ([time_levels, N, N]), 1) 30 | v_vel = np.reshape(v_vel, ([time_levels, N, N]), 1) 31 | 32 | # print(np.shape(u_vel[-1])) 33 | # plt.imshow(np.abs((u_vel[1] ** 2) + (v_vel[-1] ** 2)), cmap='jet') 34 | # plt.show() 35 | 36 | S = np.zeros([N, N]) 37 | S_new = S.copy() 38 | res = S.copy() 39 | #S[int(N / 2) - int(N / 10):int(N / 2) + int(N / 10), 40 | #int(N / 2) - int(N / 10):int(N / 2) + int(N / 10)] = 1 41 | 42 | 43 | 44 | 45 | pos = np.dstack((X, Y)) 46 | mu = np.array([1, 2]) 47 | cov = np.array([[.5, .25],[.25, .5]]) 48 | rv = multivariate_normal(mu, cov) 49 | S = rv.pdf(pos) 50 | print('Initial Sediment: ',np.sum(np.sum(S))) 51 | 52 | fig,axs = plt.subplots(2) 53 | fig.suptitle('Title here') 54 | #ax = plt.axes(xlim=(0,N),ylim=(0,N)) 55 | #domain, = ax.plot(S) 56 | 57 | scheme = 'Second_Upwind' 58 | if scheme=='Second_Upwind': 59 | for t in range(time_levels): 60 | # Temporal loop 61 | for i in range(N): 62 | # Spatial loop, x-direction 63 | for j in range(N): 64 | # Spatial loop, y-direction 65 | #TODO incorporate these variables into a function 66 | u_plus = np.max(u_vel[t, i, j], 0) 67 | u_min = np.min(u_vel[t, i, j], 0) 68 | v_plus = np.max(v_vel[t, i, j], 0) 69 | v_min = np.min(v_vel[t, i, j], 0) 70 | Sx_plus = (-S[(i+2)%N,j]+4*S[(i+1)%N,j]-3*S[i,j])/(2*dx) 71 | Sx_min = (3*S[i,j]-4*S[i-1,j]+S[i-2,j])/(2*dx) 72 | Sy_plus = (-S[i,(j+2)%N]+4*S[i,(j+1)%N]-3*S[i,j])/(2*dy) 73 | Sy_min = (3*S[i,j]-4*S[i,j-1]+S[i,j-2])/(2*dy) 74 | 75 | res[i,j] = -(u_plus*Sx_min+u_min*Sx_plus)-(v_plus*Sy_min+v_min*Sy_plus) 76 | S_new = S+dt*res 77 | # domain.set_data(S) 78 | S = S_new.copy() 79 | max_u = np.max(np.abs(u_vel)) 80 | max_v = np.max(np.abs(v_vel)) 81 | max_vel = np.max([max_u,max_v]) 82 | cfl = np.abs(max_vel*dt/dx) 83 | print('Max CFL value: ', cfl,' Time level: ',dt_list[t],' Total Sediment: ', np.sum(np.sum(S))) 84 | # TODO VALUES OF SEDIEMTN OSCILLATES BETWEEN POSITIVE ANG NEGATIVE, WHY?? 
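        # Why the sediment oscillates (re: TODO above): the unlimited second-order
        # upwind scheme is a linear scheme of order > 1, so by Godunov's theorem it
        # is not monotone -- its dispersive truncation error creates over/undershoots
        # (including negative values) around the sharp Gaussian peak, and no diffusion
        # term is included here to damp them.  A common fix is to limit the one-sided
        # slopes before forming the update; a minimal minmod sketch (illustration only,
        # not used by this script) could look like:
        #
        #   fwd = (np.roll(S, -1, axis=0) - S) / dx     # forward slope in x
        #   bwd = (S - np.roll(S, 1, axis=0)) / dx      # backward slope in x
        #   Sx_limited = np.where(fwd * bwd > 0,
        #                         np.where(np.abs(fwd) < np.abs(bwd), fwd, bwd),
        #                         0.0)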
85 | 86 | plt.suptitle('Max CFL value: '+np.str(cfl)+' Time level: '+str(dt_list[t])) 87 | axs[0].imshow(S.T,cmap='jet',vmin=0,vmax=1) 88 | axs[1].imshow(np.abs((u_vel[t] ** 2) + (v_vel[t] ** 2)), cmap='jet') 89 | plt.pause(0.005) 90 | plt.show() 91 | 92 | 93 | ''' M = np.hypot(u_vel[t], v_vel[t]) 94 | Q = axs[1].quiver(X, Y, u_vel[t], v_vel[t], M, units='x', pivot='tip', 95 | width=0.022, 96 | scale=1 / 0.15) 97 | qk = axs[1].quiverkey(Q, 0.9, 0.9, 1, r'$1 \frac{m}{s}$', labelpos='E', 98 | coordinates='figure') 99 | axs[1].scatter(X, Y, color='0.5', s=1) 100 | ''' 101 | 102 | -------------------------------------------------------------------------------- /2D/Advection/2d_advection_diff_central_no_parallell.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from scipy.stats import multivariate_normal 4 | 5 | N = 64 6 | dt = 0.01 7 | tend = 50 8 | L = 2 * np.pi 9 | dx = L / N 10 | x = np.arange(0, N, 1) * L / N 11 | y = np.arange(0, N, 1) * L / N 12 | [X, Y] = np.meshgrid(x, y) 13 | n = int(np.ceil(tend / dt)) 14 | 15 | u = np.ones((N, N)) * 1 16 | v = np.ones((N, N)) * 1 17 | 18 | #u = np.sin(X) * np.cos(Y) * 0.1 19 | #v = np.cos(X) * np.cos(Y) * 0.1 20 | 21 | # u = np.random.rand(N, N)*1 22 | # v = np.random.rand(N, N)*1 23 | 24 | D = 0.08 25 | r = dt * D / (dx ** 2) 26 | cell_Reynold = np.max(u) / N / D 27 | Peclet = np.max(u) / N / D 28 | print('Peclet: ', Peclet, ' 2*von Neumann: ', 2 * r) 29 | # assert Peclet <=2 30 | 31 | # plt.imshow(u) 32 | # plt.show() 33 | 34 | pos = np.dstack((X, Y)) 35 | mu = np.array([2, 3]) 36 | cov = np.array([[.05, .010], [.010, .05]]) 37 | rv = multivariate_normal(mu, cov) 38 | S = rv.pdf(pos) 39 | sol = S.copy()/(np.sum(S)) 40 | sol_new = S.copy() 41 | 42 | # sol = np.zeros((N,N)) 43 | # sol[int(N/2),int(N/2)]=5 44 | # sol_new = sol.copy() 45 | 46 | # sol = np.cos(X)*np.sin(Y) 47 | # plt.imshow(sol) 48 | # plt.show() 49 | 50 | # fig = plt.figure() 51 | # ax = fig.add_subplot(111) 52 | # Ln, = ax.plot(sol) 53 | # ax.set_xlim([0, 2 * np.pi]) 54 | # plt.ion()1 55 | # plt.show() 56 | 57 | fig, axs = plt.subplots(2) 58 | fig.suptitle('Title here') 59 | 60 | for t in range(n): 61 | for i in range(len(x)): 62 | for j in range(len(x)): 63 | cu = dt * u[i, j] / dx 64 | cv = dt * v[i, j] / dx 65 | assert (((cu ** 2) / r) + ((cv ** 2) / r) <= 2),('dt might be too high or diffusion constant might be too low') 66 | assert (0 < 2 * r <= 0.5) 67 | sol_new[i, j] = sol[i, j] \ 68 | + cu / 2 * (sol[(i - 1), j] - sol[(i + 1) % N, j ]) \ 69 | + cv / 2 * (sol[i % N, j - 1] - sol[i % N, (j + 1) % N]) \ 70 | + r * (sol[i - 1, j ] - 2 * sol[i , j ] + sol[(i + 1) % N, j ]) \ 71 | + r * (sol[i , j - 1] - 2 * sol[i , j ] + sol[i , (j + 1) % N]) 72 | sol = sol_new 73 | sol = sol/(np.sum(sol)) #cheat with mass conservation. 
Assume uniform loss over each cell 74 | 75 | # plt.plot(x,sol_new) 76 | 77 | # Ln.set_ydata(sol) 78 | # Ln.set_xdata(X) 79 | if (t % 20 == 0): 80 | # plt.clf() 81 | print('time level: ', t, ' Total Sediment: ', np.sum(sol)) 82 | # plt.imshow(sol) 83 | # plt.contour(sol) 84 | # plt.pause(0.005) 85 | # plt.imshow(sol) 86 | # plt.show() 87 | axs[0].imshow(sol, cmap='jet') # ,vmax=1,vmin=0) 88 | axs[1].imshow(v, cmap='jet') 89 | plt.pause(0.05) 90 | -------------------------------------------------------------------------------- /2D/Advection/Variance_computation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.animation as animation 4 | import scipy.stats as sp 5 | from scipy import integrate 6 | from skimage import measure 7 | from matplotlib import rc 8 | from basic_units import radians, degrees, cos 9 | from radians_plot import * 10 | import powerlaw as pl 11 | 12 | plt.style.use('bmh') 13 | 14 | tend = 100 15 | dt = 1e-3 16 | timesteps = int(np.ceil(tend / dt)) 17 | 18 | N = 256 19 | L = 2 * np.pi 20 | dx = L / N 21 | x = np.arange(0, N, 1) * L / N 22 | y = np.arange(0, N, 1) * L / N 23 | [X, Y] = np.meshgrid(x, y) 24 | 25 | con_x = [] 26 | con_y = [] 27 | 28 | var_x = [] 29 | var_y = [] 30 | 31 | field_mean_x = [] 32 | field_mean_Y = [] 33 | 34 | fig, axs = plt.subplots(1) 35 | 36 | field_mean_x = [] 37 | field_mean_y = [] 38 | 39 | 40 | def closest(lst, K): 41 | return lst[min(range(len(lst)), key=lambda i: abs(lst[i] - K))] 42 | 43 | 44 | def intargument(field, N, x,mean_or_central,mean_x,mean_y): 45 | 46 | con_x = field.sum(axis=0) 47 | con_y = field.sum(axis=1) 48 | mean_npx = np.mean(con_x) 49 | mean_npy = np.mean(con_y) 50 | close_value_x = closest(con_x, mean_npx) 51 | close_value_y = closest(con_y, mean_npy) 52 | 53 | if mean_x =='str' and mean_y =='str': 54 | meanvalue_x = x[np.where(con_x == close_value_x)][0] 55 | meanvalue_y = y[np.where(con_y == close_value_y)][0] 56 | else: 57 | meanvalue_x = float(mean_x) 58 | meanvalue_y = float(mean_y) 59 | 60 | argumentx = [] 61 | argumenty = [] 62 | for i in range(N): 63 | argumentx.append(((x[i] - meanvalue_x*mean_or_central) ** 2) * con_x[i]) 64 | argumenty.append(((y[i] - meanvalue_y*mean_or_central) ** 2) * con_y[i]) 65 | return argumentx, argumenty,meanvalue_x,meanvalue_y 66 | 67 | def first_moment(field, N, x): 68 | 69 | con_x = field.sum(axis=0) 70 | con_y = field.sum(axis=1) 71 | argumentfirstx = [] 72 | argumentfirsty = [] 73 | for i in range(N): 74 | argumentfirstx.append(((x[i])) * con_x[i]) 75 | argumentfirsty.append(((y[i])) * con_y[i]) 76 | return argumentfirstx, argumentfirsty 77 | 78 | def second_moment(field, N, x,firstx,firsty): 79 | 80 | con_x = field.sum(axis=0) 81 | con_y = field.sum(axis=1) 82 | argumentx = [] 83 | argumenty = [] 84 | for i in range(N): 85 | argumentx.append(((x[i]-firstx)**2) * con_x[i]) 86 | argumenty.append(((y[i]-firsty)**2) * con_y[i]) 87 | return argumentx, argumenty 88 | 89 | 90 | 91 | intlist = [] 92 | mean_or_central=0 93 | flipbool = False 94 | meanvalue_x = 'str' 95 | meanvalue_y = 'str' 96 | 97 | for i in range(0, 100): 98 | #print(i) 99 | flipbool = False 100 | field = np.load('datafiles/con_1/field_' + str(i * 1000) + '.npy') 101 | 102 | [argumentx, argumenty,meanvalue_x,meanvalue_y] = intargument(field, N, x,mean_or_central,0,0) 103 | half_central_integral_x = integrate.simps(argumentx, x=x, dx=dx) * 0.5 104 | half_central_integral_y = integrate.simps(argumenty, x=x, 
dx=dx) * 0.5 105 | 106 | idx_x = 1 107 | while flipbool==False: 108 | lengthX = idx_x 109 | xlist = x[0:idx_x] 110 | running_central_integral_x = integrate.simps(argumentx[0:idx_x], x=xlist, dx=dx) 111 | idx_x +=1 112 | if running_central_integral_x >= half_central_integral_x: 113 | flipbool = True 114 | idx_y = 1 115 | flipbool = False 116 | while flipbool==False: 117 | lengthX = idx_y 118 | xlist = x[0:idx_y] 119 | running_central_integral_y = integrate.simps(argumenty[0:idx_y], x=xlist, dx=dx) 120 | idx_y +=1 121 | if running_central_integral_y >= half_central_integral_y: 122 | flipbool = True 123 | 124 | 125 | #[firstmomentx_arg,firstmomenty_arg]=first_moment(field,N,x) 126 | #firstmomentx = integrate.simps(firstmomentx_arg,x=x,dx=dx) 127 | #firstmomenty = integrate.simps(firstmomenty_arg,x=x,dx=dx) 128 | #[argumentx,argumenty]=second_moment(field,N,x,firstmomentx,firstmomenty) 129 | mean_or_central=0 130 | [argumentx, argumenty, meanvalue_x, meanvalue_y] = intargument(field, N, x, mean_or_central, x[idx_x], x[idx_y]) 131 | intlist.append(integrate.simps(argumenty, x=x, dx=dx)) 132 | con_x.append(field.sum(axis=0)) 133 | con_y.append(field.sum(axis=1)) 134 | var_x.append(np.var(field, axis=0)) 135 | var_y.append(np.var(field, axis=1)) 136 | field_mean_x.append((np.mean(var_x[i]))) 137 | field_mean_y.append((np.mean(var_y[i]))) 138 | # print(meanvalue_y) 139 | ''' 140 | if (i%9==0): 141 | print(i) 142 | plt.contourf(X,Y,field,levels=15,xunits=radians, yunits=radians,cmap='jet') 143 | plt.xlim([0,2*np.pi]) 144 | plt.ylim([0, 2 * np.pi]) 145 | #plt.xlabel('x-direction (m)') 146 | #plt.ylabel('y-direction (m)') 147 | ax = plt.gca() 148 | ax.set_xlabel('x-direction (m)') 149 | ax.set_ylabel('y-direction (m)') 150 | 151 | ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 152 | ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 153 | ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 154 | ax.yaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 155 | ax.yaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 156 | ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 157 | plt.colorbar() 158 | plt.show() 159 | ''' 160 | ''' 161 | axs[0].imshow(field, cmap='jet') 162 | axs[1].plot(x,con_x[i]) 163 | axs[1].plot([meanvalue_x],[0],'r*') 164 | 165 | axs[2].plot(x,con_y[i]) 166 | axs[2].plot([meanvalue_y],[0],'r*') 167 | axs[3].plot(intlist) 168 | plt.pause(0.03) 169 | axs[0].axes.clear() 170 | axs[1].axes.clear() 171 | axs[2].axes.clear() 172 | axs[3].axes.clear() 173 | ''' 174 | 175 | 176 | idxa = 2 177 | idxb = 7 178 | idxc = 9 179 | idxd = 20 180 | 181 | 182 | t = np.arange(0,100,1) 183 | tpower1 = 0.18*t**(-0.2) 184 | tpower2 = 0.026*t**(1) 185 | #tpower = new_list = [n-0 for n in tpower] 186 | 187 | 188 | m1,c1 = np.polyfit(np.log(t[idxa:idxb]),np.log(intlist[idxa:idxb]),1) 189 | log_fit1 = m1*np.log(t[idxa:idxb])+c1 190 | 191 | m2,c2 = np.polyfit(np.log(t[idxc:idxd]),np.log(intlist[idxc:idxd]),1) 192 | log_fit2 = m2*np.log(t[idxc:idxd])+c2 193 | 194 | 195 | #plt.loglog(t[idxc:idxd],tpower1[idxc:idxd],'b--') 196 | plt.loglog(t[idxa:idxb],np.exp(log_fit1),'b--') 197 | plt.loglog(t[idxc:idxd],np.exp(log_fit2),'r--') 198 | 199 | #plt.loglog(t[idxa:idxb],tpower2[idxa:idxb],'r--') 200 | plt.loglog(intlist,'k-') 201 | axs.set_xscale('log') 202 | plt.xlabel('$\mathrm{time \;(s)}$') 203 | plt.ylabel('$\\sigma_{y}^{2} \; \mathrm{(m^2)}$') 204 | 205 | plt.legend(['powerlaw $\propto t^{%.2f}$'%(m1),'powerlaw $\propto t^{%.2f}$'%(m2),'SM']) 206 | 
plt.show() 207 | -------------------------------------------------------------------------------- /2D/Advection/read_field.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.animation as animation 4 | 5 | tend = 100 6 | dt = 1e-3 7 | timesteps = int(np.ceil(tend/dt)) 8 | fig = plt.figure() 9 | ims = [] 10 | for i in range(0,100): 11 | field = np.load('datafiles/concentrations/field_'+str(i*1000)+'.npy') 12 | im = plt.imshow(field,cmap='jet',animated=True) 13 | ims.append([im]) 14 | print('saving field',i) 15 | ani = animation.ArtistAnimation(fig, ims, interval=2, blit=True, repeat_delay=None) 16 | ani.save('fieldspread.gif', writer='imagemagick') -------------------------------------------------------------------------------- /2D/Conventional_formulation/2DNS_plot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import matplotlib.pyplot as plt 3 | from numpy import * 4 | from mayavi import mlab 5 | from basic_units import radians, degrees, cos 6 | from radians_plot import * 7 | import matplotlib.animation as animation 8 | import types 9 | 10 | # X = mgrid[rank * Np:(rank + 1) * Np, :N, :N].astype(float) * 2 * pi / N 11 | # U = empty((3, Np, N, N),dtype=float32) 12 | with open('X_2D.pkl', 'rb') as g: 13 | X_2D = pickle.load(g) 14 | with open('U_2D.pkl', 'rb') as f: 15 | U_2D = pickle.load(f) 16 | #with open('animate_U_x.pkl', 'rb') as h: 17 | # animate_U_x_pkl = pickle.load(h) 18 | print(U_2D) 19 | print(np.shape(U_2D)) 20 | print(np.shape(X_2D)) 21 | #print(np.shape(animate_U_x_pkl)) 22 | 23 | 24 | X_x = X_2D[0][0] 25 | X_y = X_2D[0][1] 26 | U_x = U_2D[0][0] 27 | U_y = U_2D[0][1] 28 | 29 | print(np.shape(U_x)) 30 | 31 | # Plot contour lines of the velocity in X-direction in the middle of the cube. 32 | # X mesh is listed by X([z-levels,],[y-levels],[x-levels]), addressing, X[2] points to 33 | # the mesh in x-direction. 
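# (For this 2D case the pickled lists are indexed as X_2D[0][0]/X_2D[0][1] for the
#  x/y coordinate arrays and U_2D[0][0]/U_2D[0][1] for the u/v velocity components,
#  as unpacked into X_x, X_y, U_x, U_y above.)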
34 | plt.contourf(X_x, X_y, U_x, 35 | xunits=radians, yunits=radians, levels=256, cmap=plt.get_cmap('jet')) 36 | ax = plt.gca() 37 | ax.set_xlabel('x') 38 | ax.set_ylabel('y') 39 | ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 40 | ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 41 | ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 42 | ax.yaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 43 | ax.yaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 44 | ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 45 | plt.show() 46 | 47 | 48 | fig = plt.figure() 49 | # ims is a list of lists, each row is a list of artists to draw in the 50 | # current frame; here we are just animating one artist, the image, in 51 | # each frame 52 | ims = [] 53 | for i in range(0,len(animate_U_x_T),10): 54 | print('Appending image nr: '+str(i)) 55 | im = plt.imshow(animate_U_x_T[i][mid_idx], animated=True) 56 | ims.append([im]) 57 | 58 | ani = animation.ArtistAnimation(fig, ims, interval=100, blit=True, 59 | repeat_delay=None) 60 | 61 | #ani.save('dynamic_images.mp4') 62 | ani.save('animation.gif', writer='imagemagick', fps=30) 63 | 64 | plt.show() -------------------------------------------------------------------------------- /2D/Conventional_formulation/2DNS_spectral.py: -------------------------------------------------------------------------------- 1 | # 2D implementation of a spectral method for Navier-Stokes equations 2 | from numpy import * 3 | from numpy.fft import fftfreq, fft, ifft, irfft2, rfft2, rfftn, irfftn 4 | from mpi4py import MPI 5 | import pickle 6 | import matplotlib.pyplot as plt 7 | from tqdm import tqdm 8 | 9 | # U is set to dtype float32 10 | # Reynoldsnumber determined by nu Re = 1600, nu = 1/1600 11 | nu = 0.0000625 12 | # nu = 0.00000625 13 | T = 5 14 | dt = 0.001 15 | N = int(2 ** 6) 16 | #N = int(N / 2 + 1) 17 | # comm = MPI.COMM_WORLD 18 | num_processes = 1 19 | rank = 0 20 | # Np = int(N / num_processes) 21 | Np=N 22 | Nhalf=int(N/2+1) 23 | X = mgrid[rank * Np:(rank + 1) * Np, :N].astype(float) * 2 * pi / N 24 | # using np.empty() does not create a zero() list! 25 | U = zeros((2, N, N), dtype=float32) 26 | U_hat = zeros((2, N, Nhalf), dtype=complex) 27 | P = empty((N, N)) 28 | P_hat = zeros((N, Nhalf), dtype=complex) 29 | U_hat0 = zeros((2, N, Nhalf), dtype=complex) 30 | U_hat1 = zeros((2, N, Nhalf), dtype=complex) 31 | dU = zeros((2, N, Nhalf), dtype=complex) 32 | Uc_hat = zeros((N, Nhalf), dtype=complex) 33 | Uc_hatT = zeros((N, Nhalf), dtype=complex) 34 | U_mpi = zeros((num_processes, N, N, N), dtype=complex) 35 | curl = zeros((2,N, N)) 36 | animate_U_x = zeros((int(T / dt), N, N), dtype=float32) 37 | save_animation = True 38 | kx = fftfreq(N, 1. / N) 39 | ky = kx[:(Nhalf)].copy(); 40 | #kz[-1] *= -1 41 | K = array(meshgrid(kx, ky, indexing="ij"), dtype=int) 42 | K2 = sum(K * K, 0, dtype=int) 43 | K_over_K2 = K.astype(float) / where(K2 == 0, 1, K2).astype(float) 44 | kmax_dealias = 2. / 3. * (N) 45 | # dealias = array( 46 | # (abs(K[0]) < kmax_dealias) * (abs(K[1]) < kmax_dealias) * (abs(K[2]) < kmax_dealias), 47 | # dtype=bool) 48 | 49 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 50 | b = [0.5, 0.5, 1.] 
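# a and b are the classical fourth-order Runge-Kutta weights used in the time loop
# below: each stage evaluates dU = computeRHS(...), the provisional state for the
# next stage is U_hat0 + b[rk]*dt*dU, and the final update accumulates
# U_hat1 += a[rk]*dt*dU.  As a minimal sketch of the same pattern for a generic
# ODE du/dt = f(u) over one step dt (illustration only):
#
#   u1 = u0 = u
#   for rk in range(4):
#       du = f(u)
#       if rk < 3:
#           u = u0 + b[rk] * dt * du
#       u1 += a[rk] * dt * du
#   u = u1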
51 | 52 | 53 | def ifftn_mpi(fu, u): 54 | # Inverse Fourier transform 55 | # Uc_hat[:] = ifft(fu, axis=0) 56 | # comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX]) 57 | # Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape) 58 | # print(['fu ifftn shape: '],shape(fu)) 59 | u[:] = irfft2(fu) 60 | return u 61 | 62 | 63 | def fftn_mpi(u, fu): 64 | 65 | # Forward Fourier transform 66 | # Uc_hatT[:] = rfft2(u, axes=(0, 1)) 67 | # U_mpi[:] = rollaxis(Uc_hatT.reshape(num_processes, Np, N), 1) 68 | # comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX]) 69 | 70 | # print('shape of u to be transformed: ',shape(u)) 71 | # print('shape of fu to be output: ',shape(fu)) 72 | fu[:] = rfft2(u) 73 | return fu 74 | 75 | 76 | def Cross(a, b, c): 77 | # 3D cross product 78 | # c[0] = fftn_mpi(a[1] * b[2] - a[2] * b[1], c[0]) 79 | # c[1] = fftn_mpi(a[2] * b[0] - a[0] * b[2], c[1]) 80 | # print('shape of U: ',shape(a)) 81 | # print('shape of curl: ',shape(b)) 82 | c = fftn_mpi(a[0] * b[1] - a[1] * b[0], c) 83 | return c 84 | 85 | 86 | def Curl(a, c): 87 | # 3D curl operator 88 | # print(shape(a)) 89 | # print(shape(c)) 90 | c = ifftn_mpi(1j * (K[0] * a[1] - K[1] * a[0]), c) 91 | # c[1] = ifftn_mpi(1j * (K[2] * a[0] - K[0] * a[2]), c[1]) 92 | # c[0] = ifftn_mpi(1j * (K[1] * a[2] - K[2] * a[1]), c[0]) 93 | return c 94 | 95 | 96 | def computeRHS(dU, rk): 97 | # Compute residual of time integral as specified in pseudo spectral Galerkin method 98 | if rk > 0: 99 | for i in range(2): 100 | U[i] = ifftn_mpi(U_hat[i], U[i]) 101 | curl[:] = Curl(U_hat, curl) 102 | # print('input to cross: ',shape(U),' ',shape(curl)) 103 | dU = Cross(U, curl, dU) 104 | # dU *= dealias 105 | P_hat[:] = sum(dU * K_over_K2, 0, out=P_hat) 106 | dU -= P_hat * K 107 | dU -= nu * K2 * U_hat 108 | return dU 109 | 110 | 111 | # initial condition and transformation to Fourier space 112 | U[0] = sin(X[0]) * cos(X[1]) # * cos(X[2]) 113 | U[1] = -cos(X[0]) * sin(X[1]) # * cos(X[2]) 114 | # U[2] = 0 115 | for i in range(2): 116 | U_hat[i] = fftn_mpi(U[i], U_hat[i]) 117 | 118 | # Time integral using a Runge Kutta scheme 119 | t = 0.0 120 | tstep = 0 121 | save_nr = 1 122 | mid_idx = int(N / 2) 123 | pbar = tqdm(total=int(T / dt)) 124 | while t < T - 1e-8: 125 | 126 | t += dt; 127 | U_hat1[:] = U_hat0[:] = U_hat 128 | for rk in range(4): 129 | # Run RK4 temporal integral method 130 | dU = computeRHS(dU, rk) 131 | if rk < 3: U_hat[:] = U_hat0 + b[rk] * dt * dU 132 | U_hat1[:] += a[rk] * dt * dU 133 | U_hat[:] = U_hat1[:] 134 | for i in range(2): 135 | # Inverse Fourier transform after RK4 algorithm 136 | U[i] = ifftn_mpi(U_hat[i], U[i]) 137 | # if save_animation == True and tstep % save_nr == 0: 138 | # Save the animation every "save_nr" time step 139 | animate_U_x[tstep] = U[0].copy() 140 | tstep += 1 141 | pbar.update(1) 142 | 143 | # k = comm.reduce(0.5 * sum(U * U) * (1. / N) ** 3) 144 | # if rank == 0: 145 | # assert round(k - 0.124953117517, 7) == 0 146 | pbar.close() 147 | 148 | # Gather the scattered data and store into two variables, X and U. 
149 | # Root is rank of receiving process (core 1) 150 | # U_gathered = comm.gather(U, root=0) 151 | # X_gathered = comm.gather(X, root=0) 152 | 153 | # animate_U_x_T = animate_U_x.transpose((0, 3, 2, 1)) 154 | # animate_save_T = [animate_U_x_T[i][int(N / 2)] for i in range(len(animate_U_x_T))] 155 | 156 | with open('U_2D' + '.pkl', 'wb') as f: 157 | pickle.dump([U], f) 158 | 159 | with open('X_2D.pkl', 'wb') as g: 160 | pickle.dump([X], g) 161 | 162 | #if save_animation == True: 163 | # # animate_U_x_gather = comm.gather(animate_U_x, root=0) 164 | # with open('animate_U_x_' + str(rank) + '2D.pkl', 'wb') as h: 165 | # pickle.dump([animate_save_T], h) 166 | -------------------------------------------------------------------------------- /2D/Conventional_formulation/2d_spectral_plot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import matplotlib.animation as animation 5 | import types 6 | 7 | from matplotlib.cm import ScalarMappable 8 | 9 | with open('solve_matrix.pkl', 'rb') as f: 10 | solve= pickle.load(f) 11 | solve_matrix=solve[0] 12 | max_val = solve_matrix.y.max() 13 | min_val = solve_matrix.y.min() 14 | print(max_val) 15 | N=int(np.sqrt(len(solve_matrix.y[:,-1]))) 16 | omega_vector = solve_matrix.y[:,-1] 17 | omega = np.reshape(omega_vector, ([N, N])) 18 | 19 | plt.contourf(omega,levels=30,cmap='jet') 20 | cbar = plt.colorbar() 21 | ''' 22 | fig = plt.figure() 23 | # ims is a list of lists, each row is a list of artists to draw in the 24 | # current frame; here we are just animating one artist, the image, in 25 | # each frame 26 | ims = [] 27 | for i in range(len(solve_matrix.y[0])): 28 | if i%1==0: 29 | print('Appending image nr: '+str(i)) 30 | omega_vector = solve_matrix.y[:, i] 31 | omega = np.reshape(omega_vector, ([N, N])) 32 | #im = plt.contourf(omega,cmap='jet',vmax=max_val,vmin=min_val,levels=100,animated=True) 33 | im = plt.imshow(omega,animated=True,cmap='jet') 34 | # def setvisible(self, vis): 35 | # for c in self.collections: c.set_visible(vis) 36 | #im.set_visible = types.MethodType(setvisible, im) 37 | # im.axes = plt.gca() 38 | # im.figure = fig 39 | ims.append([im]) 40 | 41 | cbar = plt.colorbar(im) 42 | #ScalarMappable.set_clim(min_val,max_val) 43 | #cbar.set_ticks(np.linspace(min_val,max_val,10)) 44 | cbar.set_label('Vorticity magnitude [m/s]') 45 | plt.xlim(0,N) 46 | plt.ylim(0,N) 47 | plt.xlabel('x [m]') 48 | plt.ylabel('y [m]') 49 | #plt.axes().set_aspect('equal') 50 | 51 | ani = animation.ArtistAnimation(fig, ims, interval=200, blit=True, 52 | repeat_delay=None) 53 | 54 | #ani.save('dynamic_images.mp4') 55 | ani.save('animation.gif', writer='imagemagick') 56 | 57 | plt.show() 58 | 59 | ''' 60 | 61 | -------------------------------------------------------------------------------- /2D/vorticity_formulation/2DSpectral_vorticity.py: -------------------------------------------------------------------------------- 1 | # Solver of 2D Navier Stokes equation on streamfunction-vorticity formulation. 2 | # TODO fix matplotlib such that one can plot two figures, one with velocity and one with 3 | # vorticity simultaneously. 4 | # TODO FIX MAPPING STRUCTURE IN FOLDER. DELETE UNECESSARY FILES!!! 
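# The solver below advances the 2D vorticity-streamfunction equations
#     d(omega)/dt + u*d(omega)/dx + v*d(omega)/dy = nu*Laplacian(omega)
# in Fourier space: inside Rhs(), psi_hat = omega_hat / k^2, the velocity is
# recovered as u = ifft2(-1j*Ky*psi_hat), v = ifft2(1j*Kx*psi_hat), and the
# nonlinear products are formed in physical space with a 2/3-rule dealiasing mask.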
5 | import numpy as np 6 | from numpy.fft import fftfreq, fft, ifft, irfft2, rfft2 7 | import random 8 | from numpy.random import seed, uniform 9 | import scipy.integrate as integrate 10 | import matplotlib.pyplot as plt 11 | from tqdm import tqdm 12 | import matplotlib.animation as animation 13 | from numba import jit 14 | 15 | global u, v 16 | 17 | 18 | #################################################################################################### 19 | def init_Omega(omega_grid): 20 | # Set initial condition on the Omega vector in Fourier space 21 | seed(1969) 22 | omega_hat = np.fft.fft2(omega_grid) 23 | omega_hat[0, 4] = uniform() + 1j * uniform() 24 | omega_hat[1, 1] = uniform() + 1j * uniform() 25 | omega_hat[3, 0] = uniform() + 1j * uniform() 26 | # print(omega_hat) 27 | omega_IC = np.real(np.fft.ifft2(omega_hat)) 28 | omega_IC = omega_IC / np.max(omega_IC) 29 | reshaped_omega_IC = np.reshape(omega_IC, N2, 1) 30 | 31 | return reshaped_omega_IC 32 | 33 | 34 | def initialize(choice): 35 | # Set initial condition on the velocity vectors. 36 | if choice == 'random': 37 | u = np.array([[random.random() for i in range(N)] for j in range(N)]) 38 | v = np.array([[random.random() for i in range(N)] for j in range(N)]) 39 | u_hat = np.fft.fft2(u) 40 | v_hat = np.fft.fft2(v) 41 | omega_hat = (v_hat * Kx) - (u_hat * Ky) 42 | omega = np.real(np.fft.ifft2(omega_hat)) 43 | omega = omega / np.max(omega) 44 | omega_vector = np.reshape(omega, N2, 1) 45 | if choice == 'circle': 46 | x = np.arange(0, N) 47 | y = np.arange(0, N) 48 | u = np.ones((y.size, x.size)) * -1 49 | v = np.ones((y.size, x.size)) * -1 50 | cx = N / 2 51 | cy = N / 2 52 | r = int(N / 10) 53 | # The two lines below could be merged, but I stored the mask 54 | # for code clarity. 55 | mask = (x[np.newaxis, :] - cx) ** 2 + (y[:, np.newaxis] - cy) ** 2 < r ** 2 56 | u[mask] = 100 57 | v[mask] = -100 58 | u_hat = np.fft.fft2(u) 59 | v_hat = np.fft.fft2(v) 60 | omega_hat = ((v_hat * Kx) - (u_hat * Ky)) * 1j 61 | omega = np.real(np.fft.ifft2(omega_hat)) 62 | omega = omega / np.max(omega) 63 | omega_vector = np.reshape(omega, N2, 1) 64 | if choice == 'velocity_strips': 65 | u = np.ones([N, N]) * 0 66 | v = np.ones([N, N]) * 0 67 | u[int(N / 2 + N / 9 - N / 20):int(N / 2 + N / 9 + N / 20), :] = 1 68 | u[int(N / 2 - N / 9 - N / 20):int(N / 2 - N / 9 + N / 20), :] = -1 69 | #plt.imshow(u) 70 | #plt.show() 71 | 72 | u_hat = np.fft.fft2(u) 73 | v_hat = np.fft.fft2(v) 74 | omega_hat = ((v_hat * Kx) - (u_hat * Ky)) * 1j 75 | omega = np.real(np.fft.ifft2(omega_hat)) 76 | # omega = omega / np.max(omega) 77 | omega_vector = np.reshape(omega, N2, 1) 78 | 79 | if choice == 'omega_1': 80 | seed(1969) 81 | omega_grid = np.zeros([N, N]) 82 | omega_hat = np.fft.fft2(omega_grid) 83 | omega_hat[0, 4] = uniform() + 1j * uniform() 84 | omega_hat[1, 1] = uniform() + 1j * uniform() 85 | omega_hat[3, 0] = uniform() + 1j * uniform() 86 | omega_hat[2, 3] = uniform() + 1j * uniform() 87 | # print(omega_hat) 88 | omega = np.real(np.fft.ifft2(omega_hat)) 89 | omega = omega / np.max(omega) 90 | omega_vector = np.reshape(omega, N2, 1) 91 | return omega_vector 92 | 93 | #@jit 94 | def Rhs(t, omega_vector): # change order of arguments for different ode solver 95 | global u, v 96 | omega = np.reshape(omega_vector, ([N, N])).transpose() 97 | omega_hat = np.fft.fft2(omega) # *dealias 98 | # print(omega_hat) 99 | # omega_hat = np.multiply(omega_hat,dealias) 100 | # print(omega_hat) 101 | omx = np.real(np.fft.ifft2(1j * Kx * omega_hat * dealias)) 102 | omy = 
np.real(np.fft.ifft2(1j * Ky * omega_hat * dealias)) 103 | psi_hat = omega_hat * K2_inv 104 | u = np.real(np.fft.ifft2(-1j * Ky * psi_hat * dealias)) 105 | v = np.real(np.fft.ifft2(1j * Kx * psi_hat * dealias)) 106 | # print(u) 107 | # u = np.real(np.fft.ifft2(Dy * omega_hat * dealias)) 108 | # v = np.real(np.fft.ifft2(-Dx * omega_hat * dealias)) 109 | rhs = np.real(np.fft.ifft2(-nu * K2 * omega_hat) - u * omx - v * omy) 110 | # rhs *=dealias 111 | Rhs = np.reshape(rhs, N2, 1) 112 | return Rhs 113 | 114 | 115 | def writeToFile(solve): 116 | print('Writing files... ') 117 | np.save('dt_vector', solve.t) 118 | print('Time list written...') 119 | np.save('vorticity', solve.y) 120 | print('Vorticity list written...') 121 | u_vel, v_vel = convertVorticityToVelocity(solve.y) 122 | np.save('u_vel', u_vel) 123 | print('u velocity list written...') 124 | np.save('v_vel', v_vel) 125 | print('v-velocity list written...') 126 | # read with: new_data = np.loadtxt('test.txt') 127 | print('Finished writing files.') 128 | return 129 | 130 | 131 | def convertVorticityToVelocity(solve): 132 | vorticityField = solve 133 | u_vel = [None] * len(time_intervals) # Allocate memory for array 134 | v_vel = [None] * len(time_intervals) # Allocate memory for array 135 | for t in range(len(time_intervals)): 136 | omega = np.reshape(vorticityField[:, t], ([N, N])).transpose() 137 | omega_hat = np.fft.fft2(omega) 138 | psi_hat = omega_hat * K2_inv 139 | u_vel[t] = np.real(np.fft.ifft2(-1j * Ky * psi_hat * dealias)) 140 | v_vel[t] = np.real(np.fft.ifft2(1j * Kx * psi_hat * dealias)) 141 | u_vel = np.reshape(np.array(u_vel), (len(time_intervals), N2), 1) 142 | v_vel = np.reshape(np.array(v_vel), (len(time_intervals), N2), 1) 143 | return u_vel, v_vel 144 | 145 | 146 | #################################################################################################### 147 | 148 | # Base constants and spatial grid vectors 149 | nu = 1e-6 150 | L = np.pi 151 | N = int(256) 152 | N2 = int(N ** 2) 153 | dx = 2 * L / N 154 | x = np.linspace(1 - N / 2, N / 2, N) * dx 155 | y = np.linspace(1 - N / 2, N / 2, N) * dx 156 | [X, Y] = np.meshgrid(x, y) 157 | 158 | # Spectral frequencies and grid vectors 159 | kx = fftfreq(N, 1. / N) 160 | ky = kx.copy() 161 | K = np.array(np.meshgrid(kx, ky), dtype=int) 162 | Kx = K[0] 163 | Ky = K[1] 164 | K2 = np.sum(K * K, 0, dtype=int) 165 | K2_inv = 1 / np.where(K2 == 0, 1, K2).astype(float) 166 | K2_inv[0][0] = 0 167 | # Dx = 1j * Kx * K2_inv 168 | # Dy = 1j * Ky * K2_inv 169 | kmax_dealias = 2. / 3. 
* (N / 2 + 1) 170 | dealias = np.array( 171 | (Kx < kmax_dealias) * (Ky < kmax_dealias), 172 | dtype=bool) 173 | 174 | # Initialize solution vector 175 | omega_vector = initialize('omega_1') 176 | 177 | animateOmega = False 178 | animateVelocity = True 179 | if (animateOmega or animateVelocity) == True: 180 | # Temporal data for animation 181 | t0 = 0 182 | t_end = 15 183 | dt = 0.1 184 | 185 | fig = plt.figure() 186 | numsteps = np.ceil(t_end / dt) 187 | step = 1 188 | pbar = tqdm(total=int(t_end / dt)) 189 | 190 | ims = [] 191 | while step <= numsteps: 192 | solve = integrate.solve_ivp(Rhs, [0, dt], omega_vector, method='RK45', rtol=1e-10, 193 | atol=1e-10) 194 | omega_vector = solve.y[:, -1] 195 | 196 | if animateOmega == True: 197 | if step % 5 == 0: 198 | omega = np.reshape(omega_vector, ([N, N])).transpose() 199 | im = plt.imshow(omega, cmap='jet', vmax=1, vmin=-1, 200 | animated=True) 201 | plt.pause(0.05) 202 | ims.append([im]) 203 | if animateVelocity == True: 204 | if step % 3 == 0: 205 | im = plt.imshow(np.abs((u ** 2) + (v ** 2)), cmap='jet', animated=True) 206 | ims.append([im]) 207 | plt.pause(0.05) 208 | step += 1 209 | pbar.update(1) 210 | 211 | if animateOmega == True: 212 | cbar = plt.colorbar(im) 213 | # ScalarMappable.set_clim(min_val,max_val) 214 | # cbar.set_ticks(np.linspace(min_val,max_val,10)) 215 | cbar.set_label('Vorticity magnitude [m/s]') 216 | plt.xlim(0, N) 217 | plt.ylim(0, N) 218 | plt.xlabel('x [m]') 219 | plt.ylabel('y [m]') 220 | # plt.axes().set_aspect('equal') 221 | ani = animation.ArtistAnimation(fig, ims, interval=15, blit=True, 222 | repeat_delay=None) 223 | ani.save('animation_folder/animationVorticity.gif', writer='imagemagick', fps=30) 224 | plt.show() 225 | if animateVelocity == True: 226 | cbar = plt.colorbar(im) 227 | cbar.set_label('Velocity magnitude [m/s]') 228 | plt.xlim(0, N) 229 | plt.ylim(0, N) 230 | plt.xlabel('x [m]') 231 | plt.ylabel('y [m]') 232 | # plt.axes().set_aspect('equal') 233 | ani = animation.ArtistAnimation(fig, ims, interval=15, blit=True, 234 | repeat_delay=None) 235 | ani.save('animation_folder/animationVelocity.gif', writer='imagemagick', fps=30) 236 | # TODO find out what interval we need to make a gif of a certain length in 237 | # seconds. 238 | plt.show() 239 | 240 | pbar.close() 241 | 242 | if (animateVelocity and animateOmega) == False: 243 | # Temporal data for non-animation 244 | print('Entering false script') 245 | # TODO for verification, the maximum dt can be changed in the ODE option argument 246 | t0 = 0 247 | t_end = 15 248 | dt = 0.1 249 | time_intervals = np.linspace(t0,t_end,t_end/dt+1) 250 | solve = integrate.solve_ivp(Rhs, [0, t_end], omega_vector, method='RK45', 251 | t_eval=time_intervals, rtol=1e-10, 252 | atol=1e-10) 253 | plt.imshow(np.abs((u ** 2) + (v ** 2)), cmap='jet') 254 | plt.show() 255 | writeToFile(solve) # Written to work for integrate.solve_ivp(). 
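# A hedged sketch (assumption, not in the original script) of how to read the
# results back: writeToFile() stores everything with np.save, so the files are
# .npy and are reopened with np.load (np.loadtxt, mentioned inside writeToFile,
# only applies to plain-text output).  Shapes follow the reshaping used above:
# vorticity is (N2, n_times), while u_vel and v_vel are (n_times, N2).
#
#   t_loaded = np.load('dt_vector.npy')                 # sampled times
#   omega_loaded = np.load('vorticity.npy')             # shape (N2, n_times)
#   u_loaded = np.load('u_vel.npy')                     # shape (n_times, N2)
#   omega_last = np.reshape(omega_loaded[:, -1], (N, N)).transpose()
#   u_last = np.reshape(u_loaded[-1], (N, N))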
256 | print(' ------- Script finished -------') 257 | -------------------------------------------------------------------------------- /2D/vorticity_formulation/2DSpectral_vorticity_MPI_IDUN.py: -------------------------------------------------------------------------------- 1 | # solve 2-D incompressible NS equations using spectral method 2 | 3 | import matplotlib.pyplot as plt 4 | from numpy import * 5 | from numpy.random import seed, uniform 6 | from numpy import max as npmax 7 | from numpy.fft import fftfreq, fft, ifft, fft2, ifft2, fftshift, ifftshift 8 | from mpi4py import MPI 9 | import matplotlib.animation as animation 10 | 11 | try: 12 | from tqdm import tqdm 13 | except ImportError: 14 | pass 15 | 16 | # parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '.')) 17 | # sys.path.append(parent) 18 | 19 | # parameters 20 | tend = 100 21 | dt = 1e-3 22 | Nstep = int(ceil(tend / dt)) 23 | N = Nx = Ny = 256; # grid size 24 | t = 0 25 | nu = 5e-5 # viscosity 26 | ICchoice = 'omega4' 27 | aniNr = 0.05 * Nstep 28 | save_dt = dt 29 | save_every = 0.01*Nstep 30 | save_interval = int(ceil(save_every)) 31 | global store_counter 32 | store_counter = 1 33 | t_list = linspace(0, tend, int(1 / save_dt + 1)) 34 | 35 | # ------------MPI setup--------- 36 | comm = MPI.COMM_WORLD 37 | num_processes = comm.Get_size() 38 | print('number of processes = ',num_processes) 39 | rank = comm.Get_rank() 40 | Np = int(N / num_processes) 41 | # slab decomposition, split arrays in x direction in physical space, in ky direction in 42 | # Fourier space 43 | Uc_hat = empty((N, Np), dtype=complex) 44 | Uc_hatT = empty((Np, N), dtype=complex) 45 | U_mpi = empty((num_processes, Np, Np), dtype=complex) 46 | 47 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 48 | b = [0.5, 0.5, 1.] 
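# Note (hedged): a and b above are the classical four-stage Runge-Kutta
# weights, but the main loop of this script advances the vorticity with a
# Crank-Nicolson-type step (implicit viscous term, explicit advection), so a
# and b are only exercised by the commented-out RK4 variant further down.
# That variant has the following structure, where rhs_hat would have to be
# re-evaluated from the current omega_hat at every stage for full fourth-order
# accuracy (sketch, illustrative only; compute_rhs_hat is a hypothetical helper):
#
#   omega_hat1 = omega_hat0 = omega_hat
#   for rk in range(4):
#       rhs_hat = compute_rhs_hat(omega_hat)                # stage derivative
#       if rk < 3:
#           omega_hat = omega_hat0 + b[rk] * dt * rhs_hat   # next stage value
#       omega_hat1 = omega_hat1 + a[rk] * dt * rhs_hat      # accumulate weighted stages
#   omega_hat = omega_hat1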
49 | 50 | 51 | 52 | def ifftn_mpi(fu, u): 53 | Uc_hat[:] = ifftshift(ifft(fftshift(fu), axis=0)) 54 | comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX]) 55 | Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape) 56 | u[:] = ifftshift(ifft(fftshift(Uc_hatT), axis=1)) 57 | return u 58 | 59 | 60 | # FFT 61 | def fftn_mpi(u, fu): 62 | Uc_hatT[:] = fftshift(fft(ifftshift(u), axis=1)) 63 | U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np), 1) 64 | comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX]) 65 | fu[:] = fftshift(fft(ifftshift(fu), axis=0)) 66 | return fu 67 | 68 | 69 | 70 | 71 | 72 | 73 | # ----Initialize Velocity in Fourier space----------- 74 | def IC_condition(Nx, Np, u, v, u_hat, v_hat, ICchoice, omega, omega_hat, X, Y): 75 | if ICchoice == 'randomVel': 76 | u = random.rand(Np, Ny) 77 | v = random.rand(Np, Ny) 78 | # u = u / npmax(u) 79 | # v = v / npmax(v) 80 | u_hat = fftn_mpi(u, u_hat) 81 | v_hat = fftn_mpi(v, v_hat) 82 | omega_hat = 1j * (Kx * v_hat - Ky * u_hat); 83 | if ICchoice == 'Vel1': 84 | if rank == 0: 85 | # u = 86 | ## v = 87 | # u = u/npmax(u) 88 | # v = v/npmax(v) 89 | # u_hat = fftn_mpi(u, u_hat) 90 | # v_hat = fftn_mpi(v, v_hat) 91 | u_hat[2, 2] = 5 + 10j 92 | v_hat[2, 2] = 5 + 10j 93 | u_hat[5, 2] = 5 + 10j 94 | v_hat[5, 2] = 5 + 10j 95 | u_hat[2, 3] = 5 + 10j 96 | v_hat[2, 3] = 5 + 10j 97 | u = ifftn_mpi(u_hat, u) 98 | v = ifftn_mpi(v_hat, v) 99 | u = u / npmax(u) 100 | v = v / npmax(v) 101 | u_hat = fftn_mpi(u, u_hat) 102 | v_hat = fftn_mpi(v, v_hat) 103 | omega_hat = 1j * (Kx * v_hat - Ky * u_hat); 104 | if ICchoice == 'omegahat1': 105 | if rank == 0: 106 | random.seed(1969) 107 | omega_hat[0, 4] = random.uniform() + 1j * random.uniform() 108 | omega_hat[1, 1] = random.uniform() + 1j * random.uniform() 109 | omega_hat[3, 0] = random.uniform() + 1j * random.uniform() 110 | omega_hat[2, 3] = random.uniform() + 1j * random.uniform() 111 | omega_hat[5, 3] = random.uniform() + 1j * random.uniform() 112 | 113 | omega = abs(ifftn_mpi(omega_hat, omega)) 114 | omega = omega / npmax(omega) 115 | omega_hat = fftn_mpi(omega, omega_hat) 116 | if ICchoice == 'omega1': 117 | omega = sin(X)*cos(Y) 118 | omega_hat = fftn_mpi(omega, omega_hat) 119 | if ICchoice == 'omega3': 120 | H = exp(-((2*X - pi + pi / 5) ** 2 + (4*Y - pi + pi / 5) ** 2) / 0.3) - exp( 121 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 122 | -((3*X - pi - pi / 5) ** 2 + (2*Y - pi - pi / 5) ** 2) / 0.4)+exp(-((2*X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 123 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (3*Y - pi - pi / 5)**2)/0.4)+\ 124 | exp(-((X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) + exp( 125 | -((X - pi - pi / 5)**2 + (3*Y - pi + pi / 5) ** 2) /0.3) - exp(-((X - pi - pi / 5)**2 + (Y - pi - pi / 5)**2)/0.4); 126 | epsilon = 0.4; 127 | Noise = random.rand(Np, Ny) 128 | omega = H + Noise*epsilon 129 | omega_hat = (fftn_mpi(omega, omega_hat)) 130 | omega = real(ifftn_mpi(omega_hat, omega)) 131 | if ICchoice == 'omega4': 132 | H = exp(-((2*X - pi + pi / 5) ** 2 + (4*Y - pi + pi / 5) ** 2) / 0.3) - exp( 133 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 134 | -((X + pi - pi / 5) ** 2 + (2*Y - pi - pi / 5) ** 2) / 0.4)+exp(-((2*X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 135 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (3*Y - pi - pi / 5)**2)/0.4)+\ 136 | exp(-((X - pi + pi / 5)**2 + (Y - 
pi + pi / 5) ** 2) / 0.3) + exp( 137 | -((X - pi - pi / 5)**2 + (3*Y - pi + pi / 5) ** 2) /0.3) + exp(-((X + pi - pi / 5)**2 + (Y + pi - pi / 5)**2)/0.4)-\ 138 | exp(-((X - pi + pi / 5) ** 2 + (Y - pi + pi / 5) ** 2) / 0.3) + exp( 139 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 140 | -((X - pi - pi / 5) ** 2 + (Y - pi - pi / 5) ** 2) / 0.4) 141 | epsilon = 0.7; 142 | Noise = random.rand(Np, Ny) 143 | omega = H + Noise*epsilon 144 | omega_hat = (fftn_mpi(omega, omega_hat)) 145 | omega = real(ifftn_mpi(omega_hat, omega)) 146 | if ICchoice == 'omega2': 147 | 148 | H = exp(-((X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 149 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (Y - pi - pi / 5)**2)/0.4); 150 | #epsilon = 0.1; 151 | #Noise = random.rand(Np, Ny) 152 | omega = H 153 | omega_hat = (fftn_mpi(omega,omega_hat)) 154 | omega = real(ifftn_mpi(omega_hat,omega)) 155 | return omega_hat 156 | 157 | 158 | # ------output function---- 159 | # this function output vorticty contour 160 | def output(save_counter, omega, u, v, x, y, Nx, Ny, rank, time, plotstring): 161 | # collect values to root 162 | omega_all = comm.gather(omega, root=0) 163 | u_all = comm.gather(u, root=0) 164 | v_all = comm.gather(v, root=0) 165 | x_all = comm.gather(x, root=0) 166 | y_all = comm.gather(y, root=0) 167 | if rank == 0: 168 | if plotstring == 'Vorticity': 169 | # reshape the ''list'' 170 | omega_all = asarray(omega_all).reshape(Nx, Ny) 171 | x_all = asarray(x_all).reshape(Nx, Ny) 172 | y_all = asarray(y_all).reshape(Nx, Ny) 173 | plt.contourf(x_all, y_all, omega_all, cmap='jet') 174 | delimiter = '' 175 | title = delimiter.join(['vorticity contour, time=', str(time)]) 176 | plt.xlabel('x') 177 | plt.ylabel('y') 178 | plt.title(title) 179 | plt.show() 180 | filename = delimiter.join(['vorticity_t=', str(time), '.png']) 181 | plt.savefig(filename, format='png') 182 | if plotstring == 'Velocity': 183 | # reshape the ''list'' 184 | u_all = asarray(u_all).reshape(Nx, Ny) 185 | v_all = asarray(v_all).reshape(Nx, Ny) 186 | x_all = asarray(x_all).reshape(Nx, Ny) 187 | y_all = asarray(y_all).reshape(Nx, Ny) 188 | # plt.contourf(x_all, y_all, (u_all ** 2 + v_all ** 2), cmap='jet') 189 | plt.imshow((u_all ** 2 + v_all ** 2), cmap='jet') 190 | delimiter = '' 191 | title = delimiter.join(['Velocity contour, time=', str(time)]) 192 | plt.xlabel('x') 193 | plt.ylabel('y') 194 | plt.title(title) 195 | plt.show() 196 | filename = delimiter.join(['velocity_t=', str(time), '.png']) 197 | plt.savefig(filename, format='png') 198 | if plotstring == 'VelocityAnimation': 199 | # reshape the ''list'' 200 | u_all = asarray(u_all).reshape(Nx, Ny) 201 | v_all = asarray(v_all).reshape(Nx, Ny) 202 | im = plt.imshow(sqrt((u_all ** 2) + (v_all ** 2)), cmap='jet', animated=True) 203 | # im = plt.quiver(x,y,u_all,v_all) 204 | ims.append([im]) 205 | # plt.show() 206 | if plotstring == 'VorticityAnimation': 207 | omega_all = asarray(omega_all).reshape(Nx, Ny) 208 | im = plt.imshow(abs(omega_all), cmap='jet', animated=True) 209 | ims.append([im]) 210 | # plt.show() 211 | if plotstring == 'store': 212 | omega_all = asarray(omega_all).reshape(Nx, Ny) 213 | u_all = asarray(u_all).reshape(Nx, Ny) 214 | v_all = asarray(v_all).reshape(Nx, Ny) 215 | #if (save_counter % save_interval): 216 | 217 | if (t==0): 218 | u_storage_init = u_all 219 | v_storage_init = v_all 220 | omega_storage_init = omega_all 221 | save('datafiles/u/u_vel_t_' + str(round(time)), u_storage_init) 222 | 
save('datafiles/v/v_vel_t_' + str(round(time)), v_storage_init) 223 | #save('datafiles/omega/omega_t_' + str(round(time)), omega_storage_init) 224 | #save('datafiles/time/tlist_t_' + str(round(time)), t_list) 225 | 226 | if(t!=0): 227 | u_storage[save_counter%save_interval] = u_all 228 | v_storage[save_counter%save_interval] = v_all 229 | #omega_storage[save_counter%save_interval] = omega_all 230 | 231 | 232 | if (save_counter%save_interval==(save_interval-1) and t!= 0 ): 233 | global store_counter 234 | print('Storing next time-sequence of variables') 235 | save('datafiles/u/u_vel_t_'+str(store_counter), u_storage) 236 | save('datafiles/v/v_vel_t_'+str(store_counter), v_storage) 237 | #save('datafiles/omega/omega_t_'+str(store_counter), omega_storage) 238 | #save('datafiles/time/tlist_t_'+str(round(time)), t_list) 239 | 240 | store_counter +=1 241 | 242 | # initialize x,y kx, ky coordinate 243 | def IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes): 244 | x = zeros((Np, Ny), dtype=float); 245 | y = zeros((Np, Ny), dtype=float); 246 | kx = zeros((Nx, Np), dtype=float); 247 | ky = zeros((Nx, Np), dtype=float); 248 | for j in range(Ny): 249 | x[0:Np, j] = range(Np); 250 | if num_processes == 1: 251 | x[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 252 | # offset for mpi 253 | if num_processes != 1: 254 | x = x - (num_processes / 2 - rank) * Np 255 | x = x * dx; 256 | for i in range(Np): 257 | y[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 258 | y = y * dy; 259 | 260 | for j in range(Np): 261 | kx[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 262 | for i in range(Nx): 263 | ky[i, 0:Np] = range(Np); 264 | if num_processes == 1: 265 | ky[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 266 | # offset for mpi 267 | if num_processes != 1: 268 | ky = ky - (num_processes / 2 - rank) * Np 269 | 270 | k2 = kx ** 2 + ky ** 2; 271 | for i in range(Nx): 272 | for j in range(Np): 273 | if (k2[i, j] == 0): 274 | k2[i, j] = 1e-5; # so that I do not divide by 0 below when using 275 | # projection operator 276 | # k2_exp = exp(-nu * (k2 ** 5) * dt - nu_hypo * dt); 277 | k2_inv = K2_inv = 1 / where(k2 == 0, 1, k2).astype(float) 278 | return x, y, kx, ky, k2, k2_inv 279 | 280 | 281 | # -----------GRID setup----------- 282 | Lx = 2 * pi; 283 | Ly = 2 * pi; 284 | dx = Lx / Nx; 285 | dy = Ly / Ny; 286 | x, y, Kx, Ky, K2, K2_inv = IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes) 287 | # TODO check what needs to be done with the wave numbers to get rid of IC_Coor function 288 | 289 | sx = slice(rank*Np,(rank+1)*Np) 290 | Xmesh = mgrid[sx, :N].astype(float) * Lx / N 291 | X = Xmesh[0] 292 | Y = Xmesh[1] 293 | 294 | 295 | x = Y[0] 296 | y = Y[0] 297 | kx = fftfreq(N, 1. / N) 298 | ky = kx.copy() 299 | 300 | ''' 301 | K = array(meshgrid(kx, ky[sx], indexing='ij'), dtype=int) 302 | Kx = K[1] 303 | Ky = K[0] 304 | K2 = sum(K * K, 0, dtype=int) 305 | ''' 306 | 307 | 308 | LapHat = K2.copy() 309 | LapHat *= -1 310 | #K2[0][0] = 1 311 | K2 *=-1 312 | K2_inv = 1 / where(K2 == 0, 1, K2).astype(float) 313 | ikx_over_K2 = 1j * Kx * K2_inv 314 | iky_over_K2 = 1j * Ky * K2_inv 315 | 316 | kmax_dealias = 2. / 3. 
* (N / 2 + 1) 317 | dealias = array((abs(Kx) < kmax_dealias) * (abs(Ky) < kmax_dealias), dtype=bool) 318 | 319 | plt.imshow(K2,cmap='viridis') 320 | plt.colorbar() 321 | plt.show() 322 | 323 | # ----Initialize Variables-------(hat denotes variables in Fourier space,) 324 | u_hat = zeros((Nx, Np), dtype=complex); 325 | v_hat = zeros((Nx, Np), dtype=complex); 326 | u = zeros((Np, Ny), dtype=float); 327 | v = zeros((Np, Ny), dtype=float); 328 | omega_hat0 = zeros((Nx, Np), dtype=complex); 329 | omega_hat1 = zeros((Nx, Np), dtype=complex); 330 | omega_hat = zeros((Nx, Np), dtype=complex); 331 | omega_hat_new = zeros((Nx, Np), dtype=complex); 332 | omega = zeros((Np, Ny), dtype=float); 333 | omega_kx = zeros((Np, Ny), dtype=float); 334 | omega_ky = zeros((Np, Ny), dtype=float); 335 | v_grad_omega = zeros((Np, Ny), dtype=float); 336 | psi_hat = zeros((Nx, Np), dtype=complex); 337 | rhs_hat = zeros((Nx, Np), dtype=complex); 338 | rhs = zeros((Np, Nx), dtype=float); 339 | visc_term_complex = zeros((Ny, Np), dtype=complex) 340 | visc_term_real = zeros((Np, Ny), dtype=float) 341 | v_grad_omega_hat = zeros((Ny, Np), dtype=complex) 342 | u_storage = empty((save_interval, Nx, Nx), dtype=float) 343 | v_storage = empty((save_interval, Nx, Nx), dtype=float) 344 | omega_storage = empty((save_interval, Nx, Nx), dtype=float) 345 | u_storage_init = empty((1, Nx, Nx), dtype=float) 346 | v_storage_init = empty((1, Nx, Nx), dtype=float) 347 | omega_storage_init = empty((1, Nx, Nx), dtype=float) 348 | 349 | # generate initial velocity field 350 | omega_hat_t0 = IC_condition(Nx, Np, u, v, u_hat, v_hat, ICchoice, omega, omega_hat, X, Y) 351 | # omega_hat_t0 = 1j * (Kx * v_hat - Ky * u_hat)*dealias; 352 | omega = ifftn_mpi(omega_hat_t0,omega) 353 | 354 | step = 1 355 | try: 356 | pbar = tqdm(total=int(Nstep)) 357 | except: 358 | pass 359 | save_counter = 0 360 | plotstring = ('store') 361 | fig = plt.figure() 362 | ims = [] 363 | 364 | # ----Main Loop----------- 365 | for n in range(Nstep + 1): 366 | if n == 0: 367 | # TODO check what needs to be done to use IC from matlab program 368 | # TODO very low convection? bug? 
compare with animation plot on github 369 | omega_hat = omega_hat_t0 370 | 371 | u_hat = -iky_over_K2 * omega_hat 372 | v_hat = ikx_over_K2 * omega_hat 373 | u = ifftn_mpi(u_hat, u) 374 | v = ifftn_mpi(v_hat, v) 375 | 376 | omega_kx = ifftn_mpi(1j*Kx * omega_hat, omega_kx) 377 | omega_ky = ifftn_mpi(1j*Ky * omega_hat, omega_ky) 378 | v_grad_omega = (u * omega_kx + v * omega_ky) 379 | v_grad_omega_hat = fftn_mpi(v_grad_omega, v_grad_omega_hat)*dealias 380 | 381 | 382 | visc_term_complex = -nu * K2 * omega_hat 383 | 384 | #rhs_hat = visc_term_complex - u_hat * 1j * Kx * omega_hat * dealias - v_hat * 1j * \ 385 | # Ky * omega_hat * dealias 386 | # rhs_hat = visc_term_complex - v_grad_omega_hat 387 | # rhs = ifftn_mpi(rhs_hat, rhs) 388 | 389 | omega_hat_new = 1 / (1 / dt - 0.5 * nu * LapHat)*( 390 | (1 / dt + 0.5 * nu * LapHat)* omega_hat - v_grad_omega_hat); 391 | omega_hat = omega_hat_new.copy() 392 | 393 | # omega = ifftn_mpi(omega_hat,omega) 394 | ''' 395 | if n%100==0: 396 | omega = ifftn_mpi(omega_hat,omega) 397 | plt.imshow(abs(omega),cmap='jet') 398 | #plt.imshow(sqrt((u ** 2) + (v ** 2)), cmap='jet', animated=True) 399 | plt.show() 400 | ''' 401 | ''' 402 | omega_hat1 = omega_hat0 = omega_hat 403 | for rk in range(4): 404 | if rk < 3: omega_hat = omega_hat0 + b[rk] * dt * rhs_hat 405 | omega_hat1 += a[rk] * dt * rhs_hat 406 | omega_hat = omega_hat1 407 | ''' 408 | 409 | 410 | if(t!=0): 411 | omega = ifftn_mpi(omega_hat, omega) 412 | output(save_counter, omega, u, v, x, y, Nx, Ny, rank, t, plotstring) 413 | save_counter += 1 414 | if (t==0): 415 | omega = ifftn_mpi(omega_hat, omega) 416 | output(save_counter, omega, u, v, x, y, Nx, Ny, rank, t, plotstring) 417 | 418 | 419 | t = t + dt; 420 | step += 1 421 | try: 422 | pbar.update(1) 423 | except: 424 | pass 425 | if rank == 0: 426 | if plotstring in ['VelocityAnimation', 'VorticityAnimation']: 427 | ani = animation.ArtistAnimation(fig, ims, interval=2, blit=True, 428 | repeat_delay=None) 429 | ani.save('animationVelocity.gif', writer='imagemagick') 430 | -------------------------------------------------------------------------------- /2D/vorticity_formulation/2D_mpi_example.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Nov 27 15:14:22 2016 4 | @author: Xin 5 | """ 6 | # solve 2-D incompressible NS equations using spectral method 7 | import matplotlib 8 | 9 | import matplotlib.pyplot as plt 10 | import sys 11 | import os.path 12 | from numpy import * 13 | from numpy.fft import fftfreq, fft, ifft, fft2, ifft2, fftshift, ifftshift 14 | from mpi4py import MPI 15 | from tqdm import tqdm 16 | import matplotlib.animation as animation 17 | 18 | parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '.')) 19 | sys.path.append(parent) 20 | 21 | # parameters 22 | new = 1; 23 | Nstep = 1000; # no. 
of steps 24 | N = Nx = Ny = 64; # grid size 25 | t = 0; 26 | nu = 5e-10; # viscosity 27 | nu_hypo = 2e-3; # hypo-viscosity 28 | dt = 5e-7; # time-step 29 | dt_h = dt / 2; # half-time step 30 | ic_type = 2 # 1 for Taylor-Green init_cond; 2 for random init_cond 31 | k_ic = 1; # initial wavenumber for Taylor green forcing 32 | diag_out_step = 0.02 * Nstep; # frequency of outputting diagnostics 33 | 34 | # ------------MPI setup--------- 35 | comm = MPI.COMM_WORLD 36 | num_processes = comm.Get_size() 37 | rank = comm.Get_rank() 38 | # slab decomposition, split arrays in x direction in physical space, in ky direction in 39 | # Fourier space 40 | Np = int(N / num_processes) 41 | 42 | # ---------declare functions that will be used---------- 43 | 44 | # ---------2D FFT and IFFT----------- 45 | Uc_hat = empty((N, Np), dtype=complex) 46 | Uc_hatT = empty((Np, N), dtype=complex) 47 | U_mpi = empty((num_processes, Np, Np), dtype=complex) 48 | 49 | 50 | # inverse FFT 51 | def ifftn_mpi(fu, u): 52 | Uc_hat[:] = ifftshift(ifft(fftshift(fu), axis=0)) 53 | comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX]) 54 | Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape) 55 | u[:] = ifftshift(ifft(fftshift(Uc_hatT), axis=1)) 56 | return u 57 | 58 | 59 | # FFT 60 | def fftn_mpi(u, fu): 61 | Uc_hatT[:] = fftshift(fft(ifftshift(u), axis=1)) 62 | U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np), 1) 63 | comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX]) 64 | fu[:] = fftshift(fft(ifftshift(fu), axis=0)) 65 | return fu 66 | 67 | 68 | # initialize x,y kx, ky coordinate 69 | def IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes): 70 | x = zeros((Np, Ny), dtype=float); 71 | y = zeros((Np, Ny), dtype=float); 72 | kx = zeros((Nx, Np), dtype=float); 73 | ky = zeros((Nx, Np), dtype=float); 74 | for j in range(Ny): 75 | x[0:Np, j] = range(Np); 76 | if num_processes == 1: 77 | x[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 78 | # offset for mpi 79 | if num_processes != 1: 80 | x = x - (num_processes / 2 - rank) * Np 81 | x = x * dx; 82 | for i in range(Np): 83 | y[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 84 | y = y * dy; 85 | 86 | for j in range(Np): 87 | kx[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 88 | for i in range(Nx): 89 | ky[i, 0:Np] = range(Np); 90 | if num_processes == 1: 91 | ky[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 92 | # offset for mpi 93 | if num_processes != 1: 94 | ky = ky - (num_processes / 2 - rank) * Np 95 | 96 | k2 = kx ** 2 + ky ** 2; 97 | for i in range(Nx): 98 | for j in range(Np): 99 | if (k2[i, j] == 0): 100 | k2[ 101 | i, j] = 1e-5; # so that I do not divide by 0 below when using 102 | # projection operator 103 | k2_exp = exp(-nu * (k2 ** 5) * dt - nu_hypo * dt); 104 | return x, y, kx, ky, k2, k2_exp 105 | 106 | 107 | # ---------Dealiasing function---- 108 | def delias(Vxhat, Vyhat, Nx, Np, k2): 109 | # use 1/3 rule to remove values of wavenumber >= Nx/3 110 | for i in range(Nx): 111 | for j in range(Np): 112 | if (sqrt(k2[i, j]) >= Nx / 3.): 113 | Vxhat[i, j] = 0; 114 | Vyhat[i, j] = 0; 115 | # Projection operator on velocity fields to make them solenoidal----- 116 | tmp = (kx * Vxhat + ky * Vyhat) / k2; 117 | Vxhat = Vxhat - kx * tmp; 118 | Vyhat = Vyhat - ky * tmp; 119 | return Vxhat, Vyhat 120 | 121 | 122 | # ----Initialize Velocity in Fourier space----------- 123 | def IC_condition(ic_type, k_ic, kx, ky, Nx, Np): 124 | # taylor green vorticity field 125 | Vxhat = zeros((Nx, Np), dtype=complex); 126 | Vyhat = zeros((Nx, Np), 
dtype=complex); 127 | if (new == 1 and ic_type == 1): 128 | for iss in [-1, 1]: 129 | for jss in [-1, 1]: 130 | for i in range(Nx): 131 | for j in range(Np): 132 | if (int(kx[i, j]) == iss * k_ic and int(ky[i, j]) == jss * k_ic): 133 | Vxhat[i, j] = -1j * iss; 134 | Vyhat[i, j] = -1j * (-jss); 135 | # Set total energy to 1 136 | Vxhat = 0.5 * Vxhat; 137 | Vyhat = 0.5 * Vyhat; 138 | # generate random velocity field 139 | elif (new == 1 and ic_type == 2): 140 | Vx = random.rand(Np, Ny) 141 | Vy = random.rand(Np, Ny) 142 | Vxhat = fftn_mpi(Vx, Vxhat) 143 | Vyhat = fftn_mpi(Vy, Vyhat) 144 | return Vxhat, Vyhat 145 | 146 | 147 | # ------output function---- 148 | # this function output vorticty contour 149 | def output(Wz, x, y, Nx, Ny, rank, time,plotstring): 150 | # collect values to root 151 | Wz_all = comm.gather(Wz, root=0) 152 | x_all = comm.gather(x, root=0) 153 | y_all = comm.gather(y, root=0) 154 | if rank == 0: 155 | # reshape the ''list'' 156 | Wz_all = asarray(Wz_all).reshape(Nx, Ny) 157 | x_all = asarray(x_all).reshape(Nx, Ny) 158 | y_all = asarray(y_all).reshape(Nx, Ny) 159 | if plotstring == 'save': 160 | plt.contourf(x_all, y_all, Wz_all) 161 | delimiter = '' 162 | title = delimiter.join(['vorticity contour, time=', str(time)]) 163 | plt.xlabel('x') 164 | plt.ylabel('y') 165 | plt.title(title) 166 | plt.show() 167 | filename = delimiter.join(['vorticity@t=', str(time), '.png']) 168 | plt.savefig(filename, format='png') 169 | if plotstring == 'VelocityAnimation': 170 | # reshape the ''list'' 171 | u_all = asarray(u_all).reshape(Nx, Ny) 172 | v_all = asarray(v_all).reshape(Nx, Ny) 173 | im = plt.imshow(sqrt((u_all ** 2) + (v_all ** 2)), cmap='jet', animated=True) 174 | # im = plt.quiver(x,y,u_all,v_all) 175 | ims.append([im]) 176 | if plotstring == 'VorticityAnimation': 177 | omega_all = asarray(Wz_all).reshape(Nx, Ny) 178 | im = plt.imshow(abs(omega_all), cmap='jet', animated=True) 179 | ims.append([im]) 180 | 181 | # --------------finish declaration of functions------- 182 | 183 | 184 | # -----------GRID setup----------- 185 | Lx = 2 * pi; 186 | Ly = 2 * pi; 187 | dx = Lx / Nx; 188 | dy = Ly / Ny; 189 | 190 | # obtain x, y, kx, ky 191 | x, y, kx, ky, k2, k2_exp = IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes) 192 | 193 | # ----Initialize Variables-------(hat denotes variables in Fourier space,) 194 | # velocity 195 | Vxhat = zeros((Nx, Np), dtype=complex); 196 | Vyhat = zeros((Nx, Np), dtype=complex); 197 | # Vorticity 198 | Wzhat = zeros((Nx, Np), dtype=complex); 199 | # Nonlinear term 200 | NLxhat = zeros((Nx, Np), dtype=complex); 201 | NLyhat = zeros((Nx, Np), dtype=complex); 202 | # variables in physical space 203 | Vx = zeros((Np, Ny), dtype=float); 204 | Vy = zeros((Np, Ny), dtype=float); 205 | Wz = zeros((Np, Ny), dtype=float); 206 | 207 | # generate initial velocity field 208 | Vxhat, Vyhat = IC_condition(ic_type, k_ic, kx, ky, Nx, Np) 209 | 210 | 211 | plt.imshow(k2) 212 | plt.show() 213 | # ------Dealiasing------------------------------------------------ 214 | Vxhat, Vyhat = delias(Vxhat, Vyhat, Nx, Np, k2) 215 | # 216 | # ------Storing variables for later use in time integration-------- 217 | Vxhat_t0 = Vxhat; 218 | Vyhat_t0 = Vyhat; 219 | # 220 | plotstring = 'VorticityAnimation' 221 | step = 1 222 | pbar = tqdm(total=int(Nstep)) 223 | 224 | fig = plt.figure() 225 | ims = [] 226 | # ----Main Loop----------- 227 | for istep in range(Nstep + 1): 228 | if rank == 0: 229 | wt = MPI.Wtime() 230 | # ------Dealiasing 231 | Vxhat, Vyhat = delias(Vxhat, Vyhat, Nx, Np, k2) 
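# Note (hedged sketch): the double loop inside delias() implements the 1/3 rule
# by zeroing every mode with sqrt(k2) >= Nx/3 before projecting the velocity
# onto its divergence-free part.  A vectorised equivalent using a boolean mask
# (not used here, illustrative only) would be:
#
#   mask = sqrt(k2) < Nx / 3.
#   Vxhat *= mask
#   Vyhat *= mask
#   tmp = (kx * Vxhat + ky * Vyhat) / k2
#   Vxhat -= kx * tmp
#   Vyhat -= ky * tmp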
232 | # Calculate Vorticity 233 | Wzhat = 1j * (kx * Vyhat - ky * Vxhat); 234 | # fields in x-space 235 | Vx = ifftn_mpi(Vxhat, Vx) 236 | Vy = ifftn_mpi(Vyhat, Vy) 237 | Wz = ifftn_mpi(Wzhat, Wz) 238 | 239 | # Fields in Fourier Space 240 | Vxhat = fftn_mpi(Vx, Vxhat) 241 | Vyhat = fftn_mpi(Vy, Vyhat) 242 | Wzhat = fftn_mpi(Wz, Wzhat) 243 | 244 | # Calculate non-linear term in x-space 245 | NLx = Vy * Wz; 246 | NLy = -Vx * Wz; 247 | 248 | # move non-linear term back to Fourier k-space 249 | NLxhat = fftn_mpi(NLx, NLxhat) 250 | NLyhat = fftn_mpi(NLy, NLyhat) 251 | 252 | # ------Dealiasing------------------------------------------------ 253 | Vxhat, Vyhat = delias(Vxhat, Vyhat, Nx, Np, k2) 254 | 255 | # Integrate in time 256 | # ---Euler for 1/2-step----------- 257 | if (istep == 0): 258 | Vxhat = Vxhat + dt_h * ( 259 | NLxhat - nu * (k2 ** 5) * Vxhat - nu_hypo * (k2 ** (-0)) ** Vxhat); 260 | Vyhat = Vyhat + dt_h * ( 261 | NLyhat - nu * (k2 ** 5) * Vyhat - nu_hypo * (k2 ** (-0)) ** Vyhat); 262 | oNLxhat = NLxhat; 263 | oNLyhat = NLyhat; 264 | # ---Midpoint time-integration---- 265 | elif (istep == 1): 266 | Vxhat = Vxhat_t0 + dt * ( 267 | NLxhat - nu * (k2 ** 5) * Vxhat - nu_hypo * (k2 ** (-0)) * Vxhat); 268 | Vyhat = Vyhat_t0 + dt * ( 269 | NLyhat - nu * (k2 ** 5) * Vyhat - nu_hypo * (k2 ** (-0)) * Vyhat); 270 | # ---Adam-Bashforth integration--- 271 | else: 272 | Vxhat = Vxhat + dt * (1.5 * NLxhat - 0.5 * oNLxhat * k2_exp); 273 | Vyhat = Vyhat + dt * (1.5 * NLyhat - 0.5 * oNLyhat * k2_exp); 274 | Vxhat = Vxhat * k2_exp; 275 | Vyhat = Vyhat * k2_exp; 276 | Vxhat = Vxhat; 277 | Vyhat = Vyhat; 278 | 279 | oNLxhat = NLxhat; 280 | oNLyhat = NLyhat; 281 | step += 1 282 | pbar.update(1) 283 | # output vorticity contour 284 | if (istep % diag_out_step == 0): 285 | output(Wz, x, y, Nx, Ny, rank, t,plotstring) 286 | if rank == 0: 287 | print 288 | 'simulation time' 289 | print 290 | MPI.Wtime() - wt 291 | 292 | t = t + dt; 293 | 294 | if rank == 0: 295 | if plotstring in ['VelocityAnimation', 'VorticityAnimation']: 296 | ani = animation.ArtistAnimation(fig, ims, interval=15, blit=True, 297 | repeat_delay=None) 298 | ani.save('animationVelocity.gif', writer='imagemagick', fps=30) 299 | if plotstring == 'store': 300 | save('datafiles/u_vel', u_storage) 301 | save('datafiles/v_vel', v_storage) 302 | save('datafiles/omega', omega_storage) 303 | save('datafiles/tlist', t_list) 304 | -------------------------------------------------------------------------------- /2D/vorticity_formulation/2D_vorticity_mpi_scaling_script.py: -------------------------------------------------------------------------------- 1 | # solve 2-D incompressible NS equations using spectral method 2 | 3 | import matplotlib.pyplot as plt 4 | from numpy import * 5 | from numpy.random import seed, uniform 6 | from numpy import max as npmax 7 | from numpy.fft import fftfreq, fft, ifft, fft2, ifft2, fftshift, ifftshift 8 | from mpi4py import MPI 9 | import matplotlib.animation as animation 10 | 11 | try: 12 | from tqdm import tqdm 13 | except ImportError: 14 | pass 15 | 16 | # parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '.')) 17 | # sys.path.append(parent) 18 | 19 | # parameters 20 | tend = 100 21 | dt = 1e-1 22 | Nstep = int(ceil(tend / dt)) 23 | N = Nx = Ny = 16; # grid size 24 | t = 0 25 | nu = 5e-3 # viscosity 26 | ICchoice = 'omega4' 27 | 28 | 29 | # ------------MPI setup--------- 30 | comm = MPI.COMM_WORLD 31 | num_processes = comm.Get_size() 32 | print('number of processes = ',num_processes) 33 | rank = 
comm.Get_rank() 34 | Np = int(N / num_processes) 35 | # slab decomposition, split arrays in x direction in physical space, in ky direction in 36 | # Fourier space 37 | Uc_hat = empty((N, Np), dtype=complex) 38 | Uc_hatT = empty((Np, N), dtype=complex) 39 | U_mpi = empty((num_processes, Np, Np), dtype=complex) 40 | 41 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 42 | b = [0.5, 0.5, 1.] 43 | 44 | 45 | 46 | def ifftn_mpi(fu, u): 47 | Uc_hat[:] = ifftshift(ifft(fftshift(fu), axis=0)) 48 | comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX]) 49 | Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape) 50 | u[:] = ifftshift(ifft(fftshift(Uc_hatT), axis=1)) 51 | return u 52 | 53 | 54 | # FFT 55 | def fftn_mpi(u, fu): 56 | Uc_hatT[:] = fftshift(fft(ifftshift(u), axis=1)) 57 | U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np), 1) 58 | comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX]) 59 | fu[:] = fftshift(fft(ifftshift(fu), axis=0)) 60 | return fu 61 | 62 | 63 | 64 | 65 | 66 | 67 | # ----Initialize Velocity in Fourier space----------- 68 | def IC_condition(Nx, Np, u, v, u_hat, v_hat, ICchoice, omega, omega_hat, X, Y): 69 | if ICchoice == 'randomVel': 70 | u = random.rand(Np, Ny) 71 | v = random.rand(Np, Ny) 72 | # u = u / npmax(u) 73 | # v = v / npmax(v) 74 | u_hat = fftn_mpi(u, u_hat) 75 | v_hat = fftn_mpi(v, v_hat) 76 | omega_hat = 1j * (Kx * v_hat - Ky * u_hat); 77 | if ICchoice == 'Vel1': 78 | if rank == 0: 79 | # u = 80 | ## v = 81 | # u = u/npmax(u) 82 | # v = v/npmax(v) 83 | # u_hat = fftn_mpi(u, u_hat) 84 | # v_hat = fftn_mpi(v, v_hat) 85 | u_hat[2, 2] = 5 + 10j 86 | v_hat[2, 2] = 5 + 10j 87 | u_hat[5, 2] = 5 + 10j 88 | v_hat[5, 2] = 5 + 10j 89 | u_hat[2, 3] = 5 + 10j 90 | v_hat[2, 3] = 5 + 10j 91 | u = ifftn_mpi(u_hat, u) 92 | v = ifftn_mpi(v_hat, v) 93 | u = u / npmax(u) 94 | v = v / npmax(v) 95 | u_hat = fftn_mpi(u, u_hat) 96 | v_hat = fftn_mpi(v, v_hat) 97 | omega_hat = 1j * (Kx * v_hat - Ky * u_hat); 98 | if ICchoice == 'omegahat1': 99 | if rank == 0: 100 | random.seed(1969) 101 | omega_hat[0, 4] = random.uniform() + 1j * random.uniform() 102 | omega_hat[1, 1] = random.uniform() + 1j * random.uniform() 103 | omega_hat[3, 0] = random.uniform() + 1j * random.uniform() 104 | omega_hat[2, 3] = random.uniform() + 1j * random.uniform() 105 | omega_hat[5, 3] = random.uniform() + 1j * random.uniform() 106 | 107 | omega = abs(ifftn_mpi(omega_hat, omega)) 108 | omega = omega / npmax(omega) 109 | omega_hat = fftn_mpi(omega, omega_hat) 110 | if ICchoice == 'omega1': 111 | omega = sin(X)*cos(Y) 112 | omega_hat = fftn_mpi(omega, omega_hat) 113 | if ICchoice == 'omega3': 114 | H = exp(-((2*X - pi + pi / 5) ** 2 + (4*Y - pi + pi / 5) ** 2) / 0.3) - exp( 115 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 116 | -((3*X - pi - pi / 5) ** 2 + (2*Y - pi - pi / 5) ** 2) / 0.4)+exp(-((2*X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 117 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (3*Y - pi - pi / 5)**2)/0.4)+\ 118 | exp(-((X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) + exp( 119 | -((X - pi - pi / 5)**2 + (3*Y - pi + pi / 5) ** 2) /0.3) - exp(-((X - pi - pi / 5)**2 + (Y - pi - pi / 5)**2)/0.4); 120 | epsilon = 0.4; 121 | Noise = random.rand(Np, Ny) 122 | omega = H + Noise*epsilon 123 | omega_hat = (fftn_mpi(omega, omega_hat)) 124 | omega = real(ifftn_mpi(omega_hat, omega)) 125 | if ICchoice == 'omega4': 126 | H = exp(-((2*X - pi + pi / 5) ** 2 + (4*Y - pi + pi / 5) ** 2) / 0.3) 
- exp( 127 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 128 | -((X + pi - pi / 5) ** 2 + (2*Y - pi - pi / 5) ** 2) / 0.4)+exp(-((2*X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 129 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (3*Y - pi - pi / 5)**2)/0.4)+\ 130 | exp(-((X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) + exp( 131 | -((X - pi - pi / 5)**2 + (3*Y - pi + pi / 5) ** 2) /0.3) + exp(-((X + pi - pi / 5)**2 + (Y + pi - pi / 5)**2)/0.4)-\ 132 | exp(-((X - pi + pi / 5) ** 2 + (Y - pi + pi / 5) ** 2) / 0.3) + exp( 133 | -((2*X - pi - pi / 5) ** 2 + (3*Y - pi + pi / 5) ** 2) / 0.2) + exp( 134 | -((X - pi - pi / 5) ** 2 + (Y - pi - pi / 5) ** 2) / 0.4) 135 | epsilon = 0.7; 136 | Noise = random.rand(Np, Ny) 137 | omega = H + Noise*epsilon 138 | omega_hat = (fftn_mpi(omega, omega_hat)) 139 | omega = real(ifftn_mpi(omega_hat, omega)) 140 | if ICchoice == 'omega2': 141 | 142 | H = exp(-((X - pi + pi / 5)**2 + (Y - pi + pi / 5) ** 2) / 0.3) - exp( 143 | -((X - pi - pi / 5)**2 + (Y - pi + pi / 5) ** 2) /0.2) + exp(-((X - pi - pi / 5)**2 + (Y - pi - pi / 5)**2)/0.4); 144 | #epsilon = 0.1; 145 | #Noise = random.rand(Np, Ny) 146 | omega = H 147 | omega_hat = (fftn_mpi(omega,omega_hat)) 148 | omega = real(ifftn_mpi(omega_hat,omega)) 149 | return omega_hat 150 | 151 | 152 | 153 | # initialize x,y kx, ky coordinate 154 | def IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes): 155 | x = zeros((Np, Ny), dtype=float); 156 | y = zeros((Np, Ny), dtype=float); 157 | kx = zeros((Nx, Np), dtype=float); 158 | ky = zeros((Nx, Np), dtype=float); 159 | for j in range(Ny): 160 | x[0:Np, j] = range(Np); 161 | if num_processes == 1: 162 | x[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 163 | # offset for mpi 164 | if num_processes != 1: 165 | x = x - (num_processes / 2 - rank) * Np 166 | x = x * dx; 167 | for i in range(Np): 168 | y[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 169 | y = y * dy; 170 | 171 | for j in range(Np): 172 | kx[0:Nx, j] = range(int(-Nx / 2), int(Nx / 2)); 173 | for i in range(Nx): 174 | ky[i, 0:Np] = range(Np); 175 | if num_processes == 1: 176 | ky[i, 0:Ny] = range(int(-Ny / 2), int(Ny / 2)); 177 | # offset for mpi 178 | if num_processes != 1: 179 | ky = ky - (num_processes / 2 - rank) * Np 180 | 181 | k2 = kx ** 2 + ky ** 2; 182 | for i in range(Nx): 183 | for j in range(Np): 184 | if (k2[i, j] == 0): 185 | k2[i, j] = 1e-5; # so that I do not divide by 0 below when using 186 | # projection operator 187 | # k2_exp = exp(-nu * (k2 ** 5) * dt - nu_hypo * dt); 188 | k2_inv = K2_inv = 1 / where(k2 == 0, 1, k2).astype(float) 189 | return x, y, kx, ky, k2, k2_inv 190 | 191 | 192 | # -----------GRID setup----------- 193 | Lx = 2 * pi; 194 | Ly = 2 * pi; 195 | dx = Lx / Nx; 196 | dy = Ly / Ny; 197 | x, y, Kx, Ky, K2, K2_inv = IC_coor(Nx, Ny, Np, dx, dy, rank, num_processes) 198 | # TODO check what needs to be done with the wave numbers to get rid of IC_Coor function 199 | 200 | sx = slice(rank*Np,(rank+1)*Np) 201 | Xmesh = mgrid[sx, :N].astype(float) * Lx / N 202 | X = Xmesh[0] 203 | Y = Xmesh[1] 204 | 205 | 206 | x = Y[0] 207 | y = Y[0] 208 | kx = fftfreq(N, 1. 
/ N) 209 | ky = kx.copy() 210 | ''' 211 | K = array(meshgrid(kx, ky[sx], indexing='ij'), dtype=int) 212 | Kx = K[1] 213 | Ky = K[0] 214 | K2 = sum(K * K, 0, dtype=int) 215 | ''' 216 | 217 | LapHat = K2.copy() 218 | LapHat *= -1 219 | #K2[0][0] = 1 220 | K2 *=-1 221 | K2_inv = 1 / where(K2 == 0, 1, K2).astype(float) 222 | ikx_over_K2 = 1j * Kx * K2_inv 223 | iky_over_K2 = 1j * Ky * K2_inv 224 | 225 | kmax_dealias = 2. / 3. * (N / 2 + 1) 226 | dealias = array((abs(Kx) < kmax_dealias) * (abs(Ky) < kmax_dealias), dtype=bool) 227 | 228 | # ----Initialize Variables-------(hat denotes variables in Fourier space,) 229 | u_hat = zeros((Nx, Np), dtype=complex); 230 | v_hat = zeros((Nx, Np), dtype=complex); 231 | u = zeros((Np, Ny), dtype=float); 232 | v = zeros((Np, Ny), dtype=float); 233 | omega_hat0 = zeros((Nx, Np), dtype=complex); 234 | omega_hat1 = zeros((Nx, Np), dtype=complex); 235 | omega_hat = zeros((Nx, Np), dtype=complex); 236 | omega_hat_new = zeros((Nx, Np), dtype=complex); 237 | omega = zeros((Np, Ny), dtype=float); 238 | omega_kx = zeros((Np, Ny), dtype=float); 239 | omega_ky = zeros((Np, Ny), dtype=float); 240 | v_grad_omega = zeros((Np, Ny), dtype=float); 241 | psi_hat = zeros((Nx, Np), dtype=complex); 242 | rhs_hat = zeros((Nx, Np), dtype=complex); 243 | rhs = zeros((Np, Nx), dtype=float); 244 | visc_term_complex = zeros((Ny, Np), dtype=complex) 245 | visc_term_real = zeros((Np, Ny), dtype=float) 246 | v_grad_omega_hat = zeros((Ny, Np), dtype=complex) 247 | 248 | # generate initial velocity field 249 | omega_hat_t0 = IC_condition(Nx, Np, u, v, u_hat, v_hat, ICchoice, omega, omega_hat, X, Y) 250 | omega = ifftn_mpi(omega_hat_t0,omega) 251 | 252 | step = 1 253 | try: 254 | pbar = tqdm(total=int(Nstep)) 255 | except: 256 | pass 257 | 258 | # ----Main Loop----------- 259 | for n in range(Nstep + 1): 260 | if n == 0: 261 | # TODO check what needs to be done to use IC from matlab program 262 | # TODO very low convection? bug? 
compare with animation plot on github 263 | omega_hat = omega_hat_t0 264 | 265 | u_hat = -iky_over_K2 * omega_hat 266 | v_hat = ikx_over_K2 * omega_hat 267 | u = ifftn_mpi(u_hat, u) 268 | v = ifftn_mpi(v_hat, v) 269 | 270 | omega_kx = ifftn_mpi(1j*Kx * omega_hat, omega_kx) 271 | omega_ky = ifftn_mpi(1j*Ky * omega_hat, omega_ky) 272 | v_grad_omega = (u * omega_kx + v * omega_ky) 273 | v_grad_omega_hat = fftn_mpi(v_grad_omega, v_grad_omega_hat)*dealias 274 | visc_term_complex = -nu * K2 * omega_hat 275 | 276 | omega_hat_new = 1 / (1 / dt - 0.5 * nu * LapHat)*( 277 | (1 / dt + 0.5 * nu * LapHat)* omega_hat - v_grad_omega_hat); 278 | omega_hat = omega_hat_new.copy() 279 | 280 | t = t + dt; 281 | step += 1 282 | try: 283 | pbar.update(1) 284 | except: 285 | pass 286 | -------------------------------------------------------------------------------- /3D/MASTER/3D_advection_mpi.py: -------------------------------------------------------------------------------- 1 | from numpy import * 2 | from numpy.fft import fftfreq, fft, ifft, irfft2, fftn,fftshift,rfft,irfft 3 | from mpi4py import MPI 4 | import time 5 | from mpistuff.mpibase import work_arrays 6 | work_array = work_arrays() 7 | 8 | #TODO Make nice comments on all the functions and different parts of the script 9 | ############################################################################################################################################# 10 | 11 | ## USER CHOICE DNS ## 12 | 13 | ############################################################################################################################################# 14 | nu = 0.000625 15 | Tend = 1 16 | dt = 0.01 17 | N_tsteps = int(ceil(Tend/dt)) 18 | IC = 'isotropic2' 19 | L = 2*pi 20 | eta = 2*pi*((1/nu)**(-3/4)) 21 | N = int(2 ** 6) 22 | kf = 8 23 | N_three = (N**3) 24 | ############################################################################################################################################# 25 | 26 | ## Hard constants for mesh and MPI communication 27 | 28 | ############################################################################################################################################# 29 | 30 | N_half = int(N / 2) 31 | N_nyquist=int(N/2+1) 32 | P1 = 1 33 | # Initialize MPI communication and set number of processes along each axis. 34 | comm = MPI.COMM_WORLD 35 | num_processes = comm.Get_size() 36 | if num_processes > 1: 37 | ############ 38 | # USER CHOICE 39 | # Make sure this number is smaller than nr of core available. 40 | P1 = 2 41 | ############ 42 | mpitype = MPI.DOUBLE_COMPLEX 43 | rank = comm.Get_rank() 44 | P2 = int(num_processes/P1) 45 | N1 = int(N/P1) 46 | N2 = int(N/P2) 47 | commxz = comm.Split(rank/P1) 48 | commxy = comm.Split(rank%P1) 49 | xzrank = commxz.Get_rank() 50 | xyrank = commxy.Get_rank() 51 | 52 | # Declaration of physical mesh 53 | x1 = slice(xzrank*N1,(xzrank+1)*N1,1) 54 | x2 = slice(xyrank*N2,(xyrank+1)*N2,1) 55 | X = mgrid[x1,x2,:N].astype(float32)*L/N 56 | randomNr = random.rand(N1,N2,N) 57 | 58 | # Declaration of wave numbers (Fourier space) 59 | kx = fftfreq(N, 1. 
/ N) 60 | kz = kx[:(N_half)].copy(); 61 | kz[-1] *= -1 62 | k2 = slice(int(xyrank*N2),int((xyrank+1)*N2),1) 63 | k1 = slice(int(xzrank*N1/2),int(xzrank*N1/2 + N1/2),1) 64 | K = array(meshgrid(kx[k2],kx,kx[k1],indexing='ij'),dtype=int) 65 | 66 | 67 | 68 | # Preallocate arrays, decomposed using a 2D-pencil approach 69 | U = empty((3, N1, N2, N), dtype=float32) 70 | U_hat = empty((3, N2, N, int(N_half/P1)), dtype=complex) 71 | P = empty((N1, N2, N),dtype=float32) 72 | P_hat = empty((N2, N, int(N_half/P1)), dtype=complex) 73 | U_hat0 = empty((3, N2, N, int(N_half/P1)), dtype=complex) 74 | U_hat1 = empty((3, N2, N, int(N_half/P1)), dtype=complex) 75 | Uc_hat = empty((N, N2, int(N_half/P1)), dtype=complex) 76 | Uc_hatT = empty((N2, N, int(N_half/P1)), dtype=complex) 77 | U_mpi = empty((num_processes, N1, N2, N_half), dtype=complex) 78 | Uc_hat_x = empty((N, N2, int(N1/2)), dtype=complex) 79 | Uc_hat_y = empty((N2, N, int(N1/2)), dtype=complex) 80 | Uc_hat_z = empty((N1, N2, int(N_nyquist)), dtype=complex) 81 | Uc_hat_xr = empty((N, N2, int(N1/2)), dtype=complex) 82 | dU = empty((3, N2, N, int(N_half/P1)), dtype=complex) 83 | curl = empty((3, N1, N2, N),dtype=float32) 84 | 85 | # Precompute wave number operators and dealias matrix. 86 | K2 = sum(K * K, 0, dtype=int) 87 | K_over_K2 = K.astype(float32) / where(K2 == 0, 1, K2).astype(float32) 88 | kmax_dealias = 2. / 3. * (N_half) 89 | dealias = array( 90 | (abs(K[0]) < kmax_dealias) * (abs(K[1]) < kmax_dealias) * (abs(K[2]) < kmax_dealias), 91 | dtype=bool) 92 | k2_mask = where(K2 <= kf**2, 1, 0) 93 | 94 | 95 | # Runge Kutta constants 96 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 97 | b = [0.5, 0.5, 1.] 98 | 99 | def transform_Uc_zx(Uc_hat_z, Uc_hat_xr, P1): 100 | sz = Uc_hat_z.shape 101 | sx = Uc_hat_xr.shape 102 | Uc_hat_z[:, :, :-1] = rollaxis(Uc_hat_xr.reshape((P1, sz[0], sz[1], sx[2])), 0, 3).reshape((sz[0], sz[1], sz[2]-1)) 103 | return Uc_hat_z 104 | 105 | 106 | def transform_Uc_xy(Uc_hat_x, Uc_hat_y, P): 107 | sy = Uc_hat_y.shape 108 | sx = Uc_hat_x.shape 109 | Uc_hat_x[:] = rollaxis(Uc_hat_y.reshape((sy[0], P, sx[1], sx[2])), 1).reshape(sx) 110 | return Uc_hat_x 111 | 112 | 113 | def ifftn_mpi(fu,u): 114 | #TODO fix buffer error for np=1 115 | Uc_hat_y = work_array[((N2, N, int(N1/2)),complex, 0, False)] 116 | Uc_hat_z = work_array[((N1, N2, N_nyquist), complex, 0, False)] 117 | 118 | Uc_hat_x = work_array[((N, N2, int(N1 / 2)), complex, 0, False)] 119 | Uc_hat_xp = work_array[((N, N2, int(N1/2)), complex, 0, False)] 120 | xy_plane = work_array[((N, N2), complex, 0, False)] 121 | xy_recv = work_array[((N1, N2), complex, 0, False)] 122 | 123 | # Do first owned direction 124 | Uc_hat_y = ifft(fu, axis=1) 125 | # Transform to x 126 | Uc_hat_xp = transform_Uc_xy(Uc_hat_xp, Uc_hat_y, P2) 127 | 128 | ####### Not in-place 129 | # Communicate in xz-plane and do fft in x-direction 130 | Uc_hat_xp2 = work_array[((N, N2, int(N1/2)), complex, 1, False)] 131 | commxy.Alltoall([Uc_hat_xp, mpitype], [Uc_hat_xp2, mpitype]) 132 | Uc_hat_xp = ifft(Uc_hat_xp2, axis=0) 133 | 134 | Uc_hat_x2 = work_array[((N, N2, int(N1 / 2)), complex, 1, False)] 135 | Uc_hat_x2[:] = Uc_hat_xp[:, :, :int(N1 / 2)] 136 | 137 | # Communicate and transform in xy-plane all but k=N//2 138 | commxz.Alltoall([Uc_hat_x2, mpitype], [Uc_hat_x, mpitype]) 139 | ######################### 140 | 141 | Uc_hat_z[:] = transform_Uc_zx(Uc_hat_z, Uc_hat_x, P1) 142 | 143 | xy_plane[:] = Uc_hat_xp[:, :, -1] 144 | commxz.Scatter(xy_plane, xy_recv, root=P1 - 1) 145 | Uc_hat_z[:, :, -1] = xy_recv 146 | 
147 | # Do ifft for z-direction 148 | u = irfft(Uc_hat_z, axis=2) 149 | return u 150 | 151 | 152 | def fftn_mpi(u, fu): 153 | # FFT in three directions using MPI and the pencil decomposition 154 | Uc_hat_z[:]=rfft(u,axis=2) 155 | 156 | # Transform to x direction neglecting neglecting k=N/2 (Nyquist) 157 | Uc_hat_x[:] = rollaxis(Uc_hat_z[:,:,:-1].reshape((N1,N2,P1,int(N1/2))),2).reshape(Uc_hat_x.shape) 158 | 159 | # Communicate and do FFT in x-direction 160 | commxz.Alltoall([Uc_hat_x,mpitype],[Uc_hat_xr,mpitype]) 161 | Uc_hat_x[:]=fft(Uc_hat_xr,axis=0) 162 | 163 | # Communicate and do fft in y-direction 164 | commxy.Alltoall([Uc_hat_x, mpitype], [Uc_hat_xr, mpitype]) 165 | Uc_hat_y[:] = rollaxis(Uc_hat_xr.reshape((P2,N2,N2,int(N_half/P1))),1).reshape(Uc_hat_y.shape) 166 | 167 | fu[:]=fft(Uc_hat_y,axis=1) 168 | return fu 169 | 170 | def Cross(a, b, c): 171 | c[0] = fftn_mpi(a[1]*b[2]-a[2]*b[1], c[0]) 172 | c[1] = fftn_mpi(a[2]*b[0]-a[0]*b[2], c[1]) 173 | c[2] = fftn_mpi(a[0]*b[1]-a[1]*b[0], c[2]) 174 | return c 175 | #@profile 176 | def Curl(a, c): 177 | c[2] = ifftn_mpi(1j*(K[0]*a[1]-K[1]*a[0]), c[2]) 178 | c[1] = ifftn_mpi(1j*(K[2]*a[0]-K[0]*a[2]), c[1]) 179 | c[0] = ifftn_mpi(1j*(K[1]*a[2]-K[2]*a[1]), c[0]) 180 | return c 181 | 182 | 183 | def initialize2(rank,K,dealias,K2,N,U_hat): 184 | random.seed(rank) 185 | k = sqrt(K2) 186 | k = where(k == 0, 1, k) 187 | kk = K2.copy() 188 | kk = where(kk == 0, 1, kk) 189 | k1, k2, k3 = K[0], K[1], K[2] 190 | ksq = sqrt(k1 ** 2 + k2 ** 2) 191 | ksq = where(ksq == 0, 1, ksq) 192 | 193 | if (N == (2 ** 5)): 194 | C = 260 195 | a = 2.5 196 | elif (N == (2 ** 6)): 197 | C = 2600 198 | a = 3.5 199 | elif (N == (2 ** 7)): 200 | C = 5000 201 | a = 3.5 202 | elif(N==(2**8)): 203 | C=2600 204 | a=3.5 205 | elif (N == (2 ** 9)): 206 | C = 10000 207 | a = 9.5 208 | Ek = (C*abs(k)*2*N_three/((2*pi)**3))*exp((-abs(kk))/(a**2)) 209 | # theta1, theta2, phi, alpha and beta from [1] 210 | theta1, theta2, phi = random.sample(U_hat.shape) * 2j * pi 211 | alpha = sqrt(Ek / 4. / pi / kk) * exp(1j * theta1) * cos(phi) 212 | beta = sqrt(Ek / 4. / pi / kk) * exp(1j * theta2) * sin(phi) 213 | U_hat[0] = (alpha * k * k2 + beta * k1 * k3) / (k * ksq) 214 | U_hat[1] = (beta * k2 * k3 - alpha * k * k1) / (k * ksq) 215 | U_hat[2] = beta * ksq / k 216 | 217 | # project to zero divergence 218 | U_hat[:] -= (K[0] * U_hat[0] + K[1] * U_hat[1] + K[2] * U_hat[2]) * K_over_K2 219 | 220 | ''' 221 | energy = 0.5 * integralEnergy(comm,U_hat) 222 | U_hat *= sqrt(target / energy) 223 | energy= 0.5 * integralEnergy(comm,U_hat) 224 | ''' 225 | return U_hat 226 | 227 | 228 | 229 | def computeRHS(dU, rk): 230 | # Compute residual of time integral as specified in pseudo spectral Galerkin method 231 | # TODO add forcing term here? 
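# A hedged sketch of one way to close the TODO above, based on k2_mask and the
# commented-out line at the end of this function: inject energy only into the
# low-wavenumber band K2 <= kf**2.  Here `force` (target injection rate) and
# `kinBand` (kinetic energy currently held in the forced band) are assumed to
# be computed elsewhere; the line below is illustrative only:
#
#   dU += force * U_hat * k2_mask / (2 * kinBand)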
232 | if rk > 0: 233 | for i in range(3): 234 | U[i] = ifftn_mpi(U_hat[i], U[i]) 235 | 236 | curl[:] = Curl(U_hat, curl) 237 | dU = Cross(U, curl, dU) 238 | dU *= dealias 239 | P_hat[:] = sum(dU * K_over_K2, 0, out=P_hat) 240 | dU -= P_hat * K 241 | dU -= nu * K2 * U_hat 242 | 243 | #dU += (force*U_hat*k2_mask/(2*kinBand)) 244 | return dU 245 | 246 | 247 | 248 | def mpiPrintIteration(tstep): 249 | if rank == 0: 250 | # progressfile.write("tstep= %d\r\n" % (tstep),flush=True) 251 | print('tstep= %d\r\n' % (tstep), flush=True) 252 | 253 | 254 | 255 | if __name__ == '__main__': 256 | # initial condition and transformation to Fourier space 257 | if IC == 'isotropic2': 258 | U_hat = initialize2(rank, K, dealias, K2,N,U_hat) 259 | for i in range(3): 260 | U[i] = ifftn_mpi(U_hat[i], U[i]) 261 | if IC == 'TG': 262 | U[0] = sin(X[0]) * cos(X[1]) * cos(X[2]) 263 | U[1] = -cos(X[0]) * sin(X[1]) * cos(X[2]) 264 | U[2] = 0 265 | for i in range(3): 266 | U_hat[i] = fftn_mpi(U[i], U_hat[i]) 267 | 268 | 269 | t = 0.0 270 | tstep = 0 271 | speedList = [] 272 | 273 | while t < Tend - 1e-8: 274 | # Time integral using a Runge Kutta scheme 275 | t += dt; 276 | U_hat1[:] = U_hat0[:] = U_hat 277 | 278 | for rk in range(4): 279 | # Run RK4 temporal integral method 280 | 281 | if rank==0: 282 | start = time.time() 283 | dU = computeRHS(dU, rk) 284 | if rank==0: 285 | end = time.time() 286 | speed=(end-start) 287 | speedList.append(speed) 288 | if rk < 3: U_hat[:] = U_hat0 + b[rk] * dt * dU 289 | U_hat1[:] += a[rk] * dt * dU 290 | 291 | U_hat[:] = U_hat1[:] 292 | 293 | tstep += 1 294 | mpiPrintIteration(tstep) 295 | if rank==0: 296 | save('./speed_files/speedTest_t100_np'+str(num_processes)+'.npy',speedList) 297 | -------------------------------------------------------------------------------- /3D/MASTER/3D_dns_speedTest_Vilje.py: -------------------------------------------------------------------------------- 1 | from numpy import * 2 | from numpy.fft import fftfreq, fft, ifft, irfft2, fftn,fftshift,rfft,irfft 3 | from mpi4py import MPI 4 | import time 5 | from mpistuff.mpibase import work_arrays 6 | work_array = work_arrays() 7 | 8 | #TODO Make nice comments on all the functions and different parts of the script 9 | ############################################################################################################################################# 10 | 11 | ## USER CHOICE DNS ## 12 | 13 | ############################################################################################################################################# 14 | nu = 0.000625 15 | Tend = 1 16 | dt = 0.01 17 | N_tsteps = int(ceil(Tend/dt)) 18 | IC = 'isotropic2' 19 | L = 2*pi 20 | eta = 2*pi*((1/nu)**(-3/4)) 21 | N = int(2 ** 6) 22 | kf = 8 23 | N_three = (N**3) 24 | ############################################################################################################################################# 25 | 26 | ## Hard constants for mesh and MPI communication 27 | 28 | ############################################################################################################################################# 29 | 30 | N_half = int(N / 2) 31 | N_nyquist=int(N/2+1) 32 | P1 = 1 33 | # Initialize MPI communication and set number of processes along each axis. 34 | comm = MPI.COMM_WORLD 35 | num_processes = comm.Get_size() 36 | if num_processes > 1: 37 | ############ 38 | # USER CHOICE 39 | # Make sure this number is smaller than nr of core available. 
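# Hedged note (not in the original): besides staying below the core count,
# P1 must divide both the number of MPI ranks and N, otherwise the pencil
# sizes N1 = N/P1 and N2 = N/P2 computed below are not integers.  A sanity
# check one could add after the assignment below (illustrative only):
#
#   assert num_processes % P1 == 0, 'P1 must divide the number of processes'
#   assert N % P1 == 0 and N % (num_processes // P1) == 0, 'P1 and P2 must divide N'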
40 | P1 = 2 41 | ############ 42 | mpitype = MPI.DOUBLE_COMPLEX 43 | rank = comm.Get_rank() 44 | P2 = int(num_processes/P1) 45 | N1 = int(N/P1) 46 | N2 = int(N/P2) 47 | commxz = comm.Split(rank/P1) 48 | commxy = comm.Split(rank%P1) 49 | xzrank = commxz.Get_rank() 50 | xyrank = commxy.Get_rank() 51 | 52 | # Declaration of physical mesh 53 | x1 = slice(xzrank*N1,(xzrank+1)*N1,1) 54 | x2 = slice(xyrank*N2,(xyrank+1)*N2,1) 55 | X = mgrid[x1,x2,:N].astype(float32)*L/N 56 | randomNr = random.rand(N1,N2,N) 57 | 58 | # Declaration of wave numbers (Fourier space) 59 | kx = fftfreq(N, 1. / N) 60 | kz = kx[:(N_half)].copy(); 61 | kz[-1] *= -1 62 | k2 = slice(int(xyrank*N2),int((xyrank+1)*N2),1) 63 | k1 = slice(int(xzrank*N1/2),int(xzrank*N1/2 + N1/2),1) 64 | K = array(meshgrid(kx[k2],kx,kx[k1],indexing='ij'),dtype=int) 65 | 66 | 67 | 68 | # Preallocate arrays, decomposed using a 2D-pencil approach 69 | U = empty((3, N1, N2, N), dtype=float32) 70 | U_hat = empty((3, N2, N, int(N_half/P1)), dtype=complex) 71 | P = empty((N1, N2, N),dtype=float32) 72 | P_hat = empty((N2, N, int(N_half/P1)), dtype=complex) 73 | U_hat0 = empty((3, N2, N, int(N_half/P1)), dtype=complex) 74 | U_hat1 = empty((3, N2, N, int(N_half/P1)), dtype=complex) 75 | Uc_hat = empty((N, N2, int(N_half/P1)), dtype=complex) 76 | Uc_hatT = empty((N2, N, int(N_half/P1)), dtype=complex) 77 | U_mpi = empty((num_processes, N1, N2, N_half), dtype=complex) 78 | Uc_hat_x = empty((N, N2, int(N1/2)), dtype=complex) 79 | Uc_hat_y = empty((N2, N, int(N1/2)), dtype=complex) 80 | Uc_hat_z = empty((N1, N2, int(N_nyquist)), dtype=complex) 81 | Uc_hat_xr = empty((N, N2, int(N1/2)), dtype=complex) 82 | dU = empty((3, N2, N, int(N_half/P1)), dtype=complex) 83 | curl = empty((3, N1, N2, N),dtype=float32) 84 | 85 | # Precompute wave number operators and dealias matrix. 86 | K2 = sum(K * K, 0, dtype=int) 87 | K_over_K2 = K.astype(float32) / where(K2 == 0, 1, K2).astype(float32) 88 | kmax_dealias = 2. / 3. * (N_half) 89 | dealias = array( 90 | (abs(K[0]) < kmax_dealias) * (abs(K[1]) < kmax_dealias) * (abs(K[2]) < kmax_dealias), 91 | dtype=bool) 92 | k2_mask = where(K2 <= kf**2, 1, 0) 93 | 94 | 95 | # Runge Kutta constants 96 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 97 | b = [0.5, 0.5, 1.] 
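# Note (hedged): a and b are the weights of the classical fourth-order
# Runge-Kutta scheme used in the main loop at the bottom of this file.  There,
# U_hat holds the stage value fed into the next right-hand-side evaluation and
# U_hat1 accumulates the weighted stages, so one full step is equivalent to
# the textbook update (k_i denoting the four RHS evaluations):
#
#   U_hat(t + dt) = U_hat(t) + dt * (k1/6 + k2/3 + k3/3 + k4/6)
#   with  k1 = RHS(U_hat),            k2 = RHS(U_hat + dt/2 * k1),
#         k3 = RHS(U_hat + dt/2 * k2), k4 = RHS(U_hat + dt * k3)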
98 | 99 | def transform_Uc_zx(Uc_hat_z, Uc_hat_xr, P1): 100 | sz = Uc_hat_z.shape 101 | sx = Uc_hat_xr.shape 102 | Uc_hat_z[:, :, :-1] = rollaxis(Uc_hat_xr.reshape((P1, sz[0], sz[1], sx[2])), 0, 3).reshape((sz[0], sz[1], sz[2]-1)) 103 | return Uc_hat_z 104 | 105 | 106 | def transform_Uc_xy(Uc_hat_x, Uc_hat_y, P): 107 | sy = Uc_hat_y.shape 108 | sx = Uc_hat_x.shape 109 | Uc_hat_x[:] = rollaxis(Uc_hat_y.reshape((sy[0], P, sx[1], sx[2])), 1).reshape(sx) 110 | return Uc_hat_x 111 | 112 | 113 | def ifftn_mpi(fu,u): 114 | #TODO fix buffer error for np=1 115 | Uc_hat_y = work_array[((N2, N, int(N1/2)),complex, 0, False)] 116 | Uc_hat_z = work_array[((N1, N2, N_nyquist), complex, 0, False)] 117 | 118 | Uc_hat_x = work_array[((N, N2, int(N1 / 2)), complex, 0, False)] 119 | Uc_hat_xp = work_array[((N, N2, int(N1/2)), complex, 0, False)] 120 | xy_plane = work_array[((N, N2), complex, 0, False)] 121 | xy_recv = work_array[((N1, N2), complex, 0, False)] 122 | 123 | # Do first owned direction 124 | Uc_hat_y = ifft(fu, axis=1) 125 | # Transform to x 126 | Uc_hat_xp = transform_Uc_xy(Uc_hat_xp, Uc_hat_y, P2) 127 | 128 | ####### Not in-place 129 | # Communicate in xz-plane and do fft in x-direction 130 | Uc_hat_xp2 = work_array[((N, N2, int(N1/2)), complex, 1, False)] 131 | commxy.Alltoall([Uc_hat_xp, mpitype], [Uc_hat_xp2, mpitype]) 132 | Uc_hat_xp = ifft(Uc_hat_xp2, axis=0) 133 | 134 | Uc_hat_x2 = work_array[((N, N2, int(N1 / 2)), complex, 1, False)] 135 | Uc_hat_x2[:] = Uc_hat_xp[:, :, :int(N1 / 2)] 136 | 137 | # Communicate and transform in xy-plane all but k=N//2 138 | commxz.Alltoall([Uc_hat_x2, mpitype], [Uc_hat_x, mpitype]) 139 | ######################### 140 | 141 | Uc_hat_z[:] = transform_Uc_zx(Uc_hat_z, Uc_hat_x, P1) 142 | 143 | xy_plane[:] = Uc_hat_xp[:, :, -1] 144 | commxz.Scatter(xy_plane, xy_recv, root=P1 - 1) 145 | Uc_hat_z[:, :, -1] = xy_recv 146 | 147 | # Do ifft for z-direction 148 | u = irfft(Uc_hat_z, axis=2) 149 | return u 150 | 151 | 152 | def fftn_mpi(u, fu): 153 | # FFT in three directions using MPI and the pencil decomposition 154 | Uc_hat_z[:]=rfft(u,axis=2) 155 | 156 | # Transform to x direction neglecting neglecting k=N/2 (Nyquist) 157 | Uc_hat_x[:] = rollaxis(Uc_hat_z[:,:,:-1].reshape((N1,N2,P1,int(N1/2))),2).reshape(Uc_hat_x.shape) 158 | 159 | # Communicate and do FFT in x-direction 160 | commxz.Alltoall([Uc_hat_x,mpitype],[Uc_hat_xr,mpitype]) 161 | Uc_hat_x[:]=fft(Uc_hat_xr,axis=0) 162 | 163 | # Communicate and do fft in y-direction 164 | commxy.Alltoall([Uc_hat_x, mpitype], [Uc_hat_xr, mpitype]) 165 | Uc_hat_y[:] = rollaxis(Uc_hat_xr.reshape((P2,N2,N2,int(N_half/P1))),1).reshape(Uc_hat_y.shape) 166 | 167 | fu[:]=fft(Uc_hat_y,axis=1) 168 | return fu 169 | 170 | def Cross(a, b, c): 171 | c[0] = fftn_mpi(a[1]*b[2]-a[2]*b[1], c[0]) 172 | c[1] = fftn_mpi(a[2]*b[0]-a[0]*b[2], c[1]) 173 | c[2] = fftn_mpi(a[0]*b[1]-a[1]*b[0], c[2]) 174 | return c 175 | #@profile 176 | def Curl(a, c): 177 | c[2] = ifftn_mpi(1j*(K[0]*a[1]-K[1]*a[0]), c[2]) 178 | c[1] = ifftn_mpi(1j*(K[2]*a[0]-K[0]*a[2]), c[1]) 179 | c[0] = ifftn_mpi(1j*(K[1]*a[2]-K[2]*a[1]), c[0]) 180 | return c 181 | 182 | 183 | def initialize2(rank,K,dealias,K2,N,U_hat): 184 | random.seed(rank) 185 | k = sqrt(K2) 186 | k = where(k == 0, 1, k) 187 | kk = K2.copy() 188 | kk = where(kk == 0, 1, kk) 189 | k1, k2, k3 = K[0], K[1], K[2] 190 | ksq = sqrt(k1 ** 2 + k2 ** 2) 191 | ksq = where(ksq == 0, 1, ksq) 192 | 193 | if (N == (2 ** 5)): 194 | C = 260 195 | a = 2.5 196 | elif (N == (2 ** 6)): 197 | C = 2600 198 | a = 3.5 199 | elif (N 
== (2 ** 7)): 200 | C = 5000 201 | a = 3.5 202 | elif(N==(2**8)): 203 | C=2600 204 | a=3.5 205 | elif (N == (2 ** 9)): 206 | C = 10000 207 | a = 9.5 208 | Ek = (C*abs(k)*2*N_three/((2*pi)**3))*exp((-abs(kk))/(a**2)) 209 | # theta1, theta2, phi, alpha and beta from [1] 210 | theta1, theta2, phi = random.sample(U_hat.shape) * 2j * pi 211 | alpha = sqrt(Ek / 4. / pi / kk) * exp(1j * theta1) * cos(phi) 212 | beta = sqrt(Ek / 4. / pi / kk) * exp(1j * theta2) * sin(phi) 213 | U_hat[0] = (alpha * k * k2 + beta * k1 * k3) / (k * ksq) 214 | U_hat[1] = (beta * k2 * k3 - alpha * k * k1) / (k * ksq) 215 | U_hat[2] = beta * ksq / k 216 | 217 | # project to zero divergence 218 | U_hat[:] -= (K[0] * U_hat[0] + K[1] * U_hat[1] + K[2] * U_hat[2]) * K_over_K2 219 | 220 | ''' 221 | energy = 0.5 * integralEnergy(comm,U_hat) 222 | U_hat *= sqrt(target / energy) 223 | energy= 0.5 * integralEnergy(comm,U_hat) 224 | ''' 225 | return U_hat 226 | 227 | 228 | 229 | def computeRHS(dU, rk): 230 | # Compute residual of time integral as specified in pseudo spectral Galerkin method 231 | # TODO add forcing term here? 232 | if rk > 0: 233 | for i in range(3): 234 | U[i] = ifftn_mpi(U_hat[i], U[i]) 235 | 236 | curl[:] = Curl(U_hat, curl) 237 | dU = Cross(U, curl, dU) 238 | dU *= dealias 239 | P_hat[:] = sum(dU * K_over_K2, 0, out=P_hat) 240 | dU -= P_hat * K 241 | dU -= nu * K2 * U_hat 242 | 243 | #dU += (force*U_hat*k2_mask/(2*kinBand)) 244 | return dU 245 | 246 | 247 | 248 | def mpiPrintIteration(tstep): 249 | if rank == 0: 250 | # progressfile.write("tstep= %d\r\n" % (tstep),flush=True) 251 | print('tstep= %d\r\n' % (tstep), flush=True) 252 | 253 | 254 | 255 | if __name__ == '__main__': 256 | # initial condition and transformation to Fourier space 257 | if IC == 'isotropic2': 258 | U_hat = initialize2(rank, K, dealias, K2,N,U_hat) 259 | for i in range(3): 260 | U[i] = ifftn_mpi(U_hat[i], U[i]) 261 | if IC == 'TG': 262 | U[0] = sin(X[0]) * cos(X[1]) * cos(X[2]) 263 | U[1] = -cos(X[0]) * sin(X[1]) * cos(X[2]) 264 | U[2] = 0 265 | for i in range(3): 266 | U_hat[i] = fftn_mpi(U[i], U_hat[i]) 267 | 268 | 269 | t = 0.0 270 | tstep = 0 271 | speedList = [] 272 | 273 | while t < Tend - 1e-8: 274 | # Time integral using a Runge Kutta scheme 275 | t += dt; 276 | U_hat1[:] = U_hat0[:] = U_hat 277 | 278 | for rk in range(4): 279 | # Run RK4 temporal integral method 280 | 281 | if rank==0: 282 | start = time.time() 283 | dU = computeRHS(dU, rk) 284 | if rank==0: 285 | end = time.time() 286 | speed=(end-start) 287 | speedList.append(speed) 288 | if rk < 3: U_hat[:] = U_hat0 + b[rk] * dt * dU 289 | U_hat1[:] += a[rk] * dt * dU 290 | 291 | U_hat[:] = U_hat1[:] 292 | 293 | tstep += 1 294 | mpiPrintIteration(tstep) 295 | if rank==0: 296 | save('./speed_files/speedTest_t100_np'+str(num_processes)+'.npy',speedList) 297 | -------------------------------------------------------------------------------- /3D/MASTER/3d_dns_SLAB.py: -------------------------------------------------------------------------------- 1 | from time import time 2 | from numpy import * 3 | from numpy.fft import fftfreq, fft, ifft, irfft2, rfft2 4 | from mpi4py import MPI 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | 9 | nu = 0.000625 10 | T = 0.1 11 | dt = 0.01 12 | N = 2**4 13 | comm = MPI.COMM_WORLD 14 | num_processes = comm.Get_size() 15 | rank = comm.Get_rank() 16 | Np = N // num_processes 17 | X = mgrid[rank*Np:(rank+1)*Np, :N, :N].astype(float)*2*pi/N 18 | U = empty((3, Np, N, N)) 19 | U_hat = empty((3, N, Np, N//2+1), dtype=complex) 20 | P = empty((Np, N, N)) 
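# Added note (not in the original file): in this slab decomposition the
# real-space arrays above (U, P) are split along the first (x) index, each rank
# holding Np = N // num_processes planes, while the spectral arrays declared
# next are split along their second index and carry only N//2 + 1 points in the
# last direction because a real-to-complex FFT is taken along z.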
21 | P_hat = empty((N, Np, N//2+1), dtype=complex) 22 | U_hat0 = empty((3, N, Np, N//2+1), dtype=complex) 23 | U_hat1 = empty((3, N, Np, N//2+1), dtype=complex) 24 | dU = empty((3, N, Np, N//2+1), dtype=complex) 25 | Uc_hat = empty((N, Np, N//2+1), dtype=complex) 26 | Uc_hatT = empty((Np, N, N//2+1), dtype=complex) 27 | curl = empty((3, Np, N, N)) 28 | kx = fftfreq(N, 1./N) 29 | kz = kx[:(N//2+1)].copy() 30 | kz[-1] *= -1 31 | K = array(meshgrid(kx, kx[rank*Np:(rank+1)*Np], kz, indexing='ij'), dtype=int) 32 | K2 = sum(K*K, 0, dtype=int) 33 | K_over_K2 = K.astype(float) / where(K2 == 0, 1, K2).astype(float) 34 | kmax_dealias = 2./3.*(N//2+1) 35 | dealias = array((abs(K[0]) < kmax_dealias)*(abs(K[1]) < kmax_dealias)* 36 | (abs(K[2]) < kmax_dealias), dtype=bool) 37 | a = [1./6., 1./3., 1./3., 1./6.] 38 | b = [0.5, 0.5, 1.] 39 | 40 | def fftn_mpi(u, fu): 41 | Uc_hatT[:] = rfft2(u, axes=(1, 2)) 42 | 43 | fu[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np, N//2+1), 1).reshape(fu.shape) 44 | comm.Alltoall(MPI.IN_PLACE, [fu, MPI.DOUBLE_COMPLEX]) 45 | fu[:] = fft(fu, axis=0) 46 | return fu 47 | 48 | def ifftn_mpi(fu, u): 49 | Uc_hat[:] = ifft(fu, axis=0) 50 | comm.Alltoall(MPI.IN_PLACE, [Uc_hat, MPI.DOUBLE_COMPLEX]) 51 | Uc_hatT[:] = rollaxis(Uc_hat.reshape((num_processes, Np, Np, N//2+1)), 1).reshape(Uc_hatT.shape) 52 | u[:] = irfft2(Uc_hatT, axes=(1, 2)) 53 | return u 54 | 55 | def Cross(a, b, c): 56 | c[0] = fftn_mpi(a[1]*b[2]-a[2]*b[1], c[0]) 57 | c[1] = fftn_mpi(a[2]*b[0]-a[0]*b[2], c[1]) 58 | c[2] = fftn_mpi(a[0]*b[1]-a[1]*b[0], c[2]) 59 | return c 60 | #@profile 61 | def Curl(a, c): 62 | c[2] = ifftn_mpi(1j*(K[0]*a[1]-K[1]*a[0]), c[2]) 63 | c[1] = ifftn_mpi(1j*(K[2]*a[0]-K[0]*a[2]), c[1]) 64 | c[0] = ifftn_mpi(1j*(K[1]*a[2]-K[2]*a[1]), c[0]) 65 | return c 66 | #@profile 67 | def ComputeRHS(dU, rk): 68 | if rk > 0: 69 | for i in range(3): 70 | U[i] = ifftn_mpi(U_hat[i], U[i]) 71 | curl[:] = Curl(U_hat, curl) 72 | dU = Cross(U, curl, dU) 73 | dU *= dealias 74 | P_hat[:] = sum(dU*K_over_K2, 0, out=P_hat) 75 | dU -= P_hat*K 76 | dU -= nu*K2*U_hat 77 | return dU 78 | 79 | U[0] = sin(X[0])*cos(X[1])*cos(X[2]) 80 | U[1] = -cos(X[0])*sin(X[1])*cos(X[2]) 81 | U[2] = 0 82 | 83 | 84 | 85 | for i in range(3): 86 | U_hat[i] = fftn_mpi(U[i], U_hat[i]) 87 | 88 | 89 | for i in range(3): 90 | U[i] = ifftn_mpi(U_hat[i], U[i]) 91 | 92 | 93 | 94 | ''' 95 | 96 | t = 0.0 97 | tstep = 0 98 | t0 = time() 99 | while t < T-1e-8: 100 | t += dt 101 | tstep += 1 102 | U_hat1[:] = U_hat0[:] = U_hat 103 | for rk in range(4): 104 | dU = ComputeRHS(dU, rk) 105 | if rk < 3: 106 | U_hat[:] = U_hat0 + b[rk]*dt*dU 107 | U_hat1[:] += a[rk]*dt*dU 108 | U_hat[:] = U_hat1[:] 109 | for i in range(3): 110 | U[i] = ifftn_mpi(U_hat[i], U[i]) 111 | 112 | k = comm.reduce(0.5*sum(U*U)*(1./N)**3) 113 | if rank == 0: 114 | print("Time = {}".format(time()-t0)) 115 | assert round(k - 0.124953117517, 7) == 0 116 | ''' -------------------------------------------------------------------------------- /3D/MASTER/MPI_func/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielhalvorsen/Project_Turbulence_Modelling/4cb925604ace9f065017e0cbb41ec4b19a00f065/3D/MASTER/MPI_func/__init__.py -------------------------------------------------------------------------------- /3D/MASTER/MPI_func/mpibase.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from mpi4py import MPI 3 | import collections 4 | 5 | # Possible 
way to give numpy arrays attributes... 6 | class Empty(np.ndarray): 7 | #"""Numpy empty array with additional info dictionary to hold attributes 8 | #""" 9 | def __new__(subtype, shape, dtype=np.float, info={}): 10 | obj = np.ndarray.__new__(subtype, shape, dtype) 11 | obj.info = info 12 | return obj 13 | 14 | def __array_finalize__(self, obj): 15 | if obj is None: return 16 | self.info = getattr(obj, 'info', {}) 17 | 18 | class Zeros(np.ndarray): 19 | #"""Numpy zeros array with additional info dictionary to hold attributes 20 | #""" 21 | def __new__(subtype, shape, dtype=float, info={}): 22 | obj = np.ndarray.__new__(subtype, shape, dtype) 23 | obj.fill(0) 24 | obj.info = info 25 | return obj 26 | 27 | def __array_finalize__(self, obj): 28 | if obj is None: return 29 | self.info = getattr(obj, 'info', {}) 30 | 31 | 32 | 33 | try: 34 | import pyfftw 35 | def empty(N, dtype=np.float, bytes=16): 36 | return pyfftw.empty_aligned(N, dtype=dtype, n=bytes) 37 | 38 | def zeros(N, dtype=np.float, bytes=16): 39 | return pyfftw.zeros_aligned(N, dtype=dtype, n=bytes) 40 | 41 | except ImportError: 42 | def empty(N, dtype=np.float, bytes=None): 43 | return Empty(N, dtype=dtype) 44 | 45 | def zeros(N, dtype=np.float, bytes=None): 46 | return Zeros(N, dtype=dtype) 47 | 48 | class work_array_dict(dict): 49 | """Dictionary of work arrays indexed by their shape, type and an indicator i.""" 50 | def __missing__(self, key): 51 | shape, dtype, i = key 52 | a = zeros(shape, dtype=dtype) 53 | self[key] = a 54 | return self[key] 55 | 56 | class work_arrays(collections.MutableMapping): 57 | """A dictionary to hold numpy work arrays. 58 | The dictionary allows two types of keys for the same item. 59 | keys: 60 | - (shape, dtype, index (, fillzero)), where shape is tuple, dtype is np.dtype and 61 | index an integer 62 | - (ndarray, index (, fillzero)), where ndarray is a numpy array and index is 63 | an integer 64 | fillzero is an optional bool that determines 65 | whether the array is initialised to zero 66 | Usage: 67 | To create two real work arrays of shape (3,3), do: 68 | - work = workarrays() 69 | - a = work[((3,3), np.float, 0)] 70 | - b = work[(a, 1)] 71 | Returns: 72 | Numpy array of given shape. The array is by default initialised to zero, but this 73 | can be overridden using the fillzero argument. 
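    Added example (illustrative, not from the original docstring):
    - c = work[((3, 3), np.float, 0, False)]
    returns the same cached buffer but skips the zero fill, because the trailing
    False is forwarded as the fillzero flag by __keytransform__.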
74 | """ 75 | 76 | def __init__(self): 77 | self.store = work_array_dict() 78 | self.fillzero = True 79 | 80 | def __getitem__(self, key): 81 | val = self.store[self.__keytransform__(key)] 82 | if self.fillzero is True: val.fill(0) 83 | return val 84 | 85 | def __setitem__(self, key, value): 86 | self.store[self.__keytransform__(key)] = value 87 | 88 | def __delitem__(self, key): 89 | del self.store[self.__keytransform__(key)] 90 | 91 | def __iter__(self): 92 | return iter(self.store) 93 | 94 | def __len__(self): 95 | return len(self.store) 96 | 97 | def values(self): 98 | raise TypeError('Work arrays not iterable') 99 | 100 | def __keytransform__(self, key): 101 | if isinstance(key[0], np.ndarray): 102 | shape = key[0].shape 103 | dtype = key[0].dtype 104 | i = key[1] 105 | zero = True if len(key) == 2 else key[2] 106 | 107 | elif isinstance(key[0], tuple): 108 | if len(key) == 3: 109 | shape, dtype, i = key 110 | zero = True 111 | 112 | elif len(key) == 4: 113 | shape, dtype, i, zero = key 114 | 115 | else: 116 | raise TypeError("Wrong type of key for work array") 117 | 118 | assert isinstance(zero, bool) 119 | assert isinstance(i, int) 120 | self.fillzero = zero 121 | return (shape, np.dtype(dtype), i) 122 | 123 | def datatypes(precision): 124 | """Return datatypes associated with precision.""" 125 | assert precision in ("single", "double") 126 | return {"single": (np.float32, np.complex64, MPI.C_FLOAT_COMPLEX), 127 | "double": (np.float64, np.complex128, MPI.C_DOUBLE_COMPLEX)}[precision] -------------------------------------------------------------------------------- /3D/MASTER/Particle_mpi.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.ticker as tck 4 | try: 5 | from tqdm import tqdm 6 | except ImportError: 7 | pass 8 | from scipy.interpolate import RegularGridInterpolator 9 | from mpl_toolkits.mplot3d import Axes3D 10 | from numba import jit 11 | from mpi4py import MPI 12 | from functools import partial 13 | #from multiprocessing import Pool 14 | 15 | 16 | class Interpolator(): 17 | """ Interpolating the datasets velocity components using regular grid interpolator (linear) 18 | interpolation over a rectangular mesh. 19 | The memberfunction get_interpolators returns functions for the 20 | velocity components' interpolated value at arbitrary positions. 21 | 22 | Parameters 23 | ---------- 24 | dataset : xarray_type 25 | Data structure containing the oceanographic data. 26 | X : array_type 27 | Particle coordinates. 28 | t : datetime64_type 29 | Time. 
30 | ---------- 31 | """ 32 | 33 | def __init__(self, velocityField): 34 | self.dataset = velocityField 35 | self.L = 2*np.pi 36 | self.N = len(self.dataset[0]) 37 | self.x_vec = np.arange(0, self.N, 1) * self.L / self.N 38 | self.y_vec = np.arange(0, self.N, 1) * self.L / self.N 39 | self.z_vec = np.arange(0, self.N, 1) * self.L / self.N 40 | 41 | def get_interpolators(self, X,t): 42 | # Add a buffer of cells around the extent of the particle cloud 43 | if t<100: 44 | buf = 3 45 | else: 46 | buf=0 47 | # Find extent of particle cloud in terms of indices 48 | imax = np.searchsorted(self.x_vec, np.amax(X[0, :])) + buf 49 | imin = np.searchsorted(self.x_vec, np.amin(X[0, :])) - buf 50 | jmax = np.searchsorted(self.y_vec, np.amax(X[1, :])) + buf 51 | jmin = np.searchsorted(self.y_vec, np.amin(X[1, :])) - buf 52 | kmax = np.searchsorted(self.z_vec, np.amax(X[2, :])) + buf 53 | kmin = np.searchsorted(self.z_vec, np.amin(X[2, :])) - buf 54 | # Take out subset of array, to pass to RectBivariateSpline 55 | # Transpose to get regular order of coordinates (x,y) 56 | # Fill NaN values (land cells) with 0, otherwise 57 | # interpolation won't work 58 | u = self.dataset[0, imin:imax, jmin:jmax,kmin:kmax] 59 | v = self.dataset[1, imin:imax, jmin:jmax,kmin:kmax] 60 | w = self.dataset[2, imin:imax, jmin:jmax, kmin:kmax] 61 | self.dataset=None 62 | xslice = self.x_vec[imin:imax] 63 | yslice = self.y_vec[jmin:jmax] 64 | zslice = self.z_vec[kmin:kmax] 65 | # RectBivariateSpline returns a function-like object, 66 | # which can be called to get value at arbitrary position 67 | fu = RegularGridInterpolator((xslice,yslice,zslice),u,method='linear',bounds_error=False,fill_value=None) 68 | del(u) 69 | fv = RegularGridInterpolator((xslice,yslice,zslice),v,method='linear',bounds_error=False,fill_value=None) 70 | del(v) 71 | fw = RegularGridInterpolator((xslice,yslice,zslice),w,method='linear',bounds_error=False,fill_value=None) 72 | del(w) 73 | return fu, fv, fw 74 | 75 | def __call__(self, X,t): 76 | #X = np.where(X > (L - ldx), X - L, X) 77 | #X = np.where(X < 0, X + (L-ldx), X) 78 | 79 | # get index of current time in dataset 80 | # get interpolating functions, 81 | # covering the extent of the particle 82 | fu, fv, fw = self.get_interpolators(X,t) 83 | # Evaluate velocity at position(x[:], y[:]) 84 | X = X.transpose() 85 | 86 | 87 | vx = fu(X) 88 | del(fu) 89 | vy = fv(X) 90 | del(fv) 91 | vz = fw(X) 92 | del(fw) 93 | return np.array([vx, vy, vz]) 94 | 95 | 96 | def rk2(x, t, h, f): 97 | """ A second order Rung-Kutta method. 98 | The Explicit Trapezoid Method. 99 | 100 | Parameters: 101 | ----------- 102 | x : coordinates (as an array of vectors) 103 | h : timestep 104 | f : A function that returns the derivatives 105 | Returns: 106 | Next coordinates (as an array of vectors) 107 | ----------- 108 | """ 109 | 110 | # Note: t and h have actual time units. 111 | # For multiplying with h, we need to 112 | # convert to number of seconds: 113 | dt = h 114 | # "Slopes" 115 | k1 = f(x, t) 116 | k2 = f(x + k1 * dt, t + h) 117 | # Calculate next position 118 | x_ = x + dt * (k1 + k2) / 2 119 | return x_ 120 | 121 | 122 | def Euler(x, t, h, f): 123 | """ A first order Rung-Kutta method. 124 | The Explicit Euler method. 125 | 126 | Parameters: 127 | ----------- 128 | x : coordinates (as an array of vectors) 129 | h : timestep 130 | f : A function that returns the derivatives 131 | Returns: 132 | Next coordinates (as an array of vectors) 133 | ----------- 134 | """ 135 | 136 | # Note: t and h have actual time units. 
137 | # For multiplying with h, we need to 138 | # convert to number of seconds: 139 | dt = h 140 | # Calculate next position 141 | #TODO find way to use this variable only once 142 | Np = np.shape(x)[1] 143 | x_ = x + dt * f(x, t) 144 | x_ += np.random.normal(loc=0,scale=np.sqrt(dt),size=(3,Np))*np.sqrt(2*0.0005) 145 | return x_ 146 | 147 | 148 | @jit(nopython=True,fastmath=True) 149 | def periodicBC(X,L,ldx): 150 | X = np.where(X > (L - ldx), X - (L - ldx), X) 151 | X = np.where(X < 0, X + (L - ldx), X) 152 | return X 153 | def plotScatter(fig_particle,ax_particle,i,X,rgbaTuple,pointSize,L,pointcolor1,pointcolor2,Np): 154 | ax_particle.scatter(X[i + 1][0, 0:int(Np/2)], X[i + 1][1, 0:int(Np/2)], X[i + 1][2, 0:int(Np/2)], s=pointSize, c=pointcolor1) 155 | ax_particle.scatter(X[i + 1][0, int(Np/2):], X[i + 1][1, int(Np/2):], X[i + 1][2, int(Np/2):], s=pointSize, c=pointcolor2) 156 | 157 | plt.xlim([0, L]) 158 | plt.ylim([0, L]) 159 | ax_particle.set_zlim(0, L) 160 | ax_particle.set_xlabel('x-axis') 161 | ax_particle.set_ylabel('y-axis') 162 | ax_particle.set_zlabel('z-axis') 163 | 164 | ax_particle.w_xaxis.set_pane_color(rgbaTuple) 165 | ax_particle.w_yaxis.set_pane_color(rgbaTuple) 166 | ax_particle.w_zaxis.set_pane_color(rgbaTuple) 167 | 168 | ax_particle.xaxis.set_major_formatter( 169 | tck.FuncFormatter(lambda val, pos: '{:.0g}$\pi$'.format(val / np.pi) if val != 0 else '0')) 170 | ax_particle.xaxis.set_major_locator(tck.MultipleLocator(base=np.pi)) 171 | ax_particle.yaxis.set_major_formatter( 172 | tck.FuncFormatter(lambda val, pos: '{:.0g}$\pi$'.format(val / np.pi) if val != 0 else '0')) 173 | ax_particle.yaxis.set_major_locator(tck.MultipleLocator(base=np.pi)) 174 | ax_particle.zaxis.set_major_formatter( 175 | tck.FuncFormatter(lambda val, pos: '{:.0g}$\pi$'.format(val / np.pi) if val != 0 else '0')) 176 | ax_particle.zaxis.set_major_locator(tck.MultipleLocator(base=np.pi)) 177 | 178 | #plt.pause(0.05) 179 | plt.savefig('./Particle_plots/test_'+str(i),dpi=600) 180 | ax_particle.clear() 181 | 182 | def particle_IC(Np,L,choice): 183 | if choice=='random two slots': 184 | Np_int = int(Np / 2) 185 | par_Pos_init = np.zeros((3, Np)) 186 | par_Pos_init[0, 0:Np_int] = np.random.uniform(L / 2 - L / 3, L / 2 - L / 3.5, size=Np_int) 187 | par_Pos_init[1, 0:Np_int] = np.random.uniform(L / 2 - L / 3, L / 2 - L / 3.5, size=Np_int) 188 | par_Pos_init[2, 0:Np_int] = np.random.uniform(L / 2 - L / 3, L / 2 - L / 3.5, size=Np_int) 189 | 190 | par_Pos_init[0, Np_int:] = np.random.uniform(L / 2 + L / 3, L / 2 + L / 3.5, size=Np_int) 191 | par_Pos_init[1, Np_int:] = np.random.uniform(L / 2 + L / 3, L / 2 + L / 3.5, size=Np_int) 192 | par_Pos_init[2, Np_int:] = np.random.uniform(L / 2 + L / 3, L / 2 + L / 3.5, size=Np_int) 193 | if choice=='middlePoint': 194 | par_Pos_init = np.zeros((3, Np)) 195 | par_Pos_init[0, :] = L/2 196 | par_Pos_init[1, :] = L/2 197 | par_Pos_init[2, :] = L/2 198 | if choice =='midNormal': 199 | variance = 0.01 200 | standarddev = np.sqrt(variance) 201 | par_Pos_init = np.zeros((3, Np)) 202 | par_Pos_init[0, :] = np.random.normal(np.pi,standarddev , size=Np) 203 | par_Pos_init[1, :] = np.random.normal(np.pi, standarddev, size=Np) 204 | par_Pos_init[2, :] = np.random.normal(np.pi,standarddev , size=Np) 205 | return par_Pos_init 206 | 207 | def trajectory(t0, Tmax, h, f, integrator,dynamicField,L,ldx,X0): 208 | """ Function to calculate trajectory of the particles. 
209 | 210 | Parameters: 211 | ----------- 212 | X0 : A two dimensional array containing start positions 213 | (x0, y0) of each particle. 214 | t0 : Initial time 215 | Tmax: Final time 216 | h : Timestep 217 | f : Interpolator 218 | integrator: The chosen integrator function 219 | 220 | Returns: 221 | A three dimensional array containing the positions of 222 | each particle at every timestep on the interval (t0, Tmax). 223 | ----------- 224 | """ 225 | if (dynamicField==False): 226 | Nt = int((Tmax - t0) / h) # Number of datapoints 227 | X = np.zeros((Nt + 2, *X0.shape)) 228 | X[0, :] = X0 229 | t = t0 230 | try: 231 | pbar = tqdm(total=Nt) 232 | except: 233 | pass 234 | for i in range(Nt + 1): 235 | # Adjust last timestep to match Tmax exactly 236 | h = min(h, Tmax - t) 237 | t += h 238 | X[i + 1, :] = integrator(X[i, :], t, h, f) 239 | X[i+1,:] = periodicBC(X[i+1,:], L, ldx) 240 | 241 | 242 | #plotScatter(fig_particle, ax_particle, i, X, rgbaTuple, pointSize, L, pointcolor1, pointcolor2, Np) 243 | try: 244 | pbar.update(1) 245 | except: 246 | pass 247 | return X 248 | if (dynamicField==True): 249 | t=t0 #This variable is not used since we use explicit Euler method atm. 250 | X_new = integrator(X0,t,h,f) 251 | X_new = periodicBC(X_new,L,ldx) 252 | return X_new 253 | ''' 254 | if __name__=='__main__': 255 | velField = np.load('vel_files_iso/velocity_120.npy') 256 | f = Interpolator(velField) 257 | 258 | # Set initial conditions (t0 and x0) and timestep 259 | # Note that h also has time units, for convenient 260 | # calculation of t + h. 261 | 262 | # setting X0 in a slightly roundabout manner for 263 | # compatibility with Np >= 1 264 | 265 | comm = MPI.COMM_WORLD 266 | num_processes = comm.Get_size() 267 | rank = comm.Get_rank() 268 | #let Np be a multiple of num_processes 269 | Np = num_processes*50 270 | N=64 271 | L = np.pi*2 272 | ldx = L / N 273 | par_Pos_init = particle_IC(Np,L) 274 | 275 | fig_particle = plt.figure() 276 | ax_particle = fig_particle.add_subplot(111, projection='3d') 277 | pointSize = 3.1 278 | pointcolor1 = 'r' 279 | pointcolor2 = 'm' 280 | rgbaTuple = (167/255, 201/255, 235/255) 281 | 282 | #TODO wont need Tmax unless we collect new velocity field every time step. 
283 | h = 0.01 284 | t0 = 0 285 | Tmax = 5 286 | N1_particle = int(Np/num_processes) 287 | timesteps = int(Tmax/h+2) 288 | 289 | split_coordinates = np.array_split(par_Pos_init, num_processes, axis=1) 290 | data = comm.scatter(split_coordinates, root=0) 291 | 292 | #X1_full = np.empty((timesteps,3,Np)) 293 | X1 = np.empty((timesteps, 3, N1_particle)) 294 | 295 | X1 = trajectory(t0, Tmax, h, f, Euler, False, L, ldx, data) 296 | X1_full = comm.gather(X1, root=0) 297 | if rank==0: 298 | 299 | X1_reshaped = np.concatenate(X1_full,axis=2) 300 | for i in range(int(Tmax/h+2)): 301 | plotScatter(fig_particle, ax_particle, i, X1_reshaped, rgbaTuple, pointSize, L, pointcolor1, pointcolor2, Np) 302 | ''' 303 | -------------------------------------------------------------------------------- /3D/MASTER/Post_processing/Compute_variance.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | dataset = np.load('particleCoord_8000.npy') 4 | 5 | varX_list = [] 6 | varY_list = [] 7 | varZ_list = [] 8 | 9 | 10 | datasetshape = np.shape(dataset) 11 | timesteps = datasetshape[0] 12 | 13 | for i in range(timesteps): 14 | varX_list.append(np.var(dataset[i][0,:])) 15 | varY_list.append(np.var(dataset[i][1,:])) 16 | varZ_list.append(np.var(dataset[i][2,:])) 17 | print(i,flush=True) 18 | 19 | np.save('VarX_list.npy',np.array(varX_list)) 20 | np.save('VarY_list.npy',np.array(varY_list)) 21 | np.save('VarZ_list.npy',np.array(varZ_list)) -------------------------------------------------------------------------------- /3D/MASTER/Post_processing/Variance_computation.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | plt.style.use('bmh') 4 | 5 | run='2' 6 | 7 | VarX = np.load('./Variance_files/run'+run+'/VarX_list.npy') 8 | VarY = np.load('./Variance_files/run'+run+'/VarY_list.npy') 9 | VarZ = np.load('./Variance_files/run'+run+'/VarZ_list.npy') 10 | 11 | combined_variance = [] 12 | 13 | for i in range(len(VarX)): 14 | combined_variance.append((VarX[i]+VarY[i]+VarZ[i])/2) 15 | 16 | 17 | time = np.arange(0,80,0.01) 18 | tsteps = len(time)+1 19 | b = 2*np.pi 20 | a = 0 21 | exact_var = (1/12)*((b-a)**2) 22 | 23 | 24 | linewidth_variance = 0.5 25 | linewidth_polyfit = 1 26 | 27 | idxa_fick = 1 28 | idxb_fick = 90 29 | 30 | idxc_total = 610 31 | idxd_total = 4000 32 | 33 | localidx_turb_a = 230 34 | localidx_turb_b = 518 35 | 36 | listToPlot = VarZ.copy() 37 | liststring = 'z' 38 | 39 | m1,c1 = np.polyfit(np.log(time[idxa_fick:idxb_fick]),np.log(listToPlot[idxa_fick:idxb_fick]),1) 40 | log_fit1 = m1*np.log(time[idxa_fick:idxb_fick])+c1 41 | 42 | m2,c2 = np.polyfit(np.log(time[idxc_total:idxd_total]),np.log(listToPlot[idxc_total:idxd_total]),1) 43 | log_fit2 = m2*np.log(time[idxc_total:idxd_total])+c2 44 | 45 | m3,c3 = np.polyfit(np.log(time[localidx_turb_a:localidx_turb_b]),np.log(listToPlot[localidx_turb_a:localidx_turb_b]),1) 46 | log_fit3 = m3*np.log(time[localidx_turb_a:localidx_turb_b])+c3 47 | 48 | 49 | #plt.loglog(t[idxc:idxd],tpower1[idxc:idxd],'b--') 50 | plt.loglog(time[idxa_fick:idxb_fick],np.exp(log_fit1),color='k',linestyle=(0,(3,1,1,1,1,1)),linewidth=linewidth_polyfit,label=r'$\mathrm{Fickian-diffusion},\; t^{%.2f}$'%(m1)) 51 | plt.loglog(time[idxc_total:idxd_total],np.exp(log_fit2),color='k',linestyle='dashdot',linewidth=linewidth_polyfit,label='$\mathrm{Turbulent-diffusion},\; t^{%.2f}$'%(m2)) 52 | 
#plt.loglog(time[localidx_turb_a:localidx_turb_b],np.exp(log_fit3),color='k',linestyle=(0,(1,1)),linewidth=linewidth_polyfit,label=r'$\mathrm{Richardson-scaling},\; t^{%.2f}}$'%(m3)) 53 | 54 | 55 | 56 | 57 | print('exact var..: ',exact_var) 58 | plt.plot(time,listToPlot[:-1],'b',linewidth=linewidth_variance,label=r'$\sigma^{2}\mathrm{,\;'+liststring+'-component}$') 59 | #plt.plot(time,VarY[:-1],'g',linewidth=linewidth_variance,label='Variance in PD y-component') 60 | #plt.plot(time,VarZ[:-1],'m',linewidth=linewidth_variance,label='Variance in PD z-component') 61 | #plt.plot(time,combined_variance[:-1],'m',linewidth=linewidth_variance,label='Combined variance') 62 | 63 | plt.plot(time,exact_var*np.ones(len(time)),'r--',linewidth=1,label=r'$\mathrm{Uniform \;distribution}$') 64 | 65 | 66 | plt.yscale(value="log") 67 | plt.xscale(value="log") 68 | plt.xlim((0.01,80)) 69 | plt.ylim((1e-5,10)) 70 | plt.xlabel('$\mathrm{Time \;(s)}$') 71 | plt.ylabel('$\sigma^{2} \; \mathrm{(m^{2})}$') 72 | plt.legend() 73 | plt.savefig('./Variance_files/Plots/variance_'+liststring+'_PD_'+run+'.png',dpi=1000) 74 | plt.show() 75 | -------------------------------------------------------------------------------- /3D/MASTER/Post_processing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielhalvorsen/Project_Turbulence_Modelling/4cb925604ace9f065017e0cbb41ec4b19a00f065/3D/MASTER/Post_processing/__init__.py -------------------------------------------------------------------------------- /3D/MASTER/Post_processing/spectrum_plotting.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | plt.style.use('bmh') 4 | 5 | 6 | time=0 7 | 8 | step=160 9 | for i in range(55): 10 | time = 0 11 | time += step*i 12 | IC = 'isotropic' 13 | N=512 14 | N_half = int(N/2) 15 | 16 | k = np.load('./spectral_files/'+IC+'/wave_numbers_'+str(time)+'.npy') 17 | TKE = np.load('./spectral_files/'+IC+'/TKE'+str(time)+'.npy') 18 | 19 | 20 | plt.loglog(k[2:N_half],TKE[2:N_half],'g-',markerSize=2) 21 | plt.loglog(k[2:N_half],(k[2:N_half]**(-5/3)),'r--') 22 | plt.yscale(value='log') 23 | plt.xscale(value='log',basex=2) 24 | plt.ylim(ymin=(1e-18), ymax=1e3) 25 | plt.xlabel('$\mathrm{k}$') 26 | plt.ylabel('$\mathrm{E(k)}$') 27 | plt.legend(['$\mathrm{E(k)}$, $\mathrm{t= %.2f}$'%(time/100), r'$\mathrm{k^{-5/3}}$'],loc='lower left') 28 | plt.savefig('./spectrum_plots/'+IC+'/TKE_'+str(time)+'.png',dpi=1000) 29 | plt.cla() -------------------------------------------------------------------------------- /3D/MASTER/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielhalvorsen/Project_Turbulence_Modelling/4cb925604ace9f065017e0cbb41ec4b19a00f065/3D/MASTER/__init__.py -------------------------------------------------------------------------------- /3D/MASTER/math_dir/setup.py: -------------------------------------------------------------------------------- 1 | #from setuptools import setup 2 | from Cython.Build import cythonize 3 | from distutils.core import setup 4 | 5 | setup(ext_modules = cythonize("cross.pyx")) 6 | -------------------------------------------------------------------------------- /3D/MASTER/post_processing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.animation as 
animation 4 | from numpy.fft import fftfreq, fft, ifft, irfft2, fftn,fftshift,rfft,irfft 5 | import time 6 | from numpy import sqrt, zeros, conj, pi, arange, ones, convolve 7 | from matplotlib.ticker import ScalarFormatter 8 | import pickle 9 | from numba import jit 10 | from MPI_func.mpibase import work_arrays 11 | work_array = work_arrays() 12 | plt.style.use('bmh') 13 | 14 | 15 | 16 | 17 | 18 | def movingaverage(interval, window_size): 19 | window = ones(int(window_size)) / float(window_size) 20 | return convolve(interval, window, 'same') 21 | 22 | @jit(nopython=True,parallel=True) 23 | def kloop(nx,ny,nz,tke_spectrum,tkeh): 24 | for kx in range(-nx//2, nx//2-1): 25 | for ky in range(-ny//2, ny//2-1): 26 | for kz in range(-nz//2, nz//2-1): 27 | rk = sqrt(kx**2 + ky**2 + kz**2) 28 | k = int(np.round(rk)) 29 | tke_spectrum[k] += tkeh[kx, ky, kz] 30 | return tke_spectrum 31 | 32 | @jit(nopython=True) 33 | def dissipationLoop(N,K2,uh,vh,wh,nu): 34 | sum = 0 35 | for i in range(np.shape(K2)[0]): 36 | for j in range(np.shape(K2)[1]): 37 | for k in range(np.shape(K2)[2]): 38 | sum += (K2[i,j,k]*((uh[i,j,k]+vh[i,j,k])+wh[i,j,k])/3) 39 | 40 | 41 | return np.real(2*nu*sum) 42 | 43 | 44 | 45 | @jit(nopython=True,fastmath=True) 46 | def cross2a(c, a, b): 47 | """ c = 1j*(a x b)""" 48 | for i in range(a.shape[1]): 49 | for j in range(a.shape[2]): 50 | for k in range(a.shape[3]): 51 | a0 = a[0, i, j, k] 52 | a1 = a[1, i, j, k] 53 | a2 = a[2, i, j, k] 54 | b0 = b[0, i, j, k] 55 | b1 = b[1, i, j, k] 56 | b2 = b[2, i, j, k] 57 | c[0, i, j, k] = -(a1*b2.imag - a2*b1.imag) + 1j*(a1*b2.real - a2*b1.real) 58 | c[1, i, j, k] = -(a2*b0.imag - a0*b2.imag) + 1j*(a2*b0.real - a0*b2.real) 59 | c[2, i, j, k] = -(a0*b1.imag - a1*b0.imag) + 1j*(a0*b1.real - a1*b0.real) 60 | 61 | return c 62 | @jit(nopython=True) 63 | def integralDissipation(curl_hat): 64 | dissipation = np.sum(np.abs(curl_hat) ** 2) 65 | return dissipation 66 | 67 | def dissipationComputation(a, work, K,nu): 68 | """c = curl(a) = F_inv(F(curl(a))) = F_inv(1j*K x a)""" 69 | curl_hat = work[(a, 0, False)] 70 | curl_hat = cross2a(curl_hat, K, a) 71 | #c[0] = ifft(curl_hat[0]) 72 | #c[1] = ifft(curl_hat[1]) 73 | #c[2] = ifft(curl_hat[2]) 74 | dissipation = integralDissipation(curl_hat) 75 | return dissipation*nu 76 | 77 | def dissipationComputation2(b, work, K,nu): 78 | """c = curl(a) = F_inv(F(curl(a))) = F_inv(1j*K x a)""" 79 | uh = fftn(b[0])/N_three 80 | vh = fftn(b[1])/N_three 81 | wh = fftn(b[2])/N_three 82 | tmp = np.array([uh,vh,wh]) 83 | 84 | curl_hat = work[(tmp, 0, False)] 85 | curl_hat = cross2a(curl_hat, K, tmp) 86 | #c[0] = ifft(curl_hat[0]) 87 | #c[1] = ifft(curl_hat[1]) 88 | #c[2] = ifft(curl_hat[2]) 89 | dissipation = integralEnergy(curl_hat) 90 | return dissipation*nu 91 | 92 | @jit(nopython=True,parallel=True) 93 | def dissipationLoop(wave_numbers,nu,tke): 94 | sum = 0 95 | K2 = [x**2 for x in wave_numbers] 96 | for k in range(np.shape(K2)[0]): 97 | sum += (K2[k]*tke[k]) 98 | return np.real(2*nu*sum) 99 | 100 | @jit(nopython=True) 101 | def BandEnergy(tke,kf): 102 | sum = 0 103 | for k in range(kf): 104 | sum += (tke[k]) 105 | return sum 106 | 107 | 108 | def integralEnergy(arg): 109 | #TODO make this function work in parallel? 
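    # Added comment (not in the original): this accumulates sum_k |arg[k]|**2 over
    # every retained Fourier mode and scales by the module-level N_three = N**3;
    # the callers in this file (e.g. dissipationComputation2) pass coefficients
    # already divided by N**3 after fftn, so the value should be read as a
    # discrete, Parseval-style energy measure tied to that normalisation.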
110 | result = ((sum(abs(arg[...]) ** 2))) 111 | return result/N_three 112 | 113 | def L2_norm(comm, arg,N_three): 114 | r"""Compute the L2-norm of real array a 115 | Computing \int abs(u)**2 dx 116 | """ 117 | 118 | #result = 119 | result = comm.allreduce((sum(abs(arg[...]) ** 2))) 120 | return result/N_three 121 | 122 | 123 | def compute_tke_spectrum(u, v, w, length, smooth): 124 | """ 125 | Given a velocity field u, v, w, this function computes the kinetic energy 126 | spectrum of that velocity field in spectral space. This procedure consists of the 127 | following steps: 128 | 1. Compute the spectral representation of u, v, and w using a fast Fourier transform. 129 | This returns uf, vf, and wf (the f stands for Fourier) 130 | 2. Compute the point-wise kinetic energy Ef (kx, ky, kz) = 1/2 * (uf, vf, wf)* conjugate(uf, vf, wf) 131 | 3. For every wave number triplet (kx, ky, kz) we have a corresponding spectral kinetic energy 132 | Ef(kx, ky, kz). To extract a one dimensional spectrum, E(k), we integrate Ef(kx,ky,kz) over 133 | the surface of a sphere of radius k = sqrt(kx^2 + ky^2 + kz^2). In other words 134 | E(k) = sum( E(kx,ky,kz), for all (kx,ky,kz) such that k = sqrt(kx^2 + ky^2 + kz^2) ). 135 | Parameters: 136 | ----------- 137 | u: 3D array 138 | The x-velocity component. 139 | v: 3D array 140 | The y-velocity component. 141 | w: 3D array 142 | The z-velocity component. 143 | lx: float 144 | The domain size in the x-direction. 145 | ly: float 146 | The domain size in the y-direction. 147 | lz: float 148 | The domain size in the z-direction. 149 | smooth: boolean 150 | A boolean to smooth the computed spectrum for nice visualization. 151 | """ 152 | lx,ly,lz = length,length,length 153 | nx = len(u[:, 0, 0]) 154 | ny = len(v[0, :, 0]) 155 | nz = len(w[0, 0, :]) 156 | 157 | nt = nx * ny * nz 158 | n = nx # int(np.round(np.power(nt,1.0/3.0))) 159 | 160 | uh = fftn(u) / nt 161 | vh = fftn(v) / nt 162 | wh = fftn(w) / nt 163 | 164 | tkeh = 0.5 * (uh * conj(uh) + vh * conj(vh) + wh * conj(wh)).real 165 | 166 | k0x = 2.0 * pi / lx 167 | k0y = 2.0 * pi / ly 168 | k0z = 2.0 * pi / lz 169 | 170 | knorm = (k0x + k0y + k0z) / 3.0 171 | print('knorm = ', knorm) 172 | 173 | kxmax = nx / 2 174 | kymax = ny / 2 175 | kzmax = nz / 2 176 | 177 | # dk = (knorm - kmax)/n 178 | # wn = knorm + 0.5 * dk + arange(0, nmodes) * dk 179 | 180 | wave_numbers = knorm * arange(0, n) 181 | 182 | tke_spectrum = zeros(len(wave_numbers)) 183 | tke_spectrum = kloop(nx,ny,nz,tke_spectrum,tkeh) 184 | tke_spectrum = tke_spectrum / knorm 185 | 186 | if smooth: 187 | tkespecsmooth = movingaverage(tke_spectrum, 5) # smooth the spectrum 188 | tkespecsmooth[0:4] = tke_spectrum[0:4] # get the first 4 values from the original data 189 | tke_spectrum = tkespecsmooth 190 | 191 | knyquist = knorm * min(nx, ny, nz) / 2 192 | 193 | return knyquist, wave_numbers, tke_spectrum 194 | 195 | def spectrum(length,u,v,w): 196 | data_path = "./" 197 | 198 | Figs_Path = "./" 199 | Fig_file_name = "Ek_Spectrum" 200 | 201 | # ----------------------------------------------------------------- 202 | # COMPUTATIONS 203 | # ----------------------------------------------------------------- 204 | localtime = time.asctime(time.localtime(time.time())) 205 | print("Computing spectrum... ", localtime) 206 | 207 | 208 | 209 | N = int(round((length ** (1. 
/ 3)))) 210 | print("N =", N) 211 | eps = 1e-50 # to void log(0) 212 | 213 | U = u 214 | V = v 215 | W = w 216 | 217 | amplsU = abs(fftn(U) / U.size) 218 | amplsV = abs(fftn(V) / V.size) 219 | amplsW = abs(fftn(W) / W.size) 220 | 221 | EK_U = amplsU ** 2 222 | EK_V = amplsV ** 2 223 | EK_W = amplsW ** 2 224 | 225 | EK_U = fftshift(EK_U) 226 | EK_V = fftshift(EK_V) 227 | EK_W = fftshift(EK_W) 228 | 229 | sign_sizex = np.shape(EK_U)[0] 230 | sign_sizey = np.shape(EK_U)[1] 231 | sign_sizez = np.shape(EK_U)[2] 232 | 233 | box_sidex = sign_sizex 234 | box_sidey = sign_sizey 235 | box_sidez = sign_sizez 236 | 237 | box_radius = int(np.ceil((np.sqrt((box_sidex) ** 2 + (box_sidey) ** 2 + (box_sidez) ** 2)) / 2.) + 1) 238 | 239 | centerx = int(box_sidex / 2) 240 | centery = int(box_sidey / 2) 241 | centerz = int(box_sidez / 2) 242 | 243 | print("box sidex =", box_sidex) 244 | print("box sidey =", box_sidey) 245 | print("box sidez =", box_sidez) 246 | print("sphere radius =", box_radius) 247 | print("centerbox =", centerx) 248 | print("centerboy =", centery) 249 | print("centerboz =", centerz, "\n") 250 | 251 | EK_U_avsphr = np.zeros(box_radius, ) + eps ## size of the radius 252 | EK_V_avsphr = np.zeros(box_radius, ) + eps ## size of the radius 253 | EK_W_avsphr = np.zeros(box_radius, ) + eps ## size of the radius 254 | 255 | for i in range(box_sidex): 256 | for j in range(box_sidey): 257 | for k in range(box_sidez): 258 | wn = int(round(np.sqrt((i - centerx) ** 2 + (j - centery) ** 2 + (k - centerz) ** 2))) 259 | EK_U_avsphr[wn] = EK_U_avsphr[wn] + EK_U[i, j, k] 260 | EK_V_avsphr[wn] = EK_V_avsphr[wn] + EK_V[i, j, k] 261 | EK_W_avsphr[wn] = EK_W_avsphr[wn] + EK_W[i, j, k] 262 | print('iterating'+str(i),flush=True) 263 | 264 | EK_avsphr = 0.5 * (EK_U_avsphr + EK_V_avsphr + EK_W_avsphr) 265 | 266 | fig2 = plt.figure() 267 | #plt.title("Kinetic Energy Spectrum") 268 | plt.xlabel(r"k") 269 | plt.ylabel(r"E(k)") 270 | 271 | realsize = len(rfft(U[:, 0, 0])) 272 | plt.loglog(np.arange(0, realsize), ((EK_avsphr[0:realsize])), 'k') 273 | plt.loglog(np.arange(realsize, len(EK_avsphr), 1), ((EK_avsphr[realsize:])), 'k--') 274 | axes = plt.gca() 275 | axes.set_ylim([10 ** -25, 5 ** -1]) 276 | 277 | print("Real Kmax = ", realsize) 278 | print("Spherical Kmax = ", len(EK_avsphr)) 279 | 280 | TKEofmean_discrete = 0.5 * (sum(U / U.size) ** 2 + sum(W / W.size) ** 2 + sum(W / W.size) ** 2) 281 | TKEofmean_sphere = EK_avsphr[0] 282 | 283 | total_TKE_discrete = sum(0.5 * (U ** 2 + V ** 2 + W ** 2)) / (N * 1.0) ** 3 284 | total_TKE_sphere = sum(EK_avsphr) 285 | 286 | print("the KE of the mean velocity discrete = ", TKEofmean_discrete) 287 | print("the KE of the mean velocity sphere = ", TKEofmean_sphere) 288 | print("the mean KE discrete = ", total_TKE_discrete) 289 | print("the mean KE sphere = ", total_TKE_sphere) 290 | 291 | localtime = time.asctime(time.localtime(time.time())) 292 | print("Computing spectrum... 
", localtime, "- END \n") 293 | 294 | # ----------------------------------------------------------------- 295 | # OUTPUT/PLOTS 296 | # ----------------------------------------------------------------- 297 | 298 | dataout = np.zeros((box_radius, 2)) 299 | dataout[:, 0] = np.arange(0, len(dataout)) 300 | dataout[:, 1] = EK_avsphr[0:len(dataout)] 301 | 302 | #savetxt(Figs_Path + Fig_file_name + '.dat', dataout) 303 | #fig.savefig(Figs_Path + Fig_file_name + '.pdf') 304 | return fig2 305 | 306 | 307 | fig, ax = plt.subplots() 308 | 309 | ims = [] 310 | step=0 311 | length=2*np.pi 312 | xticks = np.logspace(0,2,7) 313 | yticks = np.logspace(1,-13,5) 314 | N=512 315 | N_half = int(N/2) 316 | N_three = N**3 317 | kf = 8 318 | nu = 1/1600 319 | amount = 1 320 | name = 'vel_files/velocity_'+str(step)+'.npy' 321 | plot = 'dissipation' 322 | counter =0 323 | dissipationArray = np.zeros((amount)) 324 | #TODO make stepjump dynamic depending on the name of the files in the folder 325 | stepjump = 140 326 | timearray = np.arange(0,(amount*100),stepjump)/100 327 | energyarrayKf = [] 328 | energyarrayKin = [] 329 | 330 | runLoop = True 331 | 332 | if runLoop == True: 333 | kx = fftfreq(N, 1. / N) 334 | K = np.array(np.meshgrid(kx, kx, kx, indexing='ij'), dtype=int) 335 | K2 = np.sum(K * K, 0, dtype=int) 336 | #TODO load in one and one file from /vel_files, read [0][:,:,-1] and add to animation. Also make spectrum plots and viscous diffusion plots 337 | for i in range(amount): 338 | #name = 'vel_files/velocity_' + str(step) + '.npy' 339 | name = './Post_processing/single_vel_files/TG/velocity_0.npy' 340 | 341 | vec = np.load(name) 342 | print('Loaded nr: '+str(step),flush=True) 343 | if plot == 'plotVelocity': 344 | im = plt.imshow(vec[0][:,:,-1],cmap='jet', animated=True) 345 | ims.append([im]) 346 | if plot == 'isoVelocity': 347 | im = plt.imshow(vec[0][:,-1,:], animated=True) 348 | plt.savefig('iso_images/velocity_' + str(step)) 349 | plt.clf() 350 | if plot == 'spectrum1': 351 | fig = spectrum(N,vec[0],vec[1],vec[2]) 352 | plt.savefig('spectrum_plots/spectrum_'+str(step)) 353 | #im = plt.show() 354 | #ims.append([im]) 355 | if plot == 'spectrum2': 356 | nyquist,k,tke = compute_tke_spectrum(vec[0],vec[1],vec[2],length,True) 357 | eps = dissipationLoop(k, nu, tke) 358 | kinBand = BandEnergy(tke, kf) 359 | kinTotal = BandEnergy(tke, k[-1]) 360 | #energyarrayKin.append(kinBand) 361 | #plt.plot(timearray[0:len(energyarrayKin)] , energyarrayKin, 'r--') 362 | np.save('./spectrum_data/wave_numbers_'+str(step)+'.npy',k) 363 | np.save('./spectrum_data/TKE'+str(step)+'.npy',tke) 364 | 365 | 366 | plt.loglog(k[1:N_half],tke[1:N_half],'g.','markerSize=2') 367 | plt.loglog(k[1:N_half],(k[1:N_half]**(-5/3))*(eps**(-2/3)),'r--') 368 | plt.yscale('log') 369 | plt.ylim(ymin=(1e-18), ymax=1e3) 370 | # plt.xticks(xticks) 371 | # plt.yticks(yticks) 372 | plt.xlabel('Wave number, $k$') 373 | plt.ylabel('Turbulent kinetic energy, $E(k)$') 374 | plt.legend(['$E(k)$, t= %.2f'%(step/100), r'$\epsilon^{-2/3}k^{-5/3}$'],loc='lower left') 375 | 376 | #plt.savefig('spectrum_plots/spectrum_'+str(step)) 377 | plt.savefig('spectrum_plots/spectrum_TG_0.png',dpi=1000) 378 | 379 | plt.clf() 380 | if plot == 'dissipation': 381 | Nt = N**3 382 | uh = fftn(vec[0])/Nt 383 | vh = fftn(vec[1])/Nt 384 | wh = fftn(vec[2])/Nt 385 | u_hat = np.array([uh,vh,wh]) 386 | dissipationArray[counter]= dissipationComputation(u_hat,work_array,K,nu) 387 | counter +=1 388 | np.save('dissipation.npy', dissipationArray) 389 | 390 | step += stepjump 391 | 
print('Finished appending nr: '+str(step),flush=True) 392 | plt.plot(timearray,dissipationArray) 393 | 394 | 395 | ''' 396 | if plot =='plotVelocity': 397 | ani = animation.ArtistAnimation(fig, ims, interval=2, blit=False,repeat_delay=None) 398 | ani.save('spectrum.gif', writer='imagemagick') 399 | ''' 400 | else: 401 | dissipation = np.load('./Post_processing/dissipation.npy') 402 | 403 | 404 | f = open('./Post_processing/spectral_Re1600_512.txt',"r") 405 | #print(f.read(100)) 406 | a = np.genfromtxt('./Post_processing/spectral_Re1600_512.txt', delimiter=" ", dtype=None) 407 | 408 | plt.plot(timearray[0:len(dissipation)],dissipation,'k-',linewidth=0.7,label=r'$\mathrm{Taylor-Green \; dissipation}$') 409 | plt.plot(a[:,0],a[:,2],color='b',linestyle=(0,(3,1,1,1)),linewidth=0.7,label=r'$\mathrm{NASA.gov \;reference\; data}$') 410 | plt.xlabel('$\mathrm{Time \;(s)}$') 411 | plt.ylabel(r'$\epsilon$ $\mathrm{\;}$($\frac{m^2}{s^2}$)') 412 | plt.legend() 413 | #plt.show() 414 | plt.savefig('./Post_processing/dissipation.png',dpi=1000) -------------------------------------------------------------------------------- /3D/Project/3DNS_spectral.py: -------------------------------------------------------------------------------- 1 | from numpy import * 2 | from numpy.fft import fftfreq, fft, ifft, irfft2, rfft2 3 | from mpi4py import MPI 4 | import pickle 5 | import matplotlib.pyplot as plt 6 | from tqdm import tqdm 7 | 8 | # U is set to dtype float32 9 | # Reynoldsnumber determined by nu Re = 1600, nu = 1/1600 10 | nu = 0.0000625 11 | # nu = 0.00000625 12 | T = 10 13 | dt = 0.01 14 | animation_slice = 100 15 | N = int(2 ** 6) 16 | N_half = int(N / 2 + 1) 17 | comm = MPI.COMM_WORLD 18 | num_processes = comm.Get_size() 19 | rank = comm.Get_rank() 20 | Np = int(N / num_processes) 21 | X = mgrid[rank * Np:(rank + 1) * Np, :N, :N].astype(float) * 2 * pi / N 22 | # using np.empty() does not create a zero() list! 23 | U = empty((3, Np, N, N), dtype=float32) 24 | U_hat = empty((3, N, Np, N_half), dtype=complex) 25 | P = empty((Np, N, N)) 26 | P_hat = empty((N, Np, N_half), dtype=complex) 27 | U_hat0 = empty((3, N, Np, N_half), dtype=complex) 28 | U_hat1 = empty((3, N, Np, N_half), dtype=complex) 29 | dU = empty((3, N, Np, N_half), dtype=complex) 30 | Uc_hat = empty((N, Np, N_half), dtype=complex) 31 | Uc_hatT = empty((Np, N, N_half), dtype=complex) 32 | U_mpi = empty((num_processes, Np, Np, N_half), dtype=complex) 33 | curl = empty((3, Np, N, N)) 34 | animate_U_x = empty((int(T / dt/animation_slice), Np, N, N), dtype=float32) 35 | save_animation = True 36 | kx = fftfreq(N, 1. / N) 37 | kz = kx[:(N_half)].copy(); 38 | kz[-1] *= -1 39 | K = array(meshgrid(kx, kx[rank * Np:(rank + 1) * Np], kz, indexing="ij"), dtype=int) 40 | K2 = sum(K * K, 0, dtype=int) 41 | K_over_K2 = K.astype(float) / where(K2 == 0, 1, K2).astype(float) 42 | kmax_dealias = 2. / 3. * (N_half) 43 | dealias = array( 44 | (abs(K[0]) < kmax_dealias) * (abs(K[1]) < kmax_dealias) * (abs(K[2]) < kmax_dealias), 45 | dtype=bool) 46 | 47 | a = [1. / 6., 1. / 3., 1. / 3., 1. / 6.] 48 | b = [0.5, 0.5, 1.] 
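# Added sketch (not part of the original script): the boolean `dealias` mask a few
# lines up implements the 2/3 rule. The quadratic nonlinearity forms products in
# physical space, which generate wavenumbers up to twice the resolved maximum; on
# the discrete grid those fold back (alias) onto resolved modes, so every mode
# with |k| >= 2/3 of the Nyquist limit is zeroed before the RHS is assembled.
# A one-dimensional version of the same mask (hypothetical helper, illustration only):
def _dealias_mask_1d(n):
    import numpy as np
    k = np.fft.fftfreq(n, 1. / n)                 # wavenumbers 0..n/2-1, -n/2..-1
    return np.abs(k) < (2. / 3.) * (n // 2 + 1)   # keep roughly the lowest two thirds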
49 | dir = '/home/danieloh/PycharmProjects/Project_Turbulence_Modelling/animation_folder/' 50 | 51 | def ifftn_mpi(fu, u): 52 | # Inverse Fourier transform 53 | Uc_hat[:] = ifft(fu, axis=0) 54 | comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX]) 55 | Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape) 56 | u[:] = irfft2(Uc_hatT, axes=(1, 2)) 57 | return u 58 | 59 | 60 | def fftn_mpi(u, fu): 61 | # Forward Fourier transform 62 | Uc_hatT[:] = rfft2(u, axes=(1, 2)) 63 | U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np, N_half), 1) 64 | comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX]) 65 | fu[:] = fft(fu, axis=0) 66 | return fu 67 | 68 | 69 | def Cross(a, b, c): 70 | # 3D cross product 71 | c[0] = fftn_mpi(a[1] * b[2] - a[2] * b[1], c[0]) 72 | c[1] = fftn_mpi(a[2] * b[0] - a[0] * b[2], c[1]) 73 | c[2] = fftn_mpi(a[0] * b[1] - a[1] * b[0], c[2]) 74 | return c 75 | 76 | 77 | def Curl(a, c): 78 | # 3D curl operator 79 | c[2] = ifftn_mpi(1j * (K[0] * a[1] - K[1] * a[0]), c[2]) 80 | c[1] = ifftn_mpi(1j * (K[2] * a[0] - K[0] * a[2]), c[1]) 81 | c[0] = ifftn_mpi(1j * (K[1] * a[2] - K[2] * a[1]), c[0]) 82 | return c 83 | 84 | 85 | def computeRHS(dU, rk): 86 | # Compute residual of time integral as specified in pseudo spectral Galerkin method 87 | if rk > 0: 88 | for i in range(3): 89 | U[i] = ifftn_mpi(U_hat[i], U[i]) 90 | curl[:] = Curl(U_hat, curl) 91 | dU = Cross(U, curl, dU) 92 | dU *= dealias 93 | P_hat[:] = sum(dU * K_over_K2, 0, out=P_hat) 94 | dU -= P_hat * K 95 | dU -= nu * K2 * U_hat 96 | return dU 97 | 98 | 99 | # initial condition and transformation to Fourier space 100 | U[0] = sin(X[0]) * cos(X[1]) * cos(X[2]) 101 | U[1] = -cos(X[0]) * sin(X[1]) * cos(X[2]) 102 | U[2] = 0 103 | for i in range(3): 104 | U_hat[i] = fftn_mpi(U[i], U_hat[i]) 105 | 106 | # Time integral using a Runge Kutta scheme 107 | t = 0.0 108 | tstep = 0 109 | save_nr = 1 110 | savecount = 1 111 | mid_idx = int(N / 2) 112 | pbar = tqdm(total=int(T / dt)) 113 | while t < T - 1e-8: 114 | 115 | t += dt; 116 | U_hat1[:] = U_hat0[:] = U_hat 117 | for rk in range(4): 118 | # Run RK4 temporal integral method 119 | dU = computeRHS(dU, rk) 120 | if rk < 3: U_hat[:] = U_hat0 + b[rk] * dt * dU 121 | U_hat1[:] += a[rk] * dt * dU 122 | U_hat[:] = U_hat1[:] 123 | for i in range(3): 124 | # Inverse Fourier transform after RK4 algorithm 125 | U[i] = ifftn_mpi(U_hat[i], U[i]) 126 | # if save_animation == True and tstep % save_nr == 0: 127 | # Save the animation every "save_nr" time step 128 | animate_U_x[tstep%(animation_slice+1)] = U[0].copy() 129 | if tstep%(animation_slice+1)==0: 130 | if save_animation == True: 131 | animate_gather = comm.gather(animate_U_x,root=0) 132 | with open(dir+'animate_U'+str(savecount)+'.pkl', 'wb') as h: 133 | pickle.dump([animate_gather], h) 134 | savecount += 1 135 | tstep += 1 136 | pbar.update(1) 137 | 138 | k = comm.reduce(0.5 * sum(U * U) * (1. / N) ** 3) 139 | # if rank == 0: 140 | # assert round(k - 0.124953117517, 7) == 0 141 | pbar.close() 142 | 143 | # Gather the scattered data and store into two variables, X and U. 
144 | # Root is rank of receiving process (core 1) 145 | U_gathered = comm.gather(U, root=0) 146 | X_gathered = comm.gather(X, root=0) 147 | #animate_gathered = comm.gather(animate_U_x,root=0) 148 | 149 | ##animate_U_x_T = animate_U_x.transpose((0, 3, 2, 1)) 150 | #animate_save_T = [animate_U_x_T[i][int(N / 2)] for i in range(len(animate_U_x_T))] 151 | 152 | with open('U' + '.pkl', 'wb') as f: 153 | pickle.dump([U_gathered], f) 154 | 155 | with open('X.pkl', 'wb') as g: 156 | pickle.dump([X_gathered], g) 157 | 158 | 159 | -------------------------------------------------------------------------------- /3D/Project/Plotting/3DNS_dynamic_plot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import matplotlib.pyplot as plt 3 | from numpy import * 4 | from mayavi import mlab 5 | from basic_units import radians, degrees, cos 6 | from radians_plot import * 7 | import matplotlib.animation as animation 8 | import types 9 | 10 | # X = mgrid[rank * Np:(rank + 1) * Np, :N, :N].astype(float) * 2 * pi / N 11 | # U = empty((3, Np, N, N),dtype=float32) 12 | with open('X.pkl', 'rb') as g: 13 | X_pkl = pickle.load(g) 14 | with open('U.pkl', 'rb') as f: 15 | U_pkl = pickle.load(f) 16 | with open('animate_U_x.pkl', 'rb') as h: 17 | animate_U_x_pkl = pickle.load(h) 18 | 19 | print(np.shape(U_pkl)) 20 | print(np.shape(animate_U_x_pkl)) 21 | 22 | 23 | # Concatenates the processor axis on spatial mesh, X, and solution mesh, U. This is 24 | # done in a dynamical for-loop which changes size depending on number of processors. 25 | X_list = [X_pkl[0][i] for i in range(np.size(X_pkl, 1))] 26 | U_list = [U_pkl[0][i] for i in range(np.size(U_pkl, 1))] 27 | animate_U_x_list = [animate_U_x_pkl[0][i] for i in range(np.size(X_pkl, 1))] 28 | X = concatenate(X_list, axis=1) 29 | U = concatenate(U_list, axis=1) 30 | animate_U_x = concatenate(animate_U_x_list, axis=1) 31 | 32 | U_x = U[0].transpose((2, 1, 0)) 33 | U_y = U[1].transpose((2, 1, 0)) 34 | U_z = U[2].transpose((2, 1, 0)) 35 | animate_U_x_T = animate_U_x.transpose((0,3,2,1)) 36 | 37 | 38 | print(np.shape(U_x)) 39 | print(np.shape(animate_U_x_T)) 40 | 41 | N = int(len(X[0, 0, 0, :])) 42 | mid_idx = int(N / 2) 43 | 44 | mlab.pipeline.image_plane_widget(mlab.pipeline.scalar_field(U[0]), 45 | plane_orientation='z_axes', 46 | slice_index=mid_idx, 47 | ) 48 | mlab.axes(xlabel='x', ylabel='y', zlabel='z') 49 | mlab.outline() 50 | # mlab.show() 51 | 52 | 53 | # Plot contour lines of the velocity in X-direction in the middle of the cube. 54 | # X mesh is listed by X([z-levels,],[y-levels],[x-levels]), addressing, X[2] points to 55 | # the mesh in x-direction. 
56 | plt.contourf(X[2, 0], X[1, 0], U_x[mid_idx], 57 | xunits=radians, yunits=radians, levels=256, cmap=plt.get_cmap('jet')) 58 | ax = plt.gca() 59 | ax.set_xlabel('x') 60 | ax.set_ylabel('y') 61 | ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 62 | ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 63 | ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 64 | ax.yaxis.set_major_locator(plt.MultipleLocator(np.pi / 2)) 65 | ax.yaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12)) 66 | ax.yaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter())) 67 | plt.show() 68 | 69 | 70 | with open('animate_U_x_'+str(0)+'.pkl', 'rb') as i: 71 | U_0= pickle.load(i) 72 | with open('animate_U_x_'+str(1)+'.pkl', 'rb') as j: 73 | U_1= pickle.load(j) 74 | with open('animate_U_x_'+str(2)+'.pkl', 'rb') as k: 75 | U_2= pickle.load(k) 76 | with open('animate_U_x_'+str(3)+'.pkl', 'rb') as l: 77 | U_3= pickle.load(l) 78 | print(np.shape(U_0)) 79 | 80 | U_anim = concatenate([U_0,U_1,U_2,U_3],axis=2) 81 | 82 | fig = plt.figure() 83 | # ims is a list of lists, each row is a list of artists to draw in the 84 | # current frame; here we are just animating one artist, the image, in 85 | # each frame 86 | ims = [] 87 | for i in range(0,len(animate_U_x_T),10): 88 | print('Appending image nr: '+str(i)) 89 | im = plt.imshow(animate_U_x_T[i][mid_idx], animated=True) 90 | ims.append([im]) 91 | 92 | ani = animation.ArtistAnimation(fig, ims, interval=100, blit=True, 93 | repeat_delay=None) 94 | 95 | #ani.save('dynamic_images.mp4') 96 | ani.save('animation.gif', writer='imagemagick', fps=30) 97 | 98 | plt.show() -------------------------------------------------------------------------------- /3D/Project/Plotting/basic_units.py: -------------------------------------------------------------------------------- 1 | """ 2 | =========== 3 | Basic Units 4 | =========== 5 | 6 | """ 7 | 8 | import math 9 | 10 | import numpy as np 11 | 12 | import matplotlib.units as units 13 | import matplotlib.ticker as ticker 14 | 15 | 16 | class ProxyDelegate(object): 17 | def __init__(self, fn_name, proxy_type): 18 | self.proxy_type = proxy_type 19 | self.fn_name = fn_name 20 | 21 | def __get__(self, obj, objtype=None): 22 | return self.proxy_type(self.fn_name, obj) 23 | 24 | 25 | class TaggedValueMeta(type): 26 | def __init__(self, name, bases, dict): 27 | for fn_name in self._proxies: 28 | try: 29 | dummy = getattr(self, fn_name) 30 | except AttributeError: 31 | setattr(self, fn_name, 32 | ProxyDelegate(fn_name, self._proxies[fn_name])) 33 | 34 | 35 | class PassThroughProxy(object): 36 | def __init__(self, fn_name, obj): 37 | self.fn_name = fn_name 38 | self.target = obj.proxy_target 39 | 40 | def __call__(self, *args): 41 | fn = getattr(self.target, self.fn_name) 42 | ret = fn(*args) 43 | return ret 44 | 45 | 46 | class ConvertArgsProxy(PassThroughProxy): 47 | def __init__(self, fn_name, obj): 48 | PassThroughProxy.__init__(self, fn_name, obj) 49 | self.unit = obj.unit 50 | 51 | def __call__(self, *args): 52 | converted_args = [] 53 | for a in args: 54 | try: 55 | converted_args.append(a.convert_to(self.unit)) 56 | except AttributeError: 57 | converted_args.append(TaggedValue(a, self.unit)) 58 | converted_args = tuple([c.get_value() for c in converted_args]) 59 | return PassThroughProxy.__call__(self, *converted_args) 60 | 61 | 62 | class ConvertReturnProxy(PassThroughProxy): 63 | def __init__(self, fn_name, obj): 64 | PassThroughProxy.__init__(self, fn_name, obj) 65 | self.unit = obj.unit 66 | 67 | 
def __call__(self, *args): 68 | ret = PassThroughProxy.__call__(self, *args) 69 | return (NotImplemented if ret is NotImplemented 70 | else TaggedValue(ret, self.unit)) 71 | 72 | 73 | class ConvertAllProxy(PassThroughProxy): 74 | def __init__(self, fn_name, obj): 75 | PassThroughProxy.__init__(self, fn_name, obj) 76 | self.unit = obj.unit 77 | 78 | def __call__(self, *args): 79 | converted_args = [] 80 | arg_units = [self.unit] 81 | for a in args: 82 | if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'): 83 | # if this arg has a unit type but no conversion ability, 84 | # this operation is prohibited 85 | return NotImplemented 86 | 87 | if hasattr(a, 'convert_to'): 88 | try: 89 | a = a.convert_to(self.unit) 90 | except Exception: 91 | pass 92 | arg_units.append(a.get_unit()) 93 | converted_args.append(a.get_value()) 94 | else: 95 | converted_args.append(a) 96 | if hasattr(a, 'get_unit'): 97 | arg_units.append(a.get_unit()) 98 | else: 99 | arg_units.append(None) 100 | converted_args = tuple(converted_args) 101 | ret = PassThroughProxy.__call__(self, *converted_args) 102 | if ret is NotImplemented: 103 | return NotImplemented 104 | ret_unit = unit_resolver(self.fn_name, arg_units) 105 | if ret_unit is NotImplemented: 106 | return NotImplemented 107 | return TaggedValue(ret, ret_unit) 108 | 109 | 110 | class TaggedValue(metaclass=TaggedValueMeta): 111 | 112 | _proxies = {'__add__': ConvertAllProxy, 113 | '__sub__': ConvertAllProxy, 114 | '__mul__': ConvertAllProxy, 115 | '__rmul__': ConvertAllProxy, 116 | '__cmp__': ConvertAllProxy, 117 | '__lt__': ConvertAllProxy, 118 | '__gt__': ConvertAllProxy, 119 | '__len__': PassThroughProxy} 120 | 121 | def __new__(cls, value, unit): 122 | # generate a new subclass for value 123 | value_class = type(value) 124 | try: 125 | subcls = type(f'TaggedValue_of_{value_class.__name__}', 126 | (cls, value_class), {}) 127 | if subcls not in units.registry: 128 | units.registry[subcls] = basicConverter 129 | return object.__new__(subcls) 130 | except TypeError: 131 | if cls not in units.registry: 132 | units.registry[cls] = basicConverter 133 | return object.__new__(cls) 134 | 135 | def __init__(self, value, unit): 136 | self.value = value 137 | self.unit = unit 138 | self.proxy_target = self.value 139 | 140 | def __getattribute__(self, name): 141 | if name.startswith('__'): 142 | return object.__getattribute__(self, name) 143 | variable = object.__getattribute__(self, 'value') 144 | if hasattr(variable, name) and name not in self.__class__.__dict__: 145 | return getattr(variable, name) 146 | return object.__getattribute__(self, name) 147 | 148 | def __array__(self, dtype=object): 149 | return np.asarray(self.value).astype(dtype) 150 | 151 | def __array_wrap__(self, array, context): 152 | return TaggedValue(array, self.unit) 153 | 154 | def __repr__(self): 155 | return 'TaggedValue({!r}, {!r})'.format(self.value, self.unit) 156 | 157 | def __str__(self): 158 | return str(self.value) + ' in ' + str(self.unit) 159 | 160 | def __len__(self): 161 | return len(self.value) 162 | 163 | def __iter__(self): 164 | # Return a generator expression rather than use `yield`, so that 165 | # TypeError is raised by iter(self) if appropriate when checking for 166 | # iterability. 
167 | return (TaggedValue(inner, self.unit) for inner in self.value) 168 | 169 | def get_compressed_copy(self, mask): 170 | new_value = np.ma.masked_array(self.value, mask=mask).compressed() 171 | return TaggedValue(new_value, self.unit) 172 | 173 | def convert_to(self, unit): 174 | if unit == self.unit or not unit: 175 | return self 176 | try: 177 | new_value = self.unit.convert_value_to(self.value, unit) 178 | except AttributeError: 179 | new_value = self 180 | return TaggedValue(new_value, unit) 181 | 182 | def get_value(self): 183 | return self.value 184 | 185 | def get_unit(self): 186 | return self.unit 187 | 188 | 189 | class BasicUnit(object): 190 | def __init__(self, name, fullname=None): 191 | self.name = name 192 | if fullname is None: 193 | fullname = name 194 | self.fullname = fullname 195 | self.conversions = dict() 196 | 197 | def __repr__(self): 198 | return f'BasicUnit({self.name})' 199 | 200 | def __str__(self): 201 | return self.fullname 202 | 203 | def __call__(self, value): 204 | return TaggedValue(value, self) 205 | 206 | def __mul__(self, rhs): 207 | value = rhs 208 | unit = self 209 | if hasattr(rhs, 'get_unit'): 210 | value = rhs.get_value() 211 | unit = rhs.get_unit() 212 | unit = unit_resolver('__mul__', (self, unit)) 213 | if unit is NotImplemented: 214 | return NotImplemented 215 | return TaggedValue(value, unit) 216 | 217 | def __rmul__(self, lhs): 218 | return self*lhs 219 | 220 | def __array_wrap__(self, array, context): 221 | return TaggedValue(array, self) 222 | 223 | def __array__(self, t=None, context=None): 224 | ret = np.array([1]) 225 | if t is not None: 226 | return ret.astype(t) 227 | else: 228 | return ret 229 | 230 | def add_conversion_factor(self, unit, factor): 231 | def convert(x): 232 | return x*factor 233 | self.conversions[unit] = convert 234 | 235 | def add_conversion_fn(self, unit, fn): 236 | self.conversions[unit] = fn 237 | 238 | def get_conversion_fn(self, unit): 239 | return self.conversions[unit] 240 | 241 | def convert_value_to(self, value, unit): 242 | conversion_fn = self.conversions[unit] 243 | ret = conversion_fn(value) 244 | return ret 245 | 246 | def get_unit(self): 247 | return self 248 | 249 | 250 | class UnitResolver(object): 251 | def addition_rule(self, units): 252 | for unit_1, unit_2 in zip(units[:-1], units[1:]): 253 | if unit_1 != unit_2: 254 | return NotImplemented 255 | return units[0] 256 | 257 | def multiplication_rule(self, units): 258 | non_null = [u for u in units if u] 259 | if len(non_null) > 1: 260 | return NotImplemented 261 | return non_null[0] 262 | 263 | op_dict = { 264 | '__mul__': multiplication_rule, 265 | '__rmul__': multiplication_rule, 266 | '__add__': addition_rule, 267 | '__radd__': addition_rule, 268 | '__sub__': addition_rule, 269 | '__rsub__': addition_rule} 270 | 271 | def __call__(self, operation, units): 272 | if operation not in self.op_dict: 273 | return NotImplemented 274 | 275 | return self.op_dict[operation](self, units) 276 | 277 | 278 | unit_resolver = UnitResolver() 279 | 280 | cm = BasicUnit('cm', 'centimeters') 281 | inch = BasicUnit('inch', 'inches') 282 | inch.add_conversion_factor(cm, 2.54) 283 | cm.add_conversion_factor(inch, 1/2.54) 284 | 285 | radians = BasicUnit('rad', 'radians') 286 | degrees = BasicUnit('deg', 'degrees') 287 | radians.add_conversion_factor(degrees, 180.0/np.pi) 288 | degrees.add_conversion_factor(radians, np.pi/180.0) 289 | 290 | secs = BasicUnit('s', 'seconds') 291 | hertz = BasicUnit('Hz', 'Hertz') 292 | minutes = BasicUnit('min', 'minutes') 293 | 294 | 
secs.add_conversion_fn(hertz, lambda x: 1./x) 295 | secs.add_conversion_factor(minutes, 1/60.0) 296 | 297 | 298 | # radians formatting 299 | def rad_fn(x, pos=None): 300 | if x >= 0: 301 | n = int((x / np.pi) * 2.0 + 0.25) 302 | else: 303 | n = int((x / np.pi) * 2.0 - 0.25) 304 | 305 | if n == 0: 306 | return '0' 307 | elif n == 1: 308 | return r'$\pi/2$' 309 | elif n == 2: 310 | return r'$\pi$' 311 | elif n == -1: 312 | return r'$-\pi/2$' 313 | elif n == -2: 314 | return r'$-\pi$' 315 | elif n % 2 == 0: 316 | return fr'${n//2}\pi$' 317 | else: 318 | return fr'${n}\pi/2$' 319 | 320 | 321 | class BasicUnitConverter(units.ConversionInterface): 322 | @staticmethod 323 | def axisinfo(unit, axis): 324 | 'return AxisInfo instance for x and unit' 325 | 326 | if unit == radians: 327 | return units.AxisInfo( 328 | majloc=ticker.MultipleLocator(base=np.pi/2), 329 | majfmt=ticker.FuncFormatter(rad_fn), 330 | label=unit.fullname, 331 | ) 332 | elif unit == degrees: 333 | return units.AxisInfo( 334 | majloc=ticker.AutoLocator(), 335 | majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'), 336 | label=unit.fullname, 337 | ) 338 | elif unit is not None: 339 | if hasattr(unit, 'fullname'): 340 | return units.AxisInfo(label=unit.fullname) 341 | elif hasattr(unit, 'unit'): 342 | return units.AxisInfo(label=unit.unit.fullname) 343 | return None 344 | 345 | @staticmethod 346 | def convert(val, unit, axis): 347 | if units.ConversionInterface.is_numlike(val): 348 | return val 349 | if np.iterable(val): 350 | if isinstance(val, np.ma.MaskedArray): 351 | val = val.astype(float).filled(np.nan) 352 | out = np.empty(len(val)) 353 | for i, thisval in enumerate(val): 354 | if np.ma.is_masked(thisval): 355 | out[i] = np.nan 356 | else: 357 | try: 358 | out[i] = thisval.convert_to(unit).get_value() 359 | except AttributeError: 360 | out[i] = thisval 361 | return out 362 | if np.ma.is_masked(val): 363 | return np.nan 364 | else: 365 | return val.convert_to(unit).get_value() 366 | 367 | @staticmethod 368 | def default_units(x, axis): 369 | 'return the default unit for x or None' 370 | if np.iterable(x): 371 | for thisx in x: 372 | return thisx.unit 373 | return x.unit 374 | 375 | 376 | def cos(x): 377 | if np.iterable(x): 378 | return [math.cos(val.convert_to(radians).get_value()) for val in x] 379 | else: 380 | return math.cos(x.convert_to(radians).get_value()) 381 | 382 | 383 | basicConverter = BasicUnitConverter() 384 | units.registry[BasicUnit] = basicConverter 385 | units.registry[TaggedValue] = basicConverter 386 | -------------------------------------------------------------------------------- /3D/Project/Plotting/dns_plot.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import matplotlib.pyplot as plt 3 | from numpy import * 4 | from mayavi import mlab 5 | 6 | 7 | #X = mgrid[rank * Np:(rank + 1) * Np, :N, :N].astype(float) * 2 * pi / N 8 | #U = empty((3, Np, N, N),dtype=float32) 9 | with open('X.pkl', 'rb') as g: 10 | X = pickle.load(g) 11 | #Np=4 12 | #N = int((2**6)) 13 | #U = empty((3, Np, N, N),dtype=float32) 14 | #U = zeros(Np) 15 | #for rank in range(Np): 16 | with open('U_rank_'+str(0)+'.pkl', 'rb') as f: 17 | U_0= pickle.load(f) 18 | with open('U_rank_'+str(1)+'.pkl', 'rb') as f: 19 | U_1= pickle.load(f) 20 | with open('U_rank_'+str(2)+'.pkl', 'rb') as f: 21 | U_2= pickle.load(f) 22 | with open('U_rank_'+str(3)+'.pkl', 'rb') as f: 23 | U_3= pickle.load(f) 24 | 25 | #U=[U_0,U_1,U_2,U_3] 26 | #U[0,:,:32,:,:]=U_0 27 | U = 
concatenate([U_0,U_1,U_2,U_3],axis=2) 28 | print(shape(U_0)) 29 | print(shape(U)) 30 | 31 | #print('printing X: \n\n') 32 | ##print(X[0][1][:,:,1]) 33 | #print('\n\nPrinting U: \n\n') 34 | #print(U[0][0,:,:,3]) 35 | #print('\n\n') 36 | #index pickle elements with [0] 37 | #print(U[0] is float) 38 | #plt.contourf(X[0][0][:, :, 0], X[0][0][:, :, 0], U[0][0,:, :, 0], 100) 39 | #plt.colorbar() 40 | #plt.show() 41 | 42 | mlab.pipeline.image_plane_widget(mlab.pipeline.scalar_field(U[0][2]), 43 | plane_orientation='x_axes', 44 | slice_index=100, 45 | ) 46 | #mlab.quiver3d(U[0][0],U[0][1],U[0][2]) 47 | 48 | mlab.outline() 49 | mlab.show() -------------------------------------------------------------------------------- /3D/Project/Plotting/radians_plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | def multiple_formatter(denominator=2, number=np.pi, latex=r'\pi'): 5 | def gcd(a, b): 6 | while b: 7 | a, b = b, a%b 8 | return a 9 | def _multiple_formatter(x, pos): 10 | den = denominator 11 | num = int(np.rint(den*x/number)) 12 | com = gcd(num,den) 13 | (num,den) = (int(num/com),int(den/com)) 14 | if den==1: 15 | if num==0: 16 | return r'$0$' 17 | if num==1: 18 | return r'$%s$'%latex 19 | elif num==-1: 20 | return r'$-%s$'%latex 21 | else: 22 | return r'$%s%s$'%(num,latex) 23 | else: 24 | if num==1: 25 | return r'$\frac{%s}{%s}$'%(latex,den) 26 | elif num==-1: 27 | return r'$\frac{-%s}{%s}$'%(latex,den) 28 | else: 29 | return r'$\frac{%s%s}{%s}$'%(num,latex,den) 30 | return _multiple_formatter 31 | 32 | class Multiple: 33 | def __init__(self, denominator=2, number=np.pi, latex=r'\pi'): 34 | self.denominator = denominator 35 | self.number = number 36 | self.latex = latex 37 | 38 | def locator(self): 39 | return plt.MultipleLocator(self.number / self.denominator) 40 | 41 | def formatter(self): 42 | return plt.FuncFormatter(multiple_formatter(self.denominator, self.number, self.latex)) -------------------------------------------------------------------------------- /3D/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/danielhalvorsen/Project_Turbulence_Modelling/4cb925604ace9f065017e0cbb41ec4b19a00f065/3D/__init__.py -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DNS solver for the Navier-Stokes equations using a spectral method # 2 | 3 | ### Homogeneous isotropic forced turbulence. ### 4 | 5 |
6 | [animation omitted from text export]
9 | 3D-isotropic turbulence, N=512, Re=1600, T=60.
13 | [animation omitted from text export]
16 | Particle distribution in a 3D-isotropic turbulence, N=512, Re=1600, Tmax = 46 seconds.
18 | 19 | 20 | See [1] for the initialization and [2] for a section on forcing the lowest wavenumbers (k <= kf = 8) to maintain a constant turbulent kinetic energy; a minimal sketch of such a forcing step is given below. Parameters of the initial condition are chosen so that the kinetic energy matches the Taylor-Green initial condition (a = [9.5, 3.5], C = [10000, 2600]). 23 |
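The forcing scheme itself lives in the solver scripts, not in this README; the snippet below is only a minimal sketch, assuming a pseudo-spectral layout in which `u_hat` holds the Fourier coefficients of the velocity with shape (3, N, N, N) and `K` holds the matching wavenumber mesh. The function name, argument names, and the dropped FFT normalization factors are illustrative assumptions, not the repository's actual API.

```python
import numpy as np

def force_low_wavenumbers(u_hat, K, k_f=8, E_target=1.0):
    """Rescale modes with |k| <= k_f so the total kinetic energy stays at E_target.

    Sketch only: FFT normalization constants are dropped, and the real solver
    may force the low-wavenumber shells differently.
    """
    k_mag = np.sqrt(K[0]**2 + K[1]**2 + K[2]**2)
    forced = k_mag <= k_f                              # boolean mask of forced modes
    E_modes = 0.5 * np.sum(np.abs(u_hat)**2, axis=0)   # spectral energy per mode
    E_forced = E_modes[forced].sum()
    E_rest = E_modes[~forced].sum()
    if E_forced > 0.0:
        # gain chosen so that E_rest + gain**2 * E_forced == E_target
        gain = np.sqrt(max(E_target - E_rest, 0.0) / E_forced)
        u_hat[:, forced] *= gain
    return u_hat

# usage sketch: apply after every time step, e.g.
# u_hat = force_low_wavenumbers(u_hat, K, k_f=8, E_target=E0)
```

The point of the operation is simply that the energy dissipated by viscosity is pumped back into the shells with k <= kf after each step, while the higher wavenumbers are left untouched.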
24 | ### Decaying Taylor Green Vortex. ###
30 | [animation omitted from text export]
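For the decaying case, the classical Taylor-Green vortex provides the initial velocity field. The lines below show the textbook form on a [0, 2*pi)^3 grid; the grid size and unit amplitude are illustrative assumptions, and the solver's own scaling of the field may differ.

```python
import numpy as np

N = 64                                      # illustrative grid size
x = np.arange(N) * 2 * np.pi / N
X, Y, Z = np.meshgrid(x, x, x, indexing='ij')

U = np.empty((3, N, N, N))
U[0] =  np.sin(X) * np.cos(Y) * np.cos(Z)   # u-component
U[1] = -np.cos(X) * np.sin(Y) * np.cos(Z)   # v-component
U[2] =  0.0                                 # w-component
```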