├── ntk.py
├── variable_speed.py
├── README.md
├── .gitignore
├── config.py
├── getGIF.py
├── annexe
│   ├── annexe_1.py
│   └── annexe_2.py
├── real_sol.py
├── equation.py
├── vrac
│   └── bails_sombres.py
├── dataset.py
├── dataset2D.py
├── real_sol2d.py
├── gradients.py
├── network.py
├── variational_network.py
├── network2d.py
├── pinn_training.py
├── pinn_training2D.py
└── variational_network2D.py

/ntk.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | 
--------------------------------------------------------------------------------
/variable_speed.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | 
4 | def c_fun(x, t):
5 |     # Parabolic wave-speed profile
6 |     c = 4*x**2 - 4*x + 3
7 |     return c
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PINN_torch
2 | 
3 | PyTorch implementation of a Physics-Informed Neural Network (PINN) for forward prediction of the wave equation in heterogeneous domains, with a multi-dimensional spatial study.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | first_nn.py
3 | .vscode
4 | results
5 | generated*
6 | loss*
7 | model*
8 | PhyCRNet
9 | runs
10 | profile.txt
11 | profiling.py
12 | vrac
13 | figs
14 | results2Dnew
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | DEFAULT_CONFIG = {
2 |     "N_i": 16,
3 |     "N_b": 16,
4 |     "N_r": 16,
5 |     "l_b": 0.0,
6 |     "u_b": 1.0,
7 |     "epochs": 5000,
8 |     "N_plotting": 100,
9 |     "lr": 1e-4,
10 |     "N_neurons": 20,
11 |     "N_layers": 8,
12 | }
--------------------------------------------------------------------------------
/getGIF.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | 
3 | 
4 | def create_gif(epoch, len_t):
5 |     files = []
6 |     for i in range(len_t):
7 |         seq = str(i)
8 |         file_names = 'results2Dnew/epoch_' + str(epoch) + "/t_" + seq + '.png'
9 |         files.append(file_names)
10 | 
11 |     # Create the frames
12 |     frames = []
13 |     for i in files:
14 |         new_frame = Image.open(i)
15 |         frames.append(new_frame)
16 | 
17 |     # Save into a GIF file that loops forever
18 |     frames[0].save(f'results2Dnew/animation_{epoch}.gif', format='GIF',
19 |                    append_images=frames[1:],
20 |                    save_all=True,
21 |                    duration=40, loop=0)
--------------------------------------------------------------------------------
/annexe/annexe_1.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | 
4 | # 'Solarize_Light2'
5 | 
6 | t = 10        # final time
7 | nt = 10000
8 | x = 10        # length of the string
9 | nx = 100      # nx, nt: numbers of interior steps
10 | dx = (x/nx)   # spatial step
11 | dt = (t/nt)   # time step
12 | c = 0.5       # wave speed
13 | alphacarre = ((c*dt)/dx)**2
14 | T = 5
15 | f = lambda x: np.sin((x*np.pi*2)/10)
16 | 
17 | X = np.arange(0, t+dt, dt)  # time grid with step dt
18 | Y = np.arange(0, x+dx, dx)  # space grid with step dx
19 | U_0 = np.array(list(map(f, np.arange(dx, x, dx))))  # initial condition u(x,0), sine wave
20 | U_1 = np.array(list(map(f, np.arange(dx, x, dx))))  # initial condition u(x,-dt), sine wave
21 | 
22 | L
= np.zeros(nx-1) #vecteur limite 23 | M = np.zeros((nx-1, nx-1)); 24 | for i in range(0,nx-1): 25 | for j in range(0,nx-1): 26 | if(i==j): 27 | M[i, j] = -2.0; 28 | if(abs(i-j) == 1): 29 | M[i, j] = (1.0); 30 | Resultat= [] 31 | D=np.concatenate((np.array([0]),U_0),axis = 0) 32 | D=np.concatenate((D,np.array([0])),axis = 0) 33 | Resultat.append(D.tolist()) 34 | U_n = U_0 35 | U_n_1 = U_1 36 | B= 2*np.identity(nx-1) + alphacarre*M 37 | C =alphacarre*M 38 | for i in range(nt): 39 | U__n= np.dot(B,U_n) - U_n_1 + np.dot(C,L) 40 | U_n_1=U_n 41 | U_n=U__n 42 | D=np.concatenate((np.array([0]),U_n),axis = 0) 43 | D=np.concatenate((D,np.array([0])),axis = 0) 44 | Resultat.append (D.tolist()) 45 | if i * dt <= T and T<= (i+1)*dt : 46 | U_T = D 47 | Resultat = np.array(Resultat) 48 | 49 | #X est l'axe temporel 50 | #Y est l'axe spatial 51 | X, Y = np.meshgrid(Y,X) 52 | # X est l'axe spatial 53 | # Y est l'axe temporel 54 | ax = plt.axes(projection = '3d') 55 | ax.plot_surface(X,Y,Resultat,cmap='plasma') 56 | plt.show() 57 | 58 | -------------------------------------------------------------------------------- /real_sol.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from mpl_toolkits.mplot3d import Axes3D 4 | import torch 5 | from math import * 6 | 7 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 8 | 9 | 10 | def real_sol(x, t): 11 | u = torch.sin(np.pi*x)*torch.cos(2*np.pi*t) + 0.5 * \ 12 | torch.sin(4*np.pi*x)*torch.cos(8*np.pi*t) 13 | return u 14 | 15 | 16 | def plot_real_sol(lb, ub, N): 17 | x1space = np.linspace(lb[0], ub[0], N) 18 | tspace = np.linspace(lb[1], ub[1], N) 19 | T, X1 = np.meshgrid(tspace, x1space) 20 | 21 | U = real_sol(X1, T) 22 | 23 | plt.style.use('dark_background') 24 | 25 | fig = plt.figure() 26 | ax = fig.add_subplot(111) 27 | ax.scatter(T, X1, c=U, marker='X', vmin=-1, vmax=1) 28 | ax.set_xlabel('$t$') 29 | ax.set_ylabel('$x1$') 30 | plt.savefig(f'results/real_sol.png') 31 | plt.close() 32 | 33 | 34 | def plot_real_sol3D(lb, ub, N): 35 | x1space = np.linspace(lb[0], ub[0], N) 36 | tspace = np.linspace(lb[1], ub[1], N) 37 | T, X1 = np.meshgrid(tspace, x1space) 38 | T = torch.from_numpy(T).view(1, N*N, 1).to(device).float() 39 | X1 = torch.from_numpy(X1).view(1, N*N, 1).to(device).float() 40 | U = real_sol(X1, T) 41 | U = torch.squeeze(U).detach().cpu().numpy() 42 | T, X1 = T.view(N, N).detach().cpu().numpy(), X1.view( 43 | N, N).detach().cpu().numpy() 44 | plt.style.use('dark_background') 45 | 46 | fig = plt.figure() 47 | ax = fig.gca(projection='3d') 48 | ax.scatter(T, X1, U, c=U, marker='X', vmin=-1, vmax=1) 49 | ax.set_xlabel('$t$') 50 | ax.set_ylabel('$x1$') 51 | plt.savefig(f'results/real_sol3D.png') 52 | plt.close() 53 | 54 | 55 | # lb = [0, 0] 56 | # ub = [1, 1] 57 | # N = 100 58 | # plot_real_sol3D(lb, ub, N) 59 | -------------------------------------------------------------------------------- /equation.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | from torch import optim 6 | 7 | from torch.autograd import Variable 8 | from torch.autograd import grad 9 | import numpy as np 10 | 11 | 12 | def u0(t, x): 13 | """ 14 | Input: t,x = time and space points for initial condition 15 | Output: u_0(t,x) = solution on initial condition 16 | """ 17 | #return torch.sin(np.pi*x) + 0.5*torch.sin(4*np.pi*x) 18 | n 
= x.shape[0]
19 |     return torch.zeros((n,1))
20 | 
21 | def v0(t, x, dimension):
22 |     """
23 |     Input: t,x = time and space points for the initial velocity condition
24 |            dimension = space dimension for the model
25 |     Output: v_0(t,x) = velocity at the initial condition
26 |     """
27 |     n = x.shape[0]
28 |     res = torch.zeros((n, dimension))
29 |     return res
30 | 
31 | 
32 | def u_bound(t, x, dimension):
33 |     """
34 |     Input: t,x = time and space points for the boundary condition
35 |            dimension = space dimension for the model
36 |     Output: u_b(t,x) = solution on the boundary
37 |     """
38 |     n = x.shape[0]
39 |     res = torch.zeros((n, dimension))
40 |     return res
41 | 
42 | 
43 | def residual(t, x, u_t, u_tt, u_xx, c):
44 |     """
45 |     Input: t,x and derivatives of u
46 |     Output: residual of the PDE
47 |     """
48 |     #return u_tt - (c**2)*u_xx
49 |     return u_xx + np.pi**2 * torch.sin(np.pi * x) * torch.sin(np.pi * t)
50 | 
51 | 
52 | def true_u(x, a=0.5, c=2):
53 |     """
54 |     Input: x and hyperparameters
55 |     Output: true forward solution
56 |     """
57 |     t = x[:, 0]
58 |     x = x[:, 1]
59 |     return np.sin(np.pi * x) * np.cos(c * np.pi * t) + a * np.sin(2 * c * np.pi * x) * np.cos(4 * c * np.pi * t)
60 | 
61 | # DTYPE = 'float32'
62 | # tf.keras.backend.set_floatx(DTYPE)
63 | # a = tf.constant([0, 1], dtype=DTYPE)
64 | # print(a)
65 | 
--------------------------------------------------------------------------------
/annexe/annexe_2.py:
--------------------------------------------------------------------------------
1 | # imports
2 | import math
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import numpy.linalg as linalg
6 | from ipywidgets import *
7 | 
8 | f = lambda x: np.sin((x*np.pi*2)/10)
9 | 
10 | t = 10   # final time
11 | x = 10   # length of the string
12 | # Number of elements
13 | N = 10
14 | # Number of nodes
15 | n = N - 1
16 | # Element size
17 | h = x/N
18 | # Propagation speed
19 | c = 0.4
20 | # Delta time
21 | nx, nt = 100, 100
22 | dx = (x/nx)  # spatial step
23 | dt = (t/nt)  # time step
24 | X = np.arange(0, t+dt, dt)  # time grid with step dt
25 | Y = np.arange(0, x+dx, dx)  # space grid with step dx
26 | U_0 = np.array(list(map(f, np.arange(h, x, h))))  # initial condition u(x,0), sine wave
27 | U_1 = np.array(list(map(f, np.arange(h, x, h))))  # initial condition u(x,-dt), sine wave
28 | #nt = 100
29 | Resultat = []
30 | # Number of iterations
31 | iterations = 20000
32 | # Stepsize
33 | # stepSize = 100
34 | # Matrix construction
35 | # Time coefficient (mass) matrix
36 | A = np.zeros((n, n))
37 | # A[0, 0] = 1; # Left boundary
38 | # A[N, N] = 1; # Right boundary
39 | for i in range(n):
40 |     for j in range(n):
41 |         if (i == j):
42 |             A[i, j] = (2.0/3.0)*h
43 |         if (abs(i-j) == 1):
44 |             A[i, j] = (1.0/6.0)*h
45 | # Space coefficient (stiffness) matrix
46 | B = np.zeros((n, n))
47 | for i in range(n):
48 |     for j in range(n):
49 |         if (i == j):
50 |             B[i, j] = (2.0/h)
51 |         if (abs(i-j) == 1):
52 |             B[i, j] = -(1.0/h)
53 | # A single time step
54 | invA = linalg.inv(A)
55 | M = -(c**2)*(dt**2)*invA.dot(B)  # dt**2: the second-order central difference in time needs dt squared (a bare dt here was a bug)
56 | U_n = U_0
57 | U_n_1 = U_1
58 | Resultat = []
59 | D = np.concatenate((np.array([0]), U_0), axis=0)
60 | D = np.concatenate((D, np.array([0])), axis=0)
61 | Resultat.append(D.tolist())
62 | for i in range(nt):
63 |     U__n = np.dot(M + 2*np.identity(n), U_n) - U_n_1
64 |     U_n_1 = U_n
65 |     U_n = U__n
66 |     D = np.concatenate((np.array([0]), U_n), axis=0)
67 |     D = np.concatenate((D, np.array([0])), axis=0)
68 |     Resultat.append(D.tolist())
69 | Resultat = np.array(Resultat)
70 | 
71 | X = np.arange(0, t+dt, dt)
72 | Y = np.arange(0, x+h, h)
73 | 
74 | X, Y = np.meshgrid(Y, X)
75 | # X is the spatial axis
76 | # Y is the temporal axis
77 | 
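# (Added note, a recap of the scheme above rather than part of the original
# file.) With mass matrix A and stiffness matrix B assembled from linear
# elements, the loop implements the explicit two-step recursion
#     U^{n+1} = (2I + M) U^n - U^{n-1},    M = -(c**2) * dt**2 * A^{-1} B,
# i.e. a central difference in time applied to A u'' = -(c**2) B u, with the
# string pinned at both ends (the zeros appended to D).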
78 | ax = plt.axes(projection = '3d') 79 | ax.plot_surface(X,Y,Resultat,cmap='plasma') 80 | plt.show() 81 | 82 | 83 | -------------------------------------------------------------------------------- /vrac/bails_sombres.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import math 5 | 6 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 7 | class RNN(nn.Module): 8 | def __init__(self, input_size, hidden_size, output_size, num_layers=2, lstm=False): 9 | super(RNN, self).__init__() 10 | self.hidden_size = hidden_size 11 | self.num_layers = num_layers 12 | self.lstm = lstm 13 | if lstm: 14 | self.rnn = nn.LSTM(input_size, hidden_size, 15 | num_layers, batch_first=True) 16 | else: 17 | self.rnn = nn.RNN(input_size, hidden_size, 18 | num_layers, batch_first=True) 19 | self.fc1 = nn.Linear(hidden_size, hidden_size) 20 | self.fc2 = nn.Linear(hidden_size, output_size) 21 | 22 | def forward(self, x, t): 23 | x = torch.cat([x, t], dim=-1) 24 | h0 = torch.zeros(self.num_layers, x.size(0), 25 | self.hidden_size).to(device) 26 | if self.lstm: 27 | c0 = torch.zeros(self.num_layers, x.size(0), 28 | self.hidden_size).to(device) 29 | out, (hn, cn) = self.rnn(x, (h0, c0)) 30 | hn = hn.view(-1, self.hidden_size) 31 | out = nn.Tanh()(hn) 32 | else: 33 | out, _ = self.rnn(x, h0) 34 | out = nn.Tanh()(self.fc1(out)) 35 | out = self.fc2(out[:, -1, :]) 36 | return out 37 | 38 | class Transformer(nn.Module): 39 | def __init__(self, input_size, hidden_size, output_size, num_layers=2, lstm=False): 40 | super(Transformer, self).__init__() 41 | self.hidden_size = hidden_size 42 | self.num_layers = num_layers 43 | self.lstm = lstm 44 | n_head = 2 45 | head_dim = 16 46 | dmodel = n_head * head_dim 47 | self.emb = nn.Linear(input_size, dmodel) 48 | encoder_layer = nn.TransformerEncoderLayer(dmodel, n_head, dim_feedforward=hidden_size, dropout=0.1) 49 | self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers) 50 | self.fc1 = nn.Linear(dmodel, hidden_size) 51 | self.fc2 = nn.Linear(hidden_size, output_size) 52 | 53 | def forward(self, x, t): 54 | x = torch.cat([x, t], dim=-1) 55 | x = self.emb(x) 56 | x = self.transformer_encoder(x) 57 | x = nn.Tanh()(self.fc1(x)) 58 | out = self.fc2(x[:, -1, :]) 59 | return out 60 | 61 | 62 | class GRU(nn.Module): 63 | def __init__(self, input_size, hidden_size, output_size, num_layers=2, lstm=False): 64 | super(GRU, self).__init__() 65 | self.hidden_size = hidden_size 66 | self.num_layers = num_layers 67 | self.lstm = lstm 68 | self.rnn = nn.GRU(input_size, hidden_size, 69 | num_layers, batch_first=True) 70 | self.fc1 = nn.Linear(hidden_size, hidden_size) 71 | self.fc2 = nn.Linear(hidden_size, output_size) 72 | 73 | def forward(self, x, t): 74 | x = torch.cat([x, t], dim=-1) 75 | h0 = torch.zeros(self.num_layers, x.size(0), 76 | self.hidden_size).to(device) 77 | out, _ = self.rnn(x, h0) 78 | out = nn.Tanh()(self.fc1(out)) 79 | out = self.fc2(out[:, -1, :]) 80 | return out -------------------------------------------------------------------------------- /dataset.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from config import DEFAULT_CONFIG 4 | 5 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 6 | N_i = DEFAULT_CONFIG["N_i"] 7 | ########################################################### POINTS DEFINITION 
########################################################### 8 | ######################################################################################################################################### 9 | def define_points(N_i,N_b,N_r,l_b,u_b): 10 | t_i = torch.zeros(N_i,1) 11 | x_i = torch.linspace(l_b,u_b,N_i).view(N_i,1) 12 | u_i = torch.sin(np.pi*x_i) + 0.5*torch.sin(4*np.pi*x_i) 13 | #u_i = torch.zeros(N_i,1) 14 | 15 | t_b = torch.linspace(l_b,u_b,N_b).view(N_b,1) 16 | 17 | x_b = torch.bernoulli(0.5*torch.ones(N_b,1)) 18 | u_b = torch.zeros(N_b,1) 19 | 20 | t_r = torch.rand(N_r, 1) 21 | x_r = torch.rand(N_r, 1) 22 | return t_i,x_i,u_i,t_b,x_b,u_b,t_r,x_r 23 | 24 | def define_points_begin(N_ri,l_b,u_b): 25 | t_ri = torch.rand(N_ri,1) * 0.2* (u_b - l_b) 26 | x_ri = torch.rand(N_ri,1) 27 | return t_ri,x_ri 28 | 29 | #Normalize data with min max 30 | def normalize_data(x_r,t_r, 31 | u_b,x_b,t_b, 32 | u_i,x_i,t_i): 33 | x_r,t_r = 2*(x_r-x_r.min())/(x_r.max()-x_r.min())-1, 2*(t_r-t_r.min())/(t_r.max()-t_r.min())-1 34 | x_b,t_b = 2*(x_b-x_b.min())/(x_b.max()-x_b.min())-1, 2*(t_b-t_b.min())/(t_b.max()-t_b.min())-1 35 | x_i,t_i = 2*(x_i-x_i.min())/(x_i.max()-x_i.min())-1, -1*torch.ones(N_i,1) 36 | return x_r,t_r,u_b,x_b,t_b,u_i,x_i,t_i 37 | 38 | def unnormalize_data(x_r,t_r, 39 | u_b,x_b,t_b, 40 | u_i,x_i,t_i, 41 | x_r_min,x_r_max, 42 | t_r_min,t_r_max, 43 | u_b_min,u_b_max, 44 | x_b_min,x_b_max, 45 | t_b_min,t_b_max, 46 | u_i_min,u_i_max, 47 | x_i_min,x_i_max, 48 | t_i_min,t_i_max): 49 | x_r,t_r = x_r*(x_r_max-x_r_min)+x_r_min,t_r*(t_r_max-t_r_min)+t_r_min 50 | u_b,x_b,t_b = u_b*(u_b_max-u_b_min)+u_b_min,x_b*(x_b_max-x_b_min)+x_b_min,t_b*(t_b_max-t_b_min)+t_b_min 51 | u_i,x_i,t_i = u_i*(u_i_max-u_i_min)+u_i_min,x_i*(x_i_max-x_i_min)+x_i_min,t_i*(t_i_max-t_i_min)+t_i_min 52 | return x_r,t_r,u_b,x_b,t_b,u_i,x_i,t_i 53 | 54 | 55 | ############################################################## TRAIN VAL SPLIT ################################################################### 56 | def val_split(x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i, split=0.2): 57 | """Splits data into training and validation set with random order""" 58 | x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i = x_r.to(device), t_r.to(device), u_b.to( 59 | device), x_b.to(device), t_b.to(device), u_i.to(device), x_i.to(device), t_i.to(device) 60 | N_r = x_r.shape[0] 61 | N_b = x_b.shape[0] 62 | N_i = x_i.shape[0] 63 | N_r_val = int(N_r*split) 64 | N_b_val = int(N_b*split) 65 | N_i_val = int(N_i*split) 66 | N_r_train = N_r - N_r_val 67 | N_b_train = N_b - N_b_val 68 | N_i_train = N_i - N_i_val 69 | # Permet de mélanger pr que les données ne soient pas dans l'ordre pr l'entrainement 70 | idx_r = torch.randperm(N_r) 71 | idx_b = torch.randperm(N_b) 72 | idx_i = torch.randperm(N_i) 73 | x_r_train, t_r_train = x_r[idx_r[:N_r_train]], t_r[idx_r[:N_r_train]] 74 | x_r_val, t_r_val = x_r[idx_r[N_r_train:]], t_r[idx_r[N_r_train:]] 75 | u_b_train, x_b_train, t_b_train = u_b[idx_b[:N_b_train] 76 | ], x_b[idx_b[:N_b_train]], t_b[idx_b[:N_b_train]] 77 | u_b_val, x_b_val, t_b_val = u_b[idx_b[N_b_train:] 78 | ], x_b[idx_b[N_b_train:]], t_b[idx_b[N_b_train:]] 79 | u_i_train, x_i_train, t_i_train = u_i[idx_i[:N_i_train] 80 | ], x_i[idx_i[:N_i_train]], t_i[idx_i[:N_i_train]] 81 | u_i_val, x_i_val, t_i_val = u_i[idx_i[N_i_train:] 82 | ], x_i[idx_i[N_i_train:]], t_i[idx_i[N_i_train:]] 83 | return [x_r_train, t_r_train, u_b_train, x_b_train, t_b_train, u_i_train, x_i_train, t_i_train], [x_r_val, t_r_val, u_b_val, x_b_val, t_b_val, u_i_val, x_i_val, t_i_val] 
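# --- Usage sketch (added; not in the original file). A minimal example of how
# the helpers above are assumed to chain together, mirroring pinn_training.py:
# sample the three point families, then split each into train/validation sets.
if __name__ == "__main__":
    cfg = DEFAULT_CONFIG
    t_i, x_i, u_i, t_b, x_b, u_b, t_r, x_r = define_points(
        cfg["N_i"], cfg["N_b"], cfg["N_r"], cfg["l_b"], cfg["u_b"])
    train_set, val_set = val_split(x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i, split=0.2)
    # Each list holds 8 tensors: collocation, boundary and initial points.
    print([tuple(t.shape) for t in train_set])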
-------------------------------------------------------------------------------- /dataset2D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from config import DEFAULT_CONFIG 4 | 5 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 6 | N_i = DEFAULT_CONFIG["N_i"] 7 | ########################################################### POINTS DEFINITION ########################################################### 8 | ######################################################################################################################################### 9 | 10 | 11 | def define_points(N_i, N_b, N_r, l_b, u_b): 12 | t_i = torch.zeros(N_i, 1) 13 | x_i = torch.linspace(l_b, u_b, N_i).view(N_i, 1) 14 | y_i = torch.linspace(l_b, u_b, N_i).view(N_i, 1) 15 | #u_i = torch.sin(np.pi*x_i) + 0.5*torch.sin(4*np.pi*x_i) 16 | u_i = torch.zeros(N_i, 1) 17 | 18 | t_b = torch.linspace(l_b, u_b, N_b).view(N_b, 1) 19 | 20 | x_b = torch.bernoulli(0.5*torch.ones(N_b, 1)) 21 | y_b = torch.bernoulli(0.5*torch.ones(N_b, 1)) 22 | u_b = torch.zeros(N_b, 1) 23 | 24 | t_r = torch.rand(N_r, 1) 25 | x_r = torch.rand(N_r, 1) 26 | y_r = torch.rand(N_r, 1) 27 | return t_i, x_i, y_i, u_i, t_b, x_b, y_b, u_b, t_r, x_r, y_r 28 | 29 | 30 | def define_points_begin(N_ri, l_b, u_b): 31 | t_ri = torch.rand(N_ri, 1) * 0.2 * (u_b - l_b) 32 | x_ri = torch.rand(N_ri, 1) 33 | y_ri = torch.rand(N_ri, 1) 34 | return t_ri, x_ri, y_ri 35 | 36 | # Normalize data with min max 37 | 38 | 39 | def normalize_data(x_r, y_r, t_r, 40 | u_b, x_b, y_b, t_b, 41 | u_i, x_i, y_i, t_i): 42 | x_r, t_r = 2*(x_r-x_r.min())/(x_r.max()-x_r.min()) - \ 43 | 1, 2*(t_r-t_r.min())/(t_r.max()-t_r.min())-1 44 | x_b, t_b = 2*(x_b-x_b.min())/(x_b.max()-x_b.min()) - \ 45 | 1, 2*(t_b-t_b.min())/(t_b.max()-t_b.min())-1 46 | x_i, t_i = 2*(x_i-x_i.min())/(x_i.max()-x_i.min())-1, -1*torch.ones(N_i, 1) 47 | y_r = 2*(y_r-y_r.min())/(y_r.max()-y_r.min())-1 48 | y_b = 2*(y_b-y_b.min())/(y_b.max()-y_b.min())-1 49 | y_i = 2*(y_i-y_i.min())/(y_i.max()-y_i.min())-1 50 | return x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i 51 | 52 | 53 | def unnormalize_data(x_r, y_r, t_r, 54 | u_b, x_b, y_b, t_b, 55 | u_i, x_i, y_i, t_i, 56 | x_r_min, x_r_max, 57 | y_r_min, y_r_max, 58 | t_r_min, t_r_max, 59 | u_b_min, u_b_max, 60 | x_b_min, x_b_max, 61 | y_b_min, y_b_max, 62 | t_b_min, t_b_max, 63 | u_i_min, u_i_max, 64 | x_i_min, x_i_max, 65 | y_i_min, y_i_max, 66 | t_i_min, t_i_max): 67 | x_r, t_r = x_r*(x_r_max-x_r_min)+x_r_min, t_r*(t_r_max-t_r_min)+t_r_min 68 | u_b, x_b, t_b = u_b*(u_b_max-u_b_min)+u_b_min, x_b * \ 69 | (x_b_max-x_b_min)+x_b_min, t_b*(t_b_max-t_b_min)+t_b_min 70 | u_i, x_i, t_i = u_i*(u_i_max-u_i_min)+u_i_min, x_i * \ 71 | (x_i_max-x_i_min)+x_i_min, t_i*(t_i_max-t_i_min)+t_i_min 72 | y_r = y_r*(y_r_max-y_r_min)+y_r_min 73 | y_b = y_b*(y_b_max-y_b_min)+y_b_min 74 | y_i = y_i*(y_i_max-y_i_min)+y_i_min 75 | return x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i 76 | 77 | 78 | ############################################################## TRAIN VAL SPLIT ################################################################### 79 | def val_split(x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i, split=0.2): 80 | """Splits data into training and validation set with random order""" 81 | x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i = x_r.to(device), t_r.to(device), u_b.to( 82 | device), x_b.to(device), t_b.to(device), u_i.to(device), x_i.to(device), t_i.to(device) 83 | y_r = 
y_r.to(device) 84 | y_b = y_b.to(device) 85 | y_i = y_i.to(device) 86 | 87 | N_r = x_r.shape[0] 88 | N_b = x_b.shape[0] 89 | N_i = x_i.shape[0] 90 | N_r_val = int(N_r*split) 91 | N_b_val = int(N_b*split) 92 | N_i_val = int(N_i*split) 93 | N_r_train = N_r - N_r_val 94 | N_b_train = N_b - N_b_val 95 | N_i_train = N_i - N_i_val 96 | # Permet de mélanger pr que les données ne soient pas dans l'ordre pr l'entrainement 97 | idx_r = torch.randperm(N_r) 98 | idx_b = torch.randperm(N_b) 99 | idx_i = torch.randperm(N_i) 100 | x_r_train, t_r_train = x_r[idx_r[:N_r_train]], t_r[idx_r[:N_r_train]] 101 | y_r_train = y_r[idx_r[:N_r_train]] 102 | 103 | x_r_val, t_r_val = x_r[idx_r[N_r_train:]], t_r[idx_r[N_r_train:]] 104 | y_r_val = y_r[idx_r[N_r_train:]] 105 | 106 | u_b_train, x_b_train, t_b_train = u_b[idx_b[:N_b_train] 107 | ], x_b[idx_b[:N_b_train]], t_b[idx_b[:N_b_train]] 108 | y_b_train = y_b[idx_b[:N_b_train]] 109 | 110 | u_b_val, x_b_val, t_b_val = u_b[idx_b[N_b_train:] 111 | ], x_b[idx_b[N_b_train:]], t_b[idx_b[N_b_train:]] 112 | y_b_val = y_b[idx_b[N_b_train:]] 113 | 114 | u_i_train, x_i_train, t_i_train = u_i[idx_i[:N_i_train] 115 | ], x_i[idx_i[:N_i_train]], t_i[idx_i[:N_i_train]] 116 | y_i_train = y_i[idx_i[:N_i_train]] 117 | 118 | u_i_val, x_i_val, t_i_val = u_i[idx_i[N_i_train:] 119 | ], x_i[idx_i[N_i_train:]], t_i[idx_i[N_i_train:]] 120 | y_i_val = y_i[idx_i[N_i_train:]] 121 | 122 | return [x_r_train, y_r_train, t_r_train, u_b_train, x_b_train, y_b_train, t_b_train, u_i_train, x_i_train, y_i_train, t_i_train], [x_r_val, y_r_val, t_r_val, u_b_val, x_b_val, y_b_val, t_b_val, u_i_val, x_i_val, y_i_val, t_i_val] 123 | -------------------------------------------------------------------------------- /real_sol2d.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from mpl_toolkits.mplot3d import Axes3D 4 | import torch 5 | from math import * 6 | 7 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 8 | 9 | 10 | def real_sol(x, y, t): 11 | return np.sin(2*np.pi*x)*np.sin(2*np.pi*y)*np.exp(-4*np.pi**2*t) 12 | 13 | 14 | def real_sol_jsp_trop(x, y, t): 15 | # The following code sample describes solving the 2D wave equation. 16 | # This code will look at a 2D sine wave under initial conditions. 17 | # There are a few different steps for doing this. 18 | 19 | # STEP 2. Set up the position and time grids (or axes). 20 | # Set up the position information. 21 | axis_size = 100 # Size of the 1D grid. 22 | side_length = 1 # Length of one side of one of the wave plot axes. 23 | dx, dy = side_length/axis_size, side_length/axis_size # Space step 24 | axis_points = np.linspace(0, side_length, axis_size) # Spatial grid points 25 | c = 1/np.sqrt(2) # Constant chosen in the 2D wave equation. 26 | 27 | # Set up the time grid to calcuate the equation. 28 | T = 20 # Total time (s) 29 | # Time step size to ensure a stable discretization scheme. 30 | dt = 0.5*(1/c) * (1/np.sqrt(dx**(-2) + dy**(-2))) 31 | n = int(T/dt) # Total number of time steps. 32 | 33 | # STEP 3. Initialization condition function for the 2D wave equation. 2D sine wave pattern in this example. 34 | def initial_cond(x, y): 35 | return np.sin(2*np.pi*x + 2*np.pi*y) 36 | 37 | # Create a meshgrid for the 3D function of initial wave. 38 | X, Y = np.meshgrid(axis_points, axis_points) 39 | 40 | # Calculate the first initial condition using the initialization function. This is the initial 41 | # wave state. 
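    # (Added note) The second time level U1, computed a few lines below, comes
    # from a Taylor expansion with zero initial velocity:
    #     U^1 = U^0 + (c**2 * dt**2 / 2) * Laplacian(U^0),
    # with the Laplacian discretized by centered second differences in x and y.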
42 | U = initial_cond(X, Y) 43 | 44 | # Assign initial boundary conditions to their own variables. 45 | B1 = U[:, 0] 46 | B2 = U[:, -1] 47 | B3 = U[0, :] 48 | B4 = U[-1, :] 49 | 50 | # Set up matrix for the 2nd initial condition. 51 | U1 = np.zeros((axis_size, axis_size)) 52 | 53 | # Calculate the 2nd initial condition needed for time iteration. 54 | U1[1:-1, 1:-1] = (U[1:-1, 1:-1] + (c**2/2)*(dt**2/dx**2)*(U[1:-1, 0:-2] - 2*U[1:-1, 1:-1] + U[1:-1, 2:]) + 55 | (c**2/2)*(dt**2/dy**2)*(U[0:-2, 1:-1] - 2*U[1:-1, 1:-1] + U[2:, 1:-1])) 56 | 57 | # Reinforce the boundary conditions on the surface after the 2nd initial condition. 58 | U1[:, 0] = B1 59 | U1[:, -1] = B2 60 | U1[0, :] = B3 61 | U1[-1, :] = B4 62 | 63 | # Assign these initial boundary conditions to their own variables. 64 | B5 = U1[:, 0] 65 | B6 = U1[:, -1] 66 | B7 = U1[0, :] 67 | B8 = U1[-1, :] 68 | 69 | # STEP 4. Solve the PDE for a result of all spatial positions after 70 | # time T has elapsed. 71 | # Create a leading array to update the wave at every time step. Initialize it with zeros. 72 | U2 = np.zeros((axis_size, axis_size)) 73 | 74 | # Create an initialized array to store all the wave amplitude map images for each time point. 75 | map_array = np.zeros((axis_size, axis_size, n)) 76 | 77 | # Initialize the first two slices of the array with the two initial wave maps. 78 | map_array[:, :, 0] = U 79 | map_array[:, :, 1] = U1 80 | 81 | # Numerically solve the PDE by iteration over the specified total time. 82 | for i in range(2, n): 83 | 84 | U2[1:-1, 1:-1] = (2*U1[1:-1, 1:-1] - U[1:-1, 1:-1] + (c**2)*((dt/dx)**2)*(U1[1:-1, 0:-2] - 2*U1[1:-1, 1:-1] + 85 | U1[1:-1, 2:]) + (c**2)*((dt/dy)**2)*(U1[0:-2, 1:-1] - 2*U1[1:-1, 1:-1] + 86 | U1[2:, 1:-1])) 87 | 88 | # Direchlet boundary conditions for the wave. 89 | U2[:, 0] = B5 90 | U2[:, -1] = B6 91 | U2[0, :] = B7 92 | U2[-1, :] = B8 93 | 94 | U1[:, 0] = B5 95 | U1[:, -1] = B6 96 | U1[0, :] = B7 97 | U1[-1, :] = B8 98 | 99 | U[:, 0] = B1 100 | U[:, -1] = B2 101 | U[0, :] = B3 102 | U[-1, :] = B4 103 | 104 | # Update the wave array with the 2D wave data. 105 | map_array[:, :, i] = U2 106 | 107 | # Update the trailing wave maps with the leading ones to prepare them for the next time loop iteration. 
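        # (Added note) Plain rebinding (U = U1; U1 = U2) would leave U1 and U2
        # pointing at the same array, so the next stencil update would read
        # values it had just overwritten; hence the explicit copies below.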
108 |         U = U1.copy()
109 |         U1 = U2.copy()  # .copy() avoids aliasing U1/U2 across iterations (see note above)
110 | 
111 |     return map_array
112 | 
113 | def plot_real_sol(lb, ub, N):
114 |     x1space = np.linspace(lb[0], ub[0], N)
115 |     tspace = np.linspace(lb[1], ub[1], N)
116 |     T, X1 = np.meshgrid(tspace, x1space)
117 | 
118 |     U = real_sol(X1, 0.25*np.ones_like(X1), T)  # real_sol takes (x, y, t); plot a fixed y-slice (y = 0.25, chosen arbitrarily)
119 | 
120 |     plt.style.use('dark_background')
121 | 
122 |     fig = plt.figure()
123 |     ax = fig.add_subplot(111)
124 |     ax.scatter(T, X1, c=U, marker='X', vmin=-1, vmax=1)
125 |     ax.set_xlabel('$t$')
126 |     ax.set_ylabel('$x1$')
127 |     plt.savefig(f'results/real_sol.png')
128 |     plt.close()
129 | 
130 | 
131 | def plot_real_sol3D(lb, ub, N):
132 |     x1space = np.linspace(lb[0], ub[0], N)
133 |     tspace = np.linspace(lb[1], ub[1], N)
134 |     T, X1 = np.meshgrid(tspace, x1space)
135 |     T = torch.from_numpy(T).view(1, N*N, 1).to(device).float()
136 |     X1 = torch.from_numpy(X1).view(1, N*N, 1).to(device).float()
137 |     U = real_sol(X1, 0.25*torch.ones_like(X1), T)  # fixed y-slice again, since real_sol takes (x, y, t)
138 |     U = torch.squeeze(U).detach().cpu().numpy()
139 |     T, X1 = T.view(N, N).detach().cpu().numpy(), X1.view(
140 |         N, N).detach().cpu().numpy()
141 |     plt.style.use('dark_background')
142 | 
143 |     fig = plt.figure()
144 |     ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed from recent Matplotlib
145 |     ax.scatter(T, X1, U, c=U, marker='X', vmin=-1, vmax=1)
146 |     ax.set_xlabel('$t$')
147 |     ax.set_ylabel('$x1$')
148 |     plt.savefig(f'results/real_sol3D.png')
149 |     plt.close()
150 | 
151 | 
152 | # lb = [0, 0]
153 | # ub = [1, 1]
154 | # N = 100
155 | # plot_real_sol3D(lb, ub, N)
--------------------------------------------------------------------------------
/gradients.py:
--------------------------------------------------------------------------------
1 | import torch
2 | 
3 | class Jacobian:
4 |     """Compute Jacobian matrix J: J[i][j] = dy_i/dx_j, where i = 0, ..., dim_y-1 and
5 |     j = 0, ..., dim_x - 1.
6 |     It is lazy evaluation, i.e., it only computes J[i][j] when needed.
7 |     Args:
8 |         ys: Output Tensor of shape (batch_size, dim_y).
9 |         xs: Input Tensor of shape (batch_size, dim_x).
10 |     """
11 | 
12 |     def __init__(self, ys, xs):
13 |         self.ys = ys
14 |         self.xs = xs
15 | 
16 |         self.dim_y = ys.shape[1]
17 |         self.dim_x = xs.shape[1]
18 | 
19 |         self.J = {}
20 | 
21 |     def __call__(self, i=0, j=None):
22 |         """Returns J[`i`][`j`]. If `j` is ``None``, returns the gradient of y_i, i.e.,
23 |         J[i].
24 |         """
25 |         if not 0 <= i < self.dim_y:
26 |             raise ValueError("i={} is not valid.".format(i))
27 |         if j is not None and not 0 <= j < self.dim_x:
28 |             raise ValueError("j={} is not valid.".format(j))
29 |         if i not in self.J:
30 |             y = self.ys[:, i : i + 1] if self.dim_y > 1 else self.ys
31 |             self.J[i] = torch.autograd.grad(
32 |                 y, self.xs, grad_outputs=torch.ones_like(y), create_graph=True
33 |             )[0]
34 | 
35 | 
36 |         return (
37 |             self.J[i] if j is None or self.dim_x == 1 else self.J[i][:, j : j + 1]
38 |         )
39 | 
40 | class Jacobians:
41 |     """Compute multiple Jacobians.
42 |     A new instance will be created for a new pair of (output, input). For the (output,
43 |     input) pair that has been computed before, it will reuse the previous instance,
44 |     rather than creating a new one.
45 |     """
46 | 
47 |     def __init__(self):
48 |         self.Js = {}
49 | 
50 |     def __call__(self, ys, xs, i=0, j=None):
51 |         key = (ys, xs)
52 |         if key not in self.Js:
53 |             self.Js[key] = Jacobian(ys, xs)
54 |         return self.Js[key](i, j)
55 | 
56 |     def clear(self):
57 |         """Clear cached Jacobians."""
58 |         self.Js = {}
59 | 
60 | 
61 | def jacobian(ys, xs, i=0, j=None):
62 |     """Compute Jacobian matrix J: J[i][j] = dy_i / dx_j, where i = 0, ..., dim_y - 1 and
63 |     j = 0, ..., dim_x - 1.
64 | Use this function to compute first-order derivatives instead of ``tf.gradients()`` 65 | or ``torch.autograd.grad()``, because 66 | - It is lazy evaluation, i.e., it only computes J[i][j] when needed. 67 | - It will remember the gradients that have already been computed to avoid duplicate 68 | computation. 69 | Args: 70 | ys: Output Tensor of shape (batch_size, dim_y). 71 | xs: Input Tensor of shape (batch_size, dim_x). 72 | i (int): 73 | j (int or None): 74 | Returns: 75 | J[`i`][`j`] in Jacobian matrix J. If `j` is ``None``, returns the gradient of 76 | y_i, i.e., J[`i`]. 77 | """ 78 | return jacobian._Jacobians(ys, xs, i=i, j=j) 79 | 80 | 81 | jacobian._Jacobians = Jacobians() 82 | 83 | 84 | class Hessian: 85 | """Compute Hessian matrix H: H[i][j] = d^2y / dx_i dx_j, where i,j = 0,..., dim_x-1. 86 | It is lazy evaluation, i.e., it only computes H[i][j] when needed. 87 | Args: 88 | y: Output Tensor of shape (batch_size, 1) or (batch_size, dim_y > 1). 89 | xs: Input Tensor of shape (batch_size, dim_x). 90 | component: If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` 91 | is used to compute the Hessian. Do not use if `y` has the shape (batch_size, 92 | 1). 93 | grad_y: The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid 94 | duplicate computation. `grad_y` can be computed from ``Jacobian``. 95 | """ 96 | 97 | def __init__(self, y, xs, component=None, grad_y=None): 98 | dim_y = y.shape[1] 99 | if dim_y > 1: 100 | if component is None: 101 | raise ValueError("The component of y is missing.") 102 | if component >= dim_y: 103 | raise ValueError( 104 | "The component of y={} cannot be larger than the dimension={}.".format( 105 | component, dim_y 106 | ) 107 | ) 108 | else: 109 | if component is not None: 110 | raise ValueError("Do not use component for 1D y.") 111 | component = 0 112 | 113 | if grad_y is None: 114 | grad_y = jacobian(y, xs, i=component, j=None) 115 | self.H = Jacobian(grad_y, xs) 116 | 117 | def __call__(self, i=0, j=0): 118 | """Returns H[`i`][`j`].""" 119 | return self.H(i, j) 120 | 121 | 122 | class Hessians: 123 | """Compute multiple Hessians. 124 | A new instance will be created for a new pair of (output, input). For the (output, 125 | input) pair that has been computed before, it will reuse the previous instance, 126 | rather than creating a new one. 127 | """ 128 | 129 | def __init__(self): 130 | self.Hs = {} 131 | 132 | def __call__(self, y, xs, component=None, i=0, j=0, grad_y=None): 133 | key = (y, xs, component) 134 | if key not in self.Hs: 135 | self.Hs[key] = Hessian(y, xs, component=component, grad_y=grad_y) 136 | return self.Hs[key](i, j) 137 | 138 | def clear(self): 139 | """Clear cached Hessians.""" 140 | self.Hs = {} 141 | 142 | 143 | def hessian(ys, xs, component=None, i=0, j=0, grad_y=None): 144 | """Compute Hessian matrix H: H[i][j] = d^2y / dx_i dx_j, where i,j=0,...,dim_x-1. 145 | Use this function to compute second-order derivatives instead of ``tf.gradients()`` 146 | or ``torch.autograd.grad()``, because 147 | - It is lazy evaluation, i.e., it only computes H[i][j] when needed. 148 | - It will remember the gradients that have already been computed to avoid duplicate 149 | computation. 150 | Args: 151 | ys: Output Tensor of shape (batch_size, dim_y). 152 | xs: Input Tensor of shape (batch_size, dim_x). 153 | component: If dim_y > 1, then `ys[:, component]` is used as y to compute the 154 | Hessian. If dim_y = 1, `component` must be ``None``. 155 | i (int): 156 | j (int): 157 | grad_y: The gradient of y w.r.t. `xs`. 
Provide `grad_y` if known to avoid 158 | duplicate computation. `grad_y` can be computed from ``jacobian``. Even if 159 | you do not provide `grad_y`, there is no duplicate computation if you use 160 | ``jacobian`` to compute first-order derivatives. 161 | Returns: 162 | H[`i`][`j`]. 163 | """ 164 | return hessian._Hessians(ys, xs, component=component, i=i, j=j, grad_y=grad_y) 165 | 166 | 167 | hessian._Hessians = Hessians() 168 | 169 | 170 | def clear(): 171 | """Clear cached Jacobians and Hessians.""" 172 | jacobian._Jacobians.clear() 173 | hessian._Hessians.clear() -------------------------------------------------------------------------------- /network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | from torch import optim 6 | from torch.autograd import grad 7 | from itertools import chain 8 | import torchsummary 9 | from real_sol import real_sol 10 | from vrac.bails_sombres import RNN, Transformer 11 | from variable_speed import c_fun 12 | from config import DEFAULT_CONFIG 13 | import numpy as np 14 | 15 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 16 | 17 | 18 | class Scaling_layer(nn.Module): # Couche de normalisation des données entre -1 et 1 19 | def __init__(self): 20 | super(Scaling_layer, self).__init__() 21 | self.lb = torch.tensor([0.0, 0.0]).to(device) 22 | self.ub = torch.tensor([1.0, 1.0]).to(device) 23 | 24 | def forward(self, x): 25 | return 2 * (x - self.lb) / (self.ub - self.lb) 26 | 27 | # Réseau de neurones 28 | 29 | 30 | class network(torch.jit.ScriptModule): 31 | def __init__(self, N_neurons, N_layers): 32 | super().__init__() 33 | self.num_neurons = N_neurons 34 | self.num_layers = N_layers 35 | self.scaling_layer = Scaling_layer() 36 | self.linear_input = nn.Linear(2, self.num_neurons) 37 | self.linear_hidden = nn.ModuleList( 38 | [nn.Linear(self.num_neurons, self.num_neurons) for _ in range(self.num_layers)]) 39 | self.linear_output = nn.Linear(self.num_neurons, 1) 40 | self.activation = nn.Tanh() 41 | self.dropout = nn.Dropout(0.3) 42 | self.bn = nn.BatchNorm1d(self.num_neurons) 43 | 44 | def forward(self, x): 45 | #x = self.scaling_layer(x) 46 | x = self.activation(self.linear_input(x)) 47 | for i, linear in enumerate(self.linear_hidden): 48 | x = self.activation(linear(x)) 49 | #x = self.bn(x) 50 | #x = self.dropout(x) 51 | x = self.linear_output(x) 52 | return x 53 | 54 | 55 | class PINN(): 56 | def __init__(self, with_rnn=False, N_neurons=64, N_layers=4): 57 | if with_rnn == True: 58 | #self.net = RNN(2, 64, 1, num_layers=4).to(device) 59 | self.net = Transformer(2, 16, 1, num_layers=4).to(device) 60 | else: 61 | self.net = network(N_neurons, N_layers).to(device) 62 | self.optimizer = optim.Adam( 63 | self.net.parameters(), lr=DEFAULT_CONFIG['lr']) # descente de gradient 64 | self.scheduler = optim.lr_scheduler.CosineAnnealingLR( 65 | self.optimizer, T_max=DEFAULT_CONFIG['epochs']) 66 | self.loss_history = [] 67 | self.loss_history_val = [] 68 | 69 | def _model_summary(self): 70 | print(torchsummary.summary(self.net, [(32, 1), (32, 1)])) 71 | 72 | # Calculer résidu 73 | def nth_gradient(self, f, wrt, n): 74 | for i in range(n): 75 | f = list(chain(*f)) 76 | grads = grad(f, wrt, create_graph=True, allow_unused=True,)[0] 77 | f = grads 78 | if grads is None: 79 | print("Bad Grad") 80 | return None 81 | return grads 82 | 83 | def calculate_laplacian(self, model, tensor): 84 | 
laplacian_x = torch.zeros(tensor.shape[0], 1, device=device)
85 |         laplacian_t = torch.zeros(tensor.shape[0], 1, device=device)
86 |         for i, tensori in enumerate(tensor):
87 |             hess = torch.autograd.functional.hessian(
88 |                 model, tensori.unsqueeze(0), create_graph=True)
89 |             hess = hess.view(2, 2)
90 |             laplacian_x[i] = hess[0, 0]
91 |             laplacian_t[i] = hess[1, 1]
92 |         return laplacian_x, laplacian_t
93 | 
94 |     def flat(self, x):
95 |         m = x.shape[0]
96 |         return [x[i] for i in range(m)]
97 | 
98 |     def f(self, x, t, variable_speed=False):
99 |         u_xx, u_tt = self.calculate_laplacian(self.net, torch.cat([x, t], 1))
100 |         if variable_speed:
101 |             c = c_fun(x, t)
102 |             #residual = u_tt - c*u_xx - (c**2-1)*(np.pi**2)*torch.sin(np.pi*x)*torch.sin(np.pi*t)
103 |             residual = u_tt - c*u_xx - 3*torch.sin(np.pi*x)*torch.sin(np.pi*t)
104 |         else:
105 |             #residual = u_tt - 4*u_xx - 3*(np.pi**2)*torch.sin(np.pi*x)*torch.sin(np.pi*t)
106 |             residual = u_tt - 4*u_xx
107 |             #lap,_ = self.calculate_laplacian(self.net, torch.cat([x, t], 1))
108 |             #residual = lap - np.pi**2*torch.sin(np.pi*x)*torch.sin(np.pi*t)
109 |         return residual
110 | 
111 |     def loss_first(self, x_ri, t_ri):
112 |         real_solution = real_sol(x_ri, t_ri)
113 |         u_pred_r = self.net(torch.cat([x_ri, t_ri], 1))
114 |         loss_residual = torch.mean((u_pred_r-real_solution)**2)
115 |         return loss_residual
116 | 
117 |     def loss_fn(self, x_r, t_r,
118 |                 u_b, x_b, t_b,
119 |                 u_i, x_i, t_i, validation=False):
120 | 
121 |         residual = self.f(x_r, t_r)
122 |         loss_residual = torch.mean(residual**2)  # squared residual: the raw signed mean could be driven arbitrarily negative by the optimizer
123 | 
124 |         u_pred_b = self.net(torch.cat([x_b, t_b], 1))
125 |         loss_bords = torch.mean((u_pred_b-u_b)**2)
126 |         u_pred_i = self.net(torch.cat([x_i, t_i], 1))
127 |         loss_init = torch.mean((u_pred_i-u_i)**2)
128 | 
129 |         """
130 |         grad_third = grad(outputs=residual, inputs=Variable(t_r,requires_grad=True), grad_outputs=torch.ones_like(residual,device=device), create_graph=True,retain_graph=True,only_inputs=True)
131 |         grad_third = grad_third[0]
132 |         loss_residual_third = torch.mean((grad_third)**2)
133 |         loss_residual = loss_residual + loss_residual_third
134 |         """
135 | 
136 |         # Compute the derivative of u_pred_b with respect to t_b
137 |         #u_pred_b_t = torch.autograd.functional.jacobian(self.net, torch.cat([x_b, t_b], 1), create_graph=True)
138 |         loss_bords_der = torch.zeros(1, device=device)
139 |         #loss_bords_der = torch.mean((u_pred_b_t)**2)
140 | 
141 |         """
142 |         #Add truncated boundary c u_tx - u_tt = 0 at t = 1
143 |         t_r_1 = torch.ones_like(t_r, requires_grad=True)
144 |         u_pred_t_1 = self.net(x_r, t_r_1)
145 |         u_pred_t_1_x = self.nth_gradient(self.flat(u_pred_t_1), wrt=x_r, n=1)
146 |         u_pred_t_1_x_t = self.nth_gradient(self.flat(u_pred_t_1_x), wrt=t_r_1, n=1)
147 |         u_tt = self.nth_gradient(self.flat(u_pred_t_1), wrt=t_r_1, n=2)
148 |         loss_trunc = torch.mean((u_pred_t_1_x_t - u_tt)**2)
149 |         """
150 |         loss_trunc = torch.zeros(1, device=device)
151 |         return loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc
152 | 
153 |     def train_step(self, train_data, phase="later"):
154 |         if phase == "beginning":
155 |             t_ri, x_ri = train_data
156 |             self.net.train()
157 |             self.optimizer.zero_grad()
158 |             loss = self.loss_first(x_ri, t_ri)
159 |             loss.backward()
160 |             self.optimizer.step()
161 |             return loss.item()
162 |         else:
163 |             x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i = train_data
164 |             self.net.train()
165 |             self.optimizer.zero_grad()
166 |             loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(x_r, t_r,
167 |                                                                                             u_b, x_b, t_b,
168 |                                                                                             u_i, x_i, t_i)
169 |             loss = 
loss_residual + loss_bords + loss_init + loss_bords_der + 0.5*loss_trunc 170 | loss.backward() 171 | self.optimizer.step() 172 | return loss_residual.item(), loss_bords.item(), loss_init.item(), loss_bords_der.item(), loss_trunc.item() 173 | 174 | def val_step(self, val_data, phase="later"): 175 | if phase == "beginning": 176 | x_ri, t_ri = val_data 177 | loss = self.loss_first(x_ri, t_ri) 178 | return loss.item() 179 | else: 180 | x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i = val_data 181 | self.net.eval() 182 | loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(x_r, t_r, 183 | u_b, x_b, t_b, 184 | u_i, x_i, t_i, validation=False) 185 | loss = loss_residual + loss_bords + loss_init + loss_bords_der + loss_trunc 186 | return loss.item() 187 | 188 | def accuracy_step(self, val_data): 189 | x_r, t_r, _, _, _, _, _, _ = val_data 190 | self.net.eval() 191 | # Compute MSE between real_sol and net 192 | with torch.no_grad(): 193 | u_pred = self.net(torch.cat([x_r, t_r], 1)) 194 | real_u = real_sol(x_r, t_r) 195 | num = torch.mean(torch.square(u_pred-real_u)) 196 | #den = torch.mean(torch.square(real_u)) 197 | return num.item() 198 | -------------------------------------------------------------------------------- /variational_network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | from torch import optim 6 | from torch.autograd import grad 7 | from itertools import chain 8 | import torchsummary 9 | from real_sol import real_sol 10 | from variable_speed import c_fun 11 | from config import DEFAULT_CONFIG 12 | import numpy as np 13 | from dataset import * 14 | import matplotlib.pyplot as plt 15 | from tqdm import tqdm 16 | from gradients import hessian, clear 17 | 18 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 19 | print(device) 20 | 21 | 22 | def plot1dgrid_real(lb, ub, N, model, k): 23 | """Same for the real solution""" 24 | x1space = np.linspace(lb[0], ub[0], N) 25 | tspace = np.linspace(lb[1], ub[1], N) 26 | X1, T = np.meshgrid(tspace, x1space) 27 | T = torch.from_numpy(T).view(1, N*N, 1).to(device).float() 28 | X1 = torch.from_numpy(X1).view(1, N*N, 1).to(device).float() 29 | T = T.transpose(0, 1).squeeze(-1) 30 | X1 = X1.transpose(0, 1).squeeze(-1) 31 | upred = model(torch.cat((X1, T), 1)) 32 | U = torch.squeeze(upred).detach().cpu().numpy() 33 | U = upred.view(N, N).detach().cpu().numpy() 34 | T, X1 = T.view(N, N).detach().cpu().numpy(), X1.view( 35 | N, N).detach().cpu().numpy() 36 | z_array = np.zeros((N, N)) 37 | for i in range(N): 38 | z_array[:, i] = U[i] 39 | 40 | fig = plt.figure() 41 | ax = fig.add_subplot(121) 42 | ax.scatter(T, X1, c=U, marker='X') 43 | ax.set_xlabel('$t$') 44 | ax.set_ylabel('$x1$') 45 | 46 | # Partie 3d 47 | ax1 = fig.add_subplot(122, projection='3d') 48 | ax1.scatter(T, X1, U, c=U, marker='X') 49 | ax1.set_xlabel('$t$') 50 | ax1.set_ylabel('$x1$') 51 | 52 | plt.savefig('figs/real_sol_{}.png'.format(k)) 53 | plt.close() 54 | 55 | 56 | def plot_loss(train_losses, val_losses): 57 | """Plot the loss""" 58 | plt.figure() 59 | plt.plot(train_losses, label='Train') 60 | plt.plot(val_losses, label='Validation') 61 | plt.legend() 62 | plt.xlabel('Epochs') 63 | plt.ylabel('Loss') 64 | plt.savefig('figs/loss.png') 65 | plt.close() 66 | 67 | # Réseau de neurones 68 | 69 | 70 | class CubicReLU(nn.Module): 71 | def __init__(self): 72 | super(CubicReLU, 
self).__init__() 73 | self.device = torch.device( 74 | "cuda" if torch.cuda.is_available() else "cpu") 75 | 76 | def forward(self, x): 77 | return torch.max(torch.tensor(0.0, device=self.device), x) ** 3 78 | 79 | 80 | class network(torch.jit.ScriptModule): 81 | def __init__(self, N_neurons, N_layers): 82 | super().__init__() 83 | self.num_neurons = N_neurons 84 | self.num_layers = N_layers 85 | self.linear_input = nn.Linear(2, self.num_neurons) 86 | self.linear_hidden = nn.ModuleList( 87 | [nn.Linear(self.num_neurons, self.num_neurons) for _ in range(self.num_layers)]) 88 | self.linear_output = nn.Linear(self.num_neurons, 1) 89 | self.activation = nn.Tanh() # nn.Tanh() if not working 90 | 91 | def forward(self, x): 92 | x = self.activation(self.linear_input(x)) 93 | for i, linear in enumerate(self.linear_hidden): 94 | x = self.activation(linear(x)) 95 | x = self.linear_output(x) 96 | return x 97 | 98 | 99 | class PINN(): 100 | def __init__(self, segments, N_neurons=300, N_layers=4): 101 | self.net = network(N_neurons, N_layers).to(device) 102 | self.optimizer = optim.Adam( 103 | self.net.parameters(), lr=DEFAULT_CONFIG['lr']) # descente de gradient 104 | self.scheduler = optim.lr_scheduler.CosineAnnealingLR( 105 | self.optimizer, T_max=DEFAULT_CONFIG['epochs']) 106 | self.loss_history = [] 107 | self.loss_history_val = [] 108 | self.segments = segments 109 | 110 | def _model_summary(self): 111 | print(torchsummary.summary(self.net, [(32, 1), (32, 1)])) 112 | 113 | # Calculer résidu 114 | def nth_gradient(self, f, wrt, n): 115 | for i in range(n): 116 | f = list(chain(*f)) 117 | grads = grad(f, wrt, create_graph=True, allow_unused=True,)[0] 118 | f = grads 119 | if grads is None: 120 | print("Bad Grad") 121 | return None 122 | return grads 123 | 124 | def calculate_laplacian(self, model, tensor): 125 | laplacian_x = torch.zeros(tensor.shape[0], 1, device=device) 126 | laplacian_t = torch.zeros(tensor.shape[0], 1, device=device) 127 | for i, tensori in enumerate(tensor): 128 | hess = torch.autograd.functional.hessian( 129 | model, tensori.unsqueeze(0), create_graph=True) 130 | hess = hess.view(2, 2) 131 | laplacian_x[i] = hess[0, 0] 132 | laplacian_t[i] = hess[1, 1] 133 | return laplacian_x, laplacian_t 134 | 135 | def flat(self, x): 136 | m = x.shape[0] 137 | return [x[i] for i in range(m)] 138 | 139 | def dist(self, x1, t1, x2, t2): 140 | return torch.sqrt((x1-x2)**2+(t1-t2)**2) 141 | 142 | def linseg(self, x, t, x1, t1, x2, t2): 143 | L = self.dist(x1, t1, x2, t2) 144 | xc = (x1+x2)*0.5 145 | tc = (t1+t2)*0.5 146 | f = (1/L)*((x-x1)*(t2-t1)-(t-t1)*(x2-x1)) 147 | tk = (1/L)*((L/2.)**2 - self.dist(x, t, xc, tc)**2) 148 | varphi = torch.sqrt(tk**2 + f**4) 149 | phi = torch.sqrt(f**2 + 0.25*(varphi-tk)**2) 150 | return phi 151 | 152 | def phi(self, x, t): # segments is an array of all the segments composing the boundary 153 | m = 1. 154 | R = 0. 
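        # (Added note) R accumulates an approximate distance function over the
        # boundary segments: each linseg(...) vanishes on its own segment, and
        # the reciprocal R-function blend below vanishes wherever any segment
        # distance does. The product phi * net(...) in self.u therefore
        # vanishes on all four edges of the square; the exp(-t**2/0.1)*sin(pi*x)
        # term added there then supplies the initial data near t = 0.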
155 | for i in range(len(self.segments[:, 0])): 156 | phi_v = self.linseg( 157 | x, t, self.segments[i, 0], self.segments[i, 1], self.segments[i, 2], self.segments[i, 3]) 158 | R = R + 1./phi_v**m 159 | R = 1/R**(1/m) 160 | return R 161 | 162 | def u(self, z): 163 | x, t = z[:, 0], z[:, 1] 164 | x, t = x.unsqueeze(1), t.unsqueeze(1) 165 | w = self.phi(x, t)*self.net(z) 166 | # add initial condition oftorch.sin(np.pi*x) + 0.5*torch.sin(4*np.pi*x) for t = 0 167 | #w[t==0] += torch.sin(np.pi*x[t==0]) + 0.5*torch.sin(4*np.pi*x[t==0]) 168 | # w[t==0] = 5 169 | # w += torch.exp(-t**2/0.1) * (torch.sin(np.pi*x) + 0.5*torch.sin(4*np.pi*x))* 170 | w += torch.exp(-t**2/0.1) * torch.sin(np.pi*x) 171 | 172 | return w 173 | 174 | def loss(self, x, t): 175 | # Gradient True 176 | x, t = x.requires_grad_(True), t.requires_grad_(True) 177 | #laplacian_u_x, laplacian_u_t = self.calculate_laplacian(self.u, torch.cat((x, t), 1)) 178 | 179 | # u_tt = self.nth_gradient(self.u(torch.cat((x, t), 1)), t, 3) 180 | # u_xx = self.nth_gradient(self.u(torch.cat((x, t), 1)), x, 3) 181 | points = torch.cat((x, t), 1) 182 | pred = self.u(points) 183 | u_tt = hessian(pred, points, i=1, j=1) 184 | u_xx = hessian(pred, points, i=0, j=0) 185 | clear() 186 | f = u_tt - 4 * u_xx 187 | #- 3*(np.pi**2)*torch.sin(np.pi*x)*torch.sin(np.pi*t) 188 | loss = torch.mean(f**2) 189 | return loss 190 | 191 | def train(self, x, t, x_val, t_val, epochs=DEFAULT_CONFIG['epochs']): 192 | progress_bar = tqdm(range(epochs)) 193 | for epoch in progress_bar: 194 | self.optimizer.zero_grad() 195 | loss = self.loss(x, t) 196 | loss.backward() 197 | self.optimizer.step() 198 | self.scheduler.step() 199 | 200 | loss_val = self.loss(x_val, t_val) 201 | self.loss_history_val.append(loss_val.item()) 202 | if epoch % 100 == 0: 203 | plot1dgrid_real([0, 0], [1, 1], 100, self.u, epoch) 204 | plot_loss(self.loss_history, self.loss_history_val) 205 | progress_bar.set_description( 206 | f"Loss: {loss.item():.4f}, Loss_val: {loss_val.item():.4f}") 207 | return self.loss_history, self.loss_history_val 208 | 209 | def predict(self, x, t): 210 | with torch.no_grad(): 211 | x.requires_grad = False 212 | t.requires_grad = False 213 | u = self.u(torch.cat((x, t), 1)) 214 | return u 215 | 216 | 217 | torch.set_grad_enabled(True) 218 | 219 | segments = torch.tensor( 220 | [[0, 0, 0, 1], [0, 1, 1, 1], [1, 1, 1, 0], [1, 0, 0, 0]], device=device) 221 | PINN = PINN(segments, N_neurons=200, N_layers=4) 222 | N_points = 500 223 | x = torch.linspace(0.001, 0.999, N_points, device=device).unsqueeze(1) 224 | t = torch.linspace(0.001, 0.999, N_points, device=device).unsqueeze(1) 225 | 226 | x_train = x.repeat(N_points, 1) 227 | t_train = t.repeat(N_points, 1).t().reshape(-1, 1) 228 | 229 | x_val = torch.linspace(0.001, 0.999, 200, device=device).unsqueeze(1) 230 | t_val = torch.linspace(0.001, 0.999, 200, device=device).unsqueeze(1) 231 | 232 | loss_history, loss_history_val = PINN.train(x, t, x_val, t_val, epochs=10000) 233 | 234 | plt.plot(loss_history) 235 | plt.plot(loss_history_val) 236 | plt.show() 237 | -------------------------------------------------------------------------------- /network2d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | from torch import optim 6 | from torch.autograd import grad 7 | from itertools import chain 8 | import torchsummary 9 | from real_sol2d import real_sol 10 | from 
vrac.bails_sombres import RNN, Transformer
11 | from variable_speed import c_fun
12 | from config import DEFAULT_CONFIG
13 | import numpy as np
14 | 
15 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16 | 
17 | 
18 | class Scaling_layer(nn.Module):  # Layer normalizing the data to [-1, 1]
19 |     def __init__(self):
20 |         super(Scaling_layer, self).__init__()
21 | 
22 |         # We are now in 2D, so there are 3 inputs (x, y, t)
23 |         self.lb = torch.tensor([0.0, 0.0, 0.0]).to(device)  # lower bound
24 |         self.ub = torch.tensor([1.0, 1.0, 1.0]).to(device)  # upper bound
25 | 
26 |     def forward(self, x):
27 |         return 2 * (x - self.lb) / (self.ub - self.lb) - 1  # the trailing -1 maps to [-1, 1] as advertised (it was missing)
28 | 
29 | # Neural network
30 | 
31 | 
32 | class network(torch.jit.ScriptModule):
33 |     def __init__(self, N_neurons, N_layers):
34 |         super().__init__()
35 |         self.num_neurons = N_neurons
36 |         self.num_layers = N_layers
37 |         self.scaling_layer = Scaling_layer()
38 | 
39 |         # We are now in 2D, so there are 3 inputs (x, y, t)
40 |         self.linear_input = nn.Linear(3, self.num_neurons)
41 |         self.linear_hidden = nn.ModuleList(
42 |             [nn.Linear(self.num_neurons, self.num_neurons) for _ in range(self.num_layers)])
43 |         self.linear_output = nn.Linear(self.num_neurons, 1)
44 |         self.activation = nn.Tanh()
45 |         self.dropout = nn.Dropout(0.3)
46 |         self.bn = nn.BatchNorm1d(self.num_neurons)
47 | 
48 |     def forward(self, x):
49 |         #x = self.scaling_layer(x)
50 |         x = self.activation(self.linear_input(x))
51 |         for i, linear in enumerate(self.linear_hidden):
52 |             x = self.activation(linear(x))
53 |             #x = self.bn(x)
54 |             x = self.dropout(x)
55 |         x = self.linear_output(x)
56 |         return x
57 | 
58 | 
59 | class PINN():
60 |     def __init__(self, with_rnn=False, N_neurons=64, N_layers=4):
61 |         if with_rnn == True:
62 |             #self.net = RNN(2, 64, 1, num_layers=4).to(device)
63 | 
64 |             # We are now in 2D, so there are 3 inputs (x, y, t)
65 |             self.net = Transformer(3, 16, 1, num_layers=4).to(device)
66 |         else:
67 |             self.net = network(N_neurons, N_layers).to(device)
68 |         self.optimizer = optim.Adam(
69 |             self.net.parameters(), lr=DEFAULT_CONFIG['lr'])  # gradient descent
70 |         self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
71 |             self.optimizer, T_max=DEFAULT_CONFIG['epochs'])
72 |         self.loss_history = []
73 |         self.loss_history_val = []
74 | 
75 |     def _model_summary(self):
76 |         print(torchsummary.summary(self.net, [(32, 1), (32, 1)]))
77 | 
78 |     # Compute the residual
79 |     def nth_gradient(self, f, wrt, n):
80 |         for i in range(n):
81 |             f = list(chain(*f))
82 |             grads = grad(f, wrt, create_graph=True, allow_unused=True,)[0]
83 |             f = grads
84 |             if grads is None:
85 |                 print("Bad Grad")
86 |                 return None
87 |         return grads
88 | 
89 |     def calculate_laplacian(self, model, tensor):
90 |         laplacian_x = torch.zeros(tensor.shape[0], 1, device=device)
91 |         laplacian_t = torch.zeros(tensor.shape[0], 1, device=device)
92 | 
93 |         # We are now in 2D, so there are 3 inputs (x, y, t)
94 |         laplacian_y = torch.zeros(tensor.shape[0], 1, device=device)
95 | 
96 |         for i, tensori in enumerate(tensor):
97 |             hess = torch.autograd.functional.hessian(
98 |                 model, tensori.unsqueeze(0), create_graph=True)
99 |             hess = hess.view(3, 3)  # 3x3 Hessian for the 3 inputs (x, y, t); view(2, 2) was a bug carried over from the 1D code
100 |             laplacian_x[i] = hess[0, 0]
101 |             laplacian_t[i] = hess[2, 2]
102 | 
103 |             # We are now in 2D, so there are 3 inputs (x, y, t)
104 |             laplacian_y[i] = hess[1, 1]
105 |         return laplacian_x, laplacian_y, laplacian_t
106 | 
107 |     def flat(self, x):
108 |         m = x.shape[0]
109 |         return [x[i] for i in range(m)]
110 | 
111 |     def f(self, x, y, t, variable_speed=False):
112 | 
u_xx, u_yy, u_tt = self.calculate_laplacian( 113 | self.net, torch.cat([x, y, t], 1)) 114 | if variable_speed: 115 | c = c_fun(x, y, t) 116 | #residual = u_tt - c*u_xx - (c**2-1)*(np.pi**2)*torch.sin(np.pi*x)*torch.sin(np.pi*t) 117 | 118 | # On est maintenant en 2D donc on a 3 entrées (x,y,t) 119 | residual = u_tt - c*(u_xx+u_yy) 120 | else: 121 | residual = u_tt - 4*(u_xx+u_yy) 122 | #lap,_ = self.calculate_laplacian(self.net, torch.cat([x, t], 1)) 123 | #residual = lap - np.pi**2*torch.sin(np.pi*x)*torch.sin(np.pi*t) 124 | return residual 125 | 126 | # On est maintenant en 2D donc on a 3 entrées (x,y,t) 127 | def loss_first(self, x_ri, y_ri, t_ri): 128 | real_solution = real_sol(x_ri, y_ri, t_ri) 129 | u_pred_r = self.net(torch.cat([x_ri, y_ri, t_ri], 1)) 130 | loss_residual = torch.mean( 131 | (u_pred_r-real_solution)**2) 132 | return loss_residual 133 | 134 | # On est maintenant en 2D donc on a 3 entrées (x,y,t) 135 | def loss_fn(self, x_r, y_r, t_r, 136 | u_b, x_b, y_b, t_b, 137 | u_i, x_i, y_i, t_i, validation=False): 138 | 139 | loss_residual = torch.mean(torch.abs(self.f(x_r, y_r, t_r))) 140 | 141 | u_pred_b = self.net(torch.cat([x_b, y_b, t_b], 1)) 142 | loss_bords = torch.mean((u_pred_b-u_b)**2) 143 | u_pred_i = self.net(torch.cat([x_i, y_i, t_i], 1)) 144 | loss_init = torch.mean((u_pred_i-u_i)**2) 145 | 146 | # Compute derivative of u_pred_b with respect to t_b 147 | #u_pred_b_t = torch.autograd.functional.jacobian(self.net, torch.cat([x_b, t_b], 1), create_graph=True) 148 | loss_bords_der = torch.zeros(1, device=device) 149 | #loss_bords_der = torch.mean((u_pred_b_t)**2) 150 | 151 | """ 152 | #Add truncated boundary c u_tx - u_tt = 0 at t = 1 153 | t_r_1 = torch.ones_like(t_r, requires_grad=True) 154 | u_pred_t_1 = self.net(x_r, t_r_1) 155 | u_pred_t_1_x = self.nth_gradient(self.flat(u_pred_t_1), wrt=x_r, n=1) 156 | u_pred_t_1_x_t = self.nth_gradient(self.flat(u_pred_t_1_x), wrt=t_r_1, n=1) 157 | u_tt = self.nth_gradient(self.flat(u_pred_t_1), wrt=t_r_1, n=2) 158 | loss_trunc = torch.mean((u_pred_t_1_x_t - u_tt)**2) 159 | """ 160 | loss_trunc = torch.zeros(1, device=device) 161 | return loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc 162 | 163 | def train_step(self, train_data, phase="later"): 164 | if phase == "beginning": 165 | t_ri, x_ri, y_ri = train_data 166 | self.net.train() 167 | self.optimizer.zero_grad() 168 | loss = self.loss_first(x_ri, y_ri, t_ri) 169 | loss.backward() 170 | self.optimizer.step() 171 | return loss.item() 172 | else: 173 | x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i, = train_data 174 | self.net.train() 175 | self.optimizer.zero_grad() 176 | loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(x_r, y_r, t_r, 177 | u_b, x_b, y_b, t_b, 178 | u_i, x_i, y_i, t_i) 179 | loss = loss_residual + loss_bords + loss_init + loss_bords_der + 0.5*loss_trunc 180 | loss.backward() 181 | self.optimizer.step() 182 | return loss_residual.item(), loss_bords.item(), loss_init.item(), loss_bords_der.item(), loss_trunc.item() 183 | 184 | def val_step(self, val_data, phase="later"): 185 | if phase == "beginning": 186 | x_ri, y_ri, t_ri = val_data 187 | loss = self.loss_first(x_ri, y_ri, t_ri) 188 | return loss.item() 189 | else: 190 | x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i = val_data 191 | self.net.eval() 192 | loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(x_r, y_r, t_r, 193 | u_b, x_b, y_b, t_b, 194 | u_i, x_i, y_i, t_i, validation=False) 195 | loss = loss_residual + 
    # We are now in 2D, so there are 3 inputs (x, y, t)
    def loss_first(self, x_ri, y_ri, t_ri):
        real_solution = real_sol(x_ri, y_ri, t_ri)
        u_pred_r = self.net(torch.cat([x_ri, y_ri, t_ri], 1))
        loss_residual = torch.mean((u_pred_r-real_solution)**2)
        return loss_residual

    # We are now in 2D, so there are 3 inputs (x, y, t)
    def loss_fn(self, x_r, y_r, t_r,
                u_b, x_b, y_b, t_b,
                u_i, x_i, y_i, t_i, validation=False):

        loss_residual = torch.mean(torch.abs(self.f(x_r, y_r, t_r)))

        u_pred_b = self.net(torch.cat([x_b, y_b, t_b], 1))
        loss_bords = torch.mean((u_pred_b-u_b)**2)
        u_pred_i = self.net(torch.cat([x_i, y_i, t_i], 1))
        loss_init = torch.mean((u_pred_i-u_i)**2)

        # Compute derivative of u_pred_b with respect to t_b
        #u_pred_b_t = torch.autograd.functional.jacobian(self.net, torch.cat([x_b, t_b], 1), create_graph=True)
        loss_bords_der = torch.zeros(1, device=device)
        #loss_bords_der = torch.mean((u_pred_b_t)**2)

        """
        #Add truncated boundary c u_tx - u_tt = 0 at t = 1
        t_r_1 = torch.ones_like(t_r, requires_grad=True)
        u_pred_t_1 = self.net(x_r, t_r_1)
        u_pred_t_1_x = self.nth_gradient(self.flat(u_pred_t_1), wrt=x_r, n=1)
        u_pred_t_1_x_t = self.nth_gradient(self.flat(u_pred_t_1_x), wrt=t_r_1, n=1)
        u_tt = self.nth_gradient(self.flat(u_pred_t_1), wrt=t_r_1, n=2)
        loss_trunc = torch.mean((u_pred_t_1_x_t - u_tt)**2)
        """
        loss_trunc = torch.zeros(1, device=device)
        return loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc

    def train_step(self, train_data, phase="later"):
        if phase == "beginning":
            t_ri, x_ri, y_ri = train_data
            self.net.train()
            self.optimizer.zero_grad()
            loss = self.loss_first(x_ri, y_ri, t_ri)
            loss.backward()
            self.optimizer.step()
            return loss.item()
        else:
            x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i = train_data
            self.net.train()
            self.optimizer.zero_grad()
            loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(
                x_r, y_r, t_r,
                u_b, x_b, y_b, t_b,
                u_i, x_i, y_i, t_i)
            loss = loss_residual + loss_bords + loss_init + loss_bords_der + 0.5*loss_trunc
            loss.backward()
            self.optimizer.step()
            return loss_residual.item(), loss_bords.item(), loss_init.item(), loss_bords_der.item(), loss_trunc.item()

    def val_step(self, val_data, phase="later"):
        if phase == "beginning":
            x_ri, y_ri, t_ri = val_data
            loss = self.loss_first(x_ri, y_ri, t_ri)
            return loss.item()
        else:
            x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i = val_data
            self.net.eval()
            loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = self.loss_fn(
                x_r, y_r, t_r,
                u_b, x_b, y_b, t_b,
                u_i, x_i, y_i, t_i, validation=True)
            loss = loss_residual + loss_bords + loss_init + loss_bords_der + loss_trunc
            return loss.item()

    def accuracy_step(self, val_data):
        # val_data holds 11 tensors; the original unpacked only 9 of them
        x_r, y_r, t_r, _, _, _, _, _, _, _, _ = val_data
        self.net.eval()
        # Compute MSE between real_sol and net
        with torch.no_grad():
            u_pred = self.net(torch.cat([x_r, y_r, t_r], 1))
            real_u = real_sol(x_r, y_r, t_r)
            num = torch.mean(torch.square(u_pred-real_u))
            #den = torch.mean(torch.square(real_u))
        return num.item()
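
# A hedged sketch (our addition, not used above): the relative L2 error that
# the commented-out denominator in accuracy_step hints at; this is commonly
# reported for PINNs instead of the raw MSE.
def relative_l2_error(u_pred, u_true):
    return (torch.linalg.norm(u_pred - u_true) / torch.linalg.norm(u_true)).item()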
--------------------------------------------------------------------------------
/pinn_training.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from network import PINN
import torch.nn.functional as F
from real_sol import real_sol
from config import DEFAULT_CONFIG
from dataset import *
from torch.utils.tensorboard import SummaryWriter
import cProfile

writer = SummaryWriter()

############################################################## POINTS PLOTTING #############################################################
############################################################################################################################################


def plot_training_points(t_0, t_b, t_r, x_0, x_b, x_r, u_0, u_b):
    """
    Input:  t_0, t_b, t_r, x_0, x_b, x_r, u_0, u_b
    Output: display the initial, boundary and collocation training points
    """
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(111)
    ax.scatter(t_0, x_0[:, 0], c=u_0, marker='X', vmin=-1, vmax=1)
    ax.scatter(t_b, x_b[:, 0], c=u_b, marker='X', vmin=-1, vmax=1)
    ax.scatter(t_r, x_r[:, 0], c='r', marker='.', alpha=0.1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x1$')
    ax.set_title('Positions of collocation points and boundary data')
    plt.show()

########################################################## PLOTTING FUNCTIONS ###########################################################
##########################################################################################################################################


def plot1dgrid_real(lb, ub, N, model, k, with_rnn=False):
    """Plot the network prediction on an N x N (x, t) grid."""
    model = model.net
    x1space = np.linspace(lb[0], ub[0], N)
    tspace = np.linspace(lb[1], ub[1], N)
    T, X1 = np.meshgrid(tspace, x1space)
    T = torch.from_numpy(T).view(1, N*N, 1).to(device).float()
    X1 = torch.from_numpy(X1).view(1, N*N, 1).to(device).float()
    if not with_rnn:
        T = T.transpose(0, 1).squeeze(-1)
        X1 = X1.transpose(0, 1).squeeze(-1)
    else:
        T = T.transpose(0, 1)
        X1 = X1.transpose(0, 1)
    upred = model(torch.cat((X1, T), 1))
    U = upred.view(N, N).detach().cpu().numpy()
    T, X1 = T.view(N, N).detach().cpu().numpy(), X1.view(
        N, N).detach().cpu().numpy()

    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.scatter(T, X1, c=U, marker='X')
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x1$')

    # 3D part
    ax1 = fig.add_subplot(122, projection='3d')
    ax1.scatter(T, X1, U, c=U, marker='X')
    ax1.set_xlabel('$t$')
    ax1.set_ylabel('$x1$')

    plt.savefig(f'results/real_sol_{k}')
    writer.add_figure(f'real_sol_{k}', fig)
    plt.close()

# Plot train and val losses on same figure


def plot_loss(train_losses, val_losses, accuracy):
    fig, (ax1, ax2) = plt.subplots(2, 1)
    plt.style.use('dark_background')
    ax1.plot(train_losses, label='train')
    ax1.plot(val_losses, label='val')
    ax2.plot(accuracy, label="error", color='red')

    ax1.set(ylabel='Loss')
    ax2.set(ylabel='Error')
    plt.xlabel('Epoch')

    ax1.legend()
    ax2.legend()
    plt.savefig('results/loss')
    plt.close()
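
# A small helper sketch (our addition, not used below): the per-epoch
# reindexing in train() shuffles groups of same-length tensors by hand; one
# shared permutation per group is all it does. Like train(), this relies on
# the module-level device set in the __main__ block.
def shuffle_together(*tensors):
    perm = torch.randperm(tensors[0].shape[0])
    return [t[perm].to(device) for t in tensors]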
########################################################### TRAINING ###########################################################
################################################################################################################################


def train(model, train_data, val_data, train_data_begin,
          epochs):
    epochs = tqdm(range(epochs), desc="Training")
    losses = []
    val_losses = []
    acc = []
    for epoch in epochs:
        # Shuffle train_data (one permutation per group of same-length
        # tensors; see the shuffle_together sketch above)
        index_shuf_r = torch.randperm(train_data[0].shape[0])
        index_shuf_b = torch.randperm(train_data[2].shape[0])
        index_shuf_i = torch.randperm(train_data[5].shape[0])
        x_r_train = train_data[0][index_shuf_r].to(device)
        t_r_train = train_data[1][index_shuf_r].to(device)
        u_b_train = train_data[2][index_shuf_b].to(device)
        x_b_train = train_data[3][index_shuf_b].to(device)
        t_b_train = train_data[4][index_shuf_b].to(device)
        u_i_train = train_data[5][index_shuf_i].to(device)
        x_i_train = train_data[6][index_shuf_i].to(device)
        t_i_train = train_data[7][index_shuf_i].to(device)
        train_data_new = [x_r_train, t_r_train, u_b_train,
                          x_b_train, t_b_train, u_i_train, x_i_train, t_i_train]
        # Shuffle train_data_begin
        index_shuf_b = torch.randperm(train_data_begin[0].shape[0])
        t_b_train = train_data_begin[0][index_shuf_b].to(device)
        x_b_train = train_data_begin[1][index_shuf_b].to(device)
        train_data_begin = [t_b_train, x_b_train]
        train_data = train_data_new
        # Supervised warm-up on the analytic solution; with threshold 0 this
        # phase is disabled here (pinn_training2D.py uses 1000 epochs)
        if epoch < 0:
            loss_begin = model.train_step(train_data_begin, phase="beginning")
            epochs.set_postfix(loss=loss_begin)
        else:
            loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = model.train_step(
                train_data)
            loss = loss_residual + loss_bords + loss_init + loss_bords_der + loss_trunc
            val_loss = model.val_step(val_data)
            error = model.accuracy_step(val_data)
            epochs.set_postfix(loss_residual=loss_residual,
                               loss_bords=loss_bords,
                               loss_init=loss_init,
                               loss_bords_der=loss_bords_der,
                               loss_trunc=loss_trunc,
                               loss=loss,
                               val_loss=val_loss,
                               accuracy=error)

            # Scheduler step
            #model.scheduler.step()

            # Append loss lists (and eventually log for Tensorboard)
            losses.append(loss)
            val_losses.append(val_loss)
            acc.append(error)

            writer.add_scalar('Loss_residual', loss_residual, epoch)
            writer.add_scalar('Loss_bords', loss_bords, epoch)
            writer.add_scalar('Loss_init', loss_init, epoch)
            writer.add_scalar('Loss_bords_der', loss_bords_der, epoch)
            writer.add_scalar('Loss_trunc', loss_trunc, epoch)
            writer.add_scalar('Loss', loss, epoch)
            writer.add_scalar('Val_loss', val_loss, epoch)
            writer.add_scalar('Error', error, epoch)

            plot_loss(losses, val_losses, acc)

        if epoch % 100 == 0:
            plot1dgrid_real(lb, ub, N_plotting, model, epoch)
        if epoch % 1000 == 0:
            torch.save(model.net.state_dict(), f"results/model_{epoch}.pt")


if __name__ == '__main__':
    seed = 42
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark=True would re-enable non-deterministic kernel selection and
    # defeat the deterministic flag above
    torch.backends.cudnn.benchmark = False

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    N_i, N_b, N_r = DEFAULT_CONFIG['N_i'], DEFAULT_CONFIG['N_b'], DEFAULT_CONFIG['N_r']
    l_b, u_b = DEFAULT_CONFIG['l_b'], DEFAULT_CONFIG['u_b']
    N_neurons, N_layers = DEFAULT_CONFIG['N_neurons'], DEFAULT_CONFIG['N_layers']

    with_rnn = False
    net = PINN(with_rnn=with_rnn, N_neurons=N_neurons, N_layers=N_layers)
    # net._model_summary()

    # Write config to tensorboard
    writer.add_text('Config', str(DEFAULT_CONFIG))

    t_ri, x_ri = define_points_begin(10000, l_b, u_b)
    t_i, x_i, u_i, t_b, x_b, u_b, t_r, x_r = define_points(
        N_i, N_b, N_r, l_b, u_b)

    # x_r,t_r,u_b,x_b,t_b,u_i,x_i,t_i = normalize_data(x_r,t_r,
    #                                                  u_b,x_b,t_b,
    #                                                  u_i,x_i,t_i)
    plot_training_points(t_i.data.numpy(),
                         t_b.data.numpy(),
                         t_r.data.numpy(),
                         x_i.data.numpy(),
                         x_b.data.numpy(),
                         x_r.data.numpy(),
                         u_i.data.numpy(),
                         u_b.data.numpy())

    train_data, val_data = val_split(
        x_r, t_r, u_b, x_b, t_b, u_i, x_i, t_i, split=0.1)

    train_data_begin = [t_ri.to(device), x_ri.to(device)]
    lb = [0, 0]
    ub = [1, 1]
    N_plotting = DEFAULT_CONFIG['N_plotting']
    epochs = DEFAULT_CONFIG['epochs']

    train(net, train_data, val_data, train_data_begin, epochs=epochs)

    writer.flush()
    writer.close()

    # Checkpoints are saved under results/ above; the original loaded from the
    # working directory and passed a show= argument plot1dgrid_real lacks
    net.net.load_state_dict(torch.load("results/model_9000.pt"))
    plot1dgrid_real(lb, ub, N_plotting, net, 10000)
--------------------------------------------------------------------------------
/pinn_training2D.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from network2d import PINN
import torch.nn.functional as F
from real_sol import real_sol  # NOTE: must accept (x, y, t) here; the repo also has real_sol2d.py
from config import DEFAULT_CONFIG
from dataset2D import *
from torch.utils.tensorboard import SummaryWriter
import cProfile

writer = SummaryWriter()

############################################################## POINTS PLOTTING #############################################################
############################################################################################################################################


def plot_training_points(t_0, t_b, t_r, x_0, x_b, x_r, y_0, y_b, y_r, u_0, u_b):
    """
    Input:  t_0, t_b, t_r, x_0, x_b, x_r, y_0, y_b, y_r, u_0, u_b
    Output: display the initial, boundary and collocation training points in 3D
    """
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(projection='3d')
    ax.scatter(t_0, x_0[:, 0], y_0[:, 0], c=u_0, marker='X', vmin=-1, vmax=1)
    ax.scatter(t_b, x_b[:, 0], y_b[:, 0], c=u_b, marker='X', vmin=-1, vmax=1)
    ax.scatter(t_r, x_r[:, 0], y_r[:, 0], c='r', marker='.', alpha=0.1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x1$')
    ax.set_zlabel('$y1$')
    ax.set_title('Positions of collocation points and boundary data')
    plt.show()
########################################################## PLOTTING FUNCTIONS ###########################################################
##########################################################################################################################################


def plot1dgrid_real(lb, ub, N, model, k, with_rnn=False):
    """Plot the network prediction on an N x N x N (x, y, t) grid."""
    model = model.net
    x1space = np.linspace(lb[0], ub[0], N)
    y1space = np.linspace(lb[0], ub[0], N)
    tspace = np.linspace(lb[1], ub[1], N)
    T, X1, Y1 = np.meshgrid(tspace, x1space, y1space)
    T = torch.from_numpy(T).view(1, N**3, 1).to(device).float()
    X1 = torch.from_numpy(X1).view(1, N**3, 1).to(device).float()
    Y1 = torch.from_numpy(Y1).view(1, N**3, 1).to(device).float()
    if not with_rnn:
        T = T.transpose(0, 1).squeeze(-1)
        X1 = X1.transpose(0, 1).squeeze(-1)
        Y1 = Y1.transpose(0, 1).squeeze(-1)
    else:
        T = T.transpose(0, 1)
        X1 = X1.transpose(0, 1)
        Y1 = Y1.transpose(0, 1)
    upred = model(torch.cat((X1, Y1, T), 1))
    U = upred.view(N, N, N).detach().cpu().numpy()
    T, X1 = T.view(N, N, N).detach().cpu().numpy(), X1.view(
        N, N, N).detach().cpu().numpy()
    Y1 = Y1.view(N, N, N).detach().cpu().numpy()

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter(T, X1, Y1, c=U, marker='X')
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x1$')

    plt.savefig(f'results/real_sol_{k}')
    writer.add_figure(f'real_sol_{k}', fig)
    plt.close()

# Plot train and val losses on same figure


def plot_loss(train_losses, val_losses, accuracy):
    fig, (ax1, ax2) = plt.subplots(2, 1)
    plt.style.use('dark_background')
    ax1.plot(train_losses, label='train')
    ax1.plot(val_losses, label='val')
    ax2.plot(accuracy, label="error", color='red')

    ax1.set(ylabel='Loss')
    ax2.set(ylabel='Error')
    plt.xlabel('Epoch')

    ax1.legend()
    ax2.legend()
    plt.savefig('results/loss')
    plt.close()
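
# An optional sketch (our addition): fixed-index heatmap slices are often
# easier to read than the 3D scatter above. U is the (N, N, N) prediction
# array from plot1dgrid_real; this plots one 2D slice along its first axis
# (which axis that is in physical coordinates follows np.meshgrid's default
# 'xy' ordering used above).
def plot_slice(U, j, k):
    fig, ax = plt.subplots()
    im = ax.imshow(U[j], origin='lower', extent=[0, 1, 0, 1])
    fig.colorbar(im, ax=ax)
    ax.set_xlabel('$x1$')
    ax.set_ylabel('$y1$')
    plt.savefig(f'results/slice_{k}_{j}')
    plt.close(fig)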
########################################################### TRAINING ###########################################################
################################################################################################################################


def train(model, train_data, val_data, train_data_begin,
          epochs):
    epochs = tqdm(range(epochs), desc="Training")
    losses = []
    val_losses = []
    acc = []
    for epoch in epochs:
        # Shuffle train_data (one permutation per group of same-length
        # tensors; see the shuffle_together sketch in pinn_training.py)
        index_shuf_r = torch.randperm(train_data[0].shape[0])
        index_shuf_b = torch.randperm(train_data[3].shape[0])
        # The initial-condition tensors have their own length; the original
        # drew this permutation from a boundary tensor (train_data[5])
        index_shuf_i = torch.randperm(train_data[7].shape[0])
        x_r_train = train_data[0][index_shuf_r].to(device)
        y_r_train = train_data[1][index_shuf_r].to(device)
        t_r_train = train_data[2][index_shuf_r].to(device)
        u_b_train = train_data[3][index_shuf_b].to(device)
        x_b_train = train_data[4][index_shuf_b].to(device)
        y_b_train = train_data[5][index_shuf_b].to(device)
        t_b_train = train_data[6][index_shuf_b].to(device)
        u_i_train = train_data[7][index_shuf_i].to(device)
        x_i_train = train_data[8][index_shuf_i].to(device)
        y_i_train = train_data[9][index_shuf_i].to(device)
        t_i_train = train_data[10][index_shuf_i].to(device)
        train_data_new = [x_r_train, y_r_train, t_r_train, u_b_train,
                          x_b_train, y_b_train, t_b_train, u_i_train,
                          x_i_train, y_i_train, t_i_train]
        # Shuffle train_data_begin
        index_shuf_b = torch.randperm(train_data_begin[0].shape[0])
        x_b_train = train_data_begin[1][index_shuf_b].to(device)
        y_b_train = train_data_begin[2][index_shuf_b].to(device)
        t_b_train = train_data_begin[0][index_shuf_b].to(device)

        train_data_begin = [t_b_train, x_b_train, y_b_train]
        train_data = train_data_new
        # Supervised warm-up: fit the analytic solution directly for the
        # first 1000 epochs before switching to the physics-informed losses
        if epoch < 1000:
            loss_begin = model.train_step(train_data_begin, phase="beginning")
            epochs.set_postfix(loss=loss_begin)
        else:
            loss_residual, loss_bords, loss_init, loss_bords_der, loss_trunc = model.train_step(
                train_data)
            loss = loss_residual + loss_bords + loss_init + loss_bords_der + loss_trunc
            val_loss = model.val_step(val_data)
            accuracy = model.accuracy_step(val_data)
            epochs.set_postfix(loss_residual=loss_residual,
                               loss_bords=loss_bords,
                               loss_init=loss_init,
                               loss_bords_der=loss_bords_der,
                               loss_trunc=loss_trunc,
                               loss=loss,
                               val_loss=val_loss,
                               accuracy=accuracy)

            # Scheduler step: CosineAnnealingLR takes no metric argument; the
            # original passed val_loss, which it would read as an epoch index
            # (see the plateau-scheduler sketch below)
            model.scheduler.step()

            # Append loss lists (and eventually log for Tensorboard)
            losses.append(loss)
            val_losses.append(val_loss)
            acc.append(accuracy)

            writer.add_scalar('Loss_residual', loss_residual, epoch)
            writer.add_scalar('Loss_bords', loss_bords, epoch)
            writer.add_scalar('Loss_init', loss_init, epoch)
            writer.add_scalar('Loss_bords_der', loss_bords_der, epoch)
            writer.add_scalar('Loss_trunc', loss_trunc, epoch)
            writer.add_scalar('Loss', loss, epoch)
            writer.add_scalar('Val_loss', val_loss, epoch)
            writer.add_scalar('Accuracy', accuracy, epoch)

            plot_loss(losses, val_losses, acc)

        if epoch % 100 == 0:
            plot1dgrid_real(lb, ub, N_plotting, model, epoch)

        if epoch % 1000 == 0:
            torch.save(model.net.state_dict(), f"results/model_{epoch}.pt")
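
# Sketch (our assumption of intent): the original called
# model.scheduler.step(val_loss) in train() above, which matches the
# metric-driven API of ReduceLROnPlateau rather than CosineAnnealingLR. A
# plateau scheduler could be wired in like this (factor and patience are
# illustrative values):
def make_plateau_scheduler(optimizer):
    return torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=100)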
if __name__ == '__main__':
    seed = 42
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark=True would re-enable non-deterministic kernel selection and
    # defeat the deterministic flag above
    torch.backends.cudnn.benchmark = False

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    N_i, N_b, N_r = DEFAULT_CONFIG['N_i'], DEFAULT_CONFIG['N_b'], DEFAULT_CONFIG['N_r']
    l_b, u_b = DEFAULT_CONFIG['l_b'], DEFAULT_CONFIG['u_b']
    N_neurons, N_layers = DEFAULT_CONFIG['N_neurons'], DEFAULT_CONFIG['N_layers']

    with_rnn = False
    net = PINN(with_rnn=with_rnn, N_neurons=N_neurons, N_layers=N_layers)
    # net._model_summary()

    # Write config to tensorboard
    writer.add_text('Config', str(DEFAULT_CONFIG))

    t_ri, x_ri, y_ri = define_points_begin(10000, l_b, u_b)
    t_i, x_i, y_i, u_i, t_b, x_b, y_b, u_b, t_r, x_r, y_r = define_points(
        N_i, N_b, N_r, l_b, u_b)
    # x_r,t_r,u_b,x_b,t_b,u_i,x_i,t_i = normalize_data(x_r,t_r,
    #                                                  u_b,x_b,t_b,
    #                                                  u_i,x_i,t_i)
    plot_training_points(t_i.data.numpy(),
                         t_b.data.numpy(),
                         t_r.data.numpy(),
                         x_i.data.numpy(),
                         x_b.data.numpy(),
                         x_r.data.numpy(),
                         y_i.data.numpy(),
                         y_b.data.numpy(),
                         y_r.data.numpy(),
                         u_i.data.numpy(),
                         u_b.data.numpy())

    train_data, val_data = val_split(
        x_r, y_r, t_r, u_b, x_b, y_b, t_b, u_i, x_i, y_i, t_i, split=0.1)

    train_data_begin = [t_ri.to(device), x_ri.to(device), y_ri.to(device)]
    lb = [0, 0]
    ub = [1, 1]
    N_plotting = DEFAULT_CONFIG['N_plotting']
    epochs = DEFAULT_CONFIG['epochs']

    train(net, train_data, val_data, train_data_begin, epochs=epochs)

    writer.flush()
    writer.close()

    # Checkpoints are saved under results/ above; the original loaded from the
    # working directory and passed a show= argument plot1dgrid_real lacks
    net.net.load_state_dict(torch.load("results/model_9000.pt"))
    plot1dgrid_real(lb, ub, N_plotting, net, 10000)
--------------------------------------------------------------------------------
/variational_network2D.py:
--------------------------------------------------------------------------------
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch import optim
from torch.autograd import grad
from itertools import chain
import torchsummary
from real_sol import real_sol
from vrac.bails_sombres import RNN, Transformer
from variable_speed import c_fun
from config import DEFAULT_CONFIG
import numpy as np
from dataset import *
import matplotlib.pyplot as plt
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from getGIF import create_gif

# The following code was in 1D; we are extending it to 2D

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

writer = SummaryWriter()


def plot1dgrid_real(lb, ub, N, model, k):
    """Plot the model prediction on an N x N x N (x, y, t) grid."""
    x1space = np.linspace(lb[0], ub[0], N)
    # We add y coordinates to make it 2D
    y1space = np.linspace(lb[0], ub[0], N)
    tspace = np.linspace(lb[1], ub[1], N)
    # Assign the grids in input order (the original bound X1 to the t grid)
    T, X1, Y1 = np.meshgrid(tspace, x1space, y1space)

    T = torch.from_numpy(T).view(1, N**3, 1).to(device).float()
    X1 = torch.from_numpy(X1).view(1, N**3, 1).to(device).float()
    Y1 = torch.from_numpy(Y1).view(1, N**3, 1).to(device).float()

    T = T.transpose(0, 1).squeeze(-1)
    X1 = X1.transpose(0, 1).squeeze(-1)
    Y1 = Y1.transpose(0, 1).squeeze(-1)

    upred = model(torch.cat((X1, Y1, T), 1))
    U = upred.view(N, N, N).detach().cpu().numpy()

    T, X1, Y1 = (T.view(N, N, N).detach().cpu().numpy(),
                 X1.view(N, N, N).detach().cpu().numpy(),
                 Y1.view(N, N, N).detach().cpu().numpy())

    z_array = np.zeros((N, N, N))
    for i in range(N):
        z_array[:, :, i] = U[i]

    for j in tqdm(range(len(T)), desc=f'Plotting for each time step of epoch {k}'):
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')

        ax.plot_surface(X1[:, :, j], Y1[:, :, j],
                        z_array[:, :, j], cmap='viridis')
        # ax.scatter(X1, Y1, U[j], c=z_array[j], marker='X')
        ax.set_xlabel('$x1$')
        ax.set_ylabel('$y1$')

        plt.savefig(f'results2Dnew/epoch_{k}/t_{j}')
        writer.add_figure(f't_{j}', fig)
        plt.close()


def plot_loss(train_losses, val_losses):
    fig, ax1 = plt.subplots(1, 1)
    plt.style.use('dark_background')
    ax1.plot(train_losses, label='train')
    ax1.plot(val_losses, label='val')

    ax1.set(ylabel='Loss')
    plt.xlabel('Epoch')

    ax1.legend()
    plt.savefig('results2Dnew/loss')
    plt.close()

# Neural network


class CubicReLU(nn.Module):
    def __init__(self):
        super(CubicReLU, self).__init__()

    def forward(self, x):
        # Equivalent to max(0, x)**3 without building a constant tensor
        return torch.relu(x) ** 3
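
# Note (our addition): the loss below differentiates the network twice, and a
# plain ReLU has zero second derivative almost everywhere, which would zero
# out the PDE residual; relu(x)**3 is twice continuously differentiable, so
# the second derivatives u_xx, u_yy, u_tt stay informative.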
class network(torch.jit.ScriptModule):
    def __init__(self, N_neurons, N_layers):
        super().__init__()
        self.num_neurons = N_neurons
        self.num_layers = N_layers
        self.linear_input = nn.Linear(3, self.num_neurons)  # 3 inputs
        self.linear_hidden = nn.ModuleList(
            [nn.Linear(self.num_neurons, self.num_neurons) for _ in range(self.num_layers)])
        self.linear_output = nn.Linear(self.num_neurons, 1)
        self.activation = CubicReLU()  # nn.Tanh() if not working

    def forward(self, x):
        x = self.activation(self.linear_input(x))
        for i, linear in enumerate(self.linear_hidden):
            x = self.activation(linear(x))
        x = self.linear_output(x)
        return x


class PINN():
    def __init__(self, segments, N_neurons=64, N_layers=4):
        self.net = network(N_neurons, N_layers).to(device)
        self.optimizer = optim.Adam(
            self.net.parameters(), lr=DEFAULT_CONFIG['lr'])  # gradient descent
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=DEFAULT_CONFIG['epochs'])
        self.loss_history = []
        self.loss_history_val = []
        self.segments = segments

    def _model_summary(self):
        # The network takes a single (x, y, t) feature vector
        print(torchsummary.summary(self.net, (3,)))

    # Compute the residual
    def nth_gradient(self, f, wrt, n):
        for i in range(n):
            f = list(chain(*f))
            grads = grad(f, wrt, create_graph=True, allow_unused=True)[0]
            f = grads
            if grads is None:
                print("Bad Grad")
                return None
        return grads

    # Per-sample Hessian-based Laplacian, kept for reference: this loop can
    # produce NaN gradients, which is why anomaly detection is switched on
    def calculate_laplacian(self, model, tensor):
        torch.autograd.set_detect_anomaly(True)  # debugging aid; slows training
        laplacian_x = torch.zeros(tensor.shape[0], 1, device=device)
        laplacian_y = torch.zeros(tensor.shape[0], 1, device=device)
        laplacian_t = torch.zeros(tensor.shape[0], 1, device=device)
        for i, tensori in enumerate(tensor):
            hess = torch.autograd.functional.hessian(
                model, tensori.unsqueeze(0), create_graph=True)
            hess = hess.view(3, 3)
            laplacian_x[i] = hess[0, 0]
            laplacian_y[i] = hess[1, 1]
            laplacian_t[i] = hess[2, 2]
        return laplacian_x, laplacian_y, laplacian_t

    def flat(self, x):
        m = x.shape[0]
        return [x[i] for i in range(m)]

    def dist(self, x1, y1, t1, x2, y2, t2):
        return torch.sqrt((x1-x2)**2 + (y1-y2)**2 + (t1-t2)**2)

    def linseg(self, x, y, t, x1, y1, t1, x2, y2, t2):
        L = self.dist(x1, y1, t1, x2, y2, t2)
        xc = (x1+x2)*0.5
        tc = (t1+t2)*0.5
        yc = (y1+y2)*0.5
        # f = (1/L)*((x-x1)*(t2-t1) - (t-t1)*(x2-x1))
        # Signed distance extended to the 3 dimensions (x, y, t)
        f = (1/L)*((x-x1)*(t2-t1) - (t-t1)*(x2-x1) +
                   (y-y1)*(t2-t1) - (t-t1)*(y2-y1))
        # (renamed from t in the original, which shadowed the coordinate)
        d = (1/L)*((L/2.)**2 - self.dist(x, y, t, xc, yc, tc)**2)
        varphi = torch.sqrt(d**2 + f**4)
        phi = torch.sqrt(f**2 + 0.25*(varphi-d)**2)
        return phi
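    # Note (our reading): linseg builds an approximate distance to a single
    # segment, and phi below blends the per-segment distances with an
    # R-function, 1/phi^m = sum_i 1/phi_i^m, so phi vanishes exactly on the
    # boundary. Multiplying the raw network output by phi in u() then enforces
    # the homogeneous boundary condition by construction, as a hard constraint
    # instead of a boundary loss term.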
    def phi(self, x, y, t):  # segments is an array of all the segments composing the boundary
        m = 1.
        R = 0.
        for i in range(len(self.segments[:, 0])):
            phi_v = self.linseg(
                x, y, t,
                self.segments[i, 0], self.segments[i, 1], self.segments[i, 2],
                self.segments[i, 3], self.segments[i, 4], self.segments[i, 5])
            R += 1./phi_v**m
        R = 1/R**(1/m)
        return R

    def u(self, z):
        x, y, t = z[:, 0], z[:, 1], z[:, 2]
        x, y, t = x.unsqueeze(1), y.unsqueeze(1), t.unsqueeze(1)
        w = self.phi(x, y, t)*self.net(z)
        # add initial condition of torch.sin(np.pi*x) + 0.5*torch.sin(4*np.pi*x) for t = 0
        # w[t == 0] += torch.sin(np.pi*x[t == 0]) + 0.5 * \
        #     torch.sin(4*np.pi*x[t == 0])
        # w += torch.exp(-t**2/0.1) * \
        #     (torch.sin(np.pi*x) + 0.5*torch.sin(np.pi*y))
        return w

    def loss(self, x, y, t):
        x.requires_grad = True
        y.requires_grad = True
        t.requires_grad = True
        u_x_y_t = self.u(torch.cat((x, y, t), 1))
        # Second derivatives along each coordinate
        u_xx = self.nth_gradient(u_x_y_t, x, 2)
        u_yy = self.nth_gradient(u_x_y_t, y, 2)
        u_tt = self.nth_gradient(u_x_y_t, t, 2)

        # u_xx, u_yy, u_tt = self.calculate_laplacian(
        #     self.u, torch.cat((x, y, t), 1))

        # wave equation residual with a manufactured source term
        f = u_tt - 4*(u_xx+u_yy) - 3 * \
            (np.pi**2)*torch.sin(np.pi*x)*torch.sin(np.pi*t)
        loss = torch.mean(f ** 2)
        return loss

    def train(self, x, y, t, x_val, y_val, t_val, epochs=DEFAULT_CONFIG['epochs']):
        progress_bar = tqdm(range(epochs))
        for epoch in progress_bar:
            self.optimizer.zero_grad()
            loss = self.loss(x, y, t)
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()

            # The original never recorded the training loss, so plot_loss drew
            # an empty train curve
            self.loss_history.append(loss.item())
            loss_val = self.loss(x_val, y_val, t_val)
            self.loss_history_val.append(loss_val.item())
            if epoch % 50 == 0 and epoch != 0:
                # Create the output directory next to the other results2Dnew
                # paths used above (the original hard-coded an absolute user
                # path and swallowed mkdir errors)
                path = os.path.join('results2Dnew', f'epoch_{epoch}')
                os.makedirs(path, exist_ok=True)
                plot1dgrid_real([0, 0, 0], [1, 1, 1], 100, self.u, epoch)
                create_gif(epoch, 100)
                plot_loss(self.loss_history, self.loss_history_val)
            progress_bar.set_description(
                f"Loss: {loss.item():.4f}, Loss_val: {loss_val.item():.4f}")
        return self.loss_history, self.loss_history_val

    def predict(self, x, y, t):
        with torch.no_grad():
            x.requires_grad = False
            y.requires_grad = False
            t.requires_grad = False
            u = self.u(torch.cat((x, y, t), 1))
        return u
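
# A sketch (our addition): a full 3D collocation grid over the unit cube
# (requires PyTorch >= 1.10 for the indexing argument). The training call
# below instead passes three aligned 1-D linspaces, which only sample points
# along the cube's diagonal x = y = t.
def make_grid(n):
    s = torch.linspace(0.01, 0.99, n, device=device)
    X, Y, T = torch.meshgrid(s, s, s, indexing='ij')
    return X.reshape(-1, 1), Y.reshape(-1, 1), T.reshape(-1, 1)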
# 1D version: the four sides of the unit (x, t) square (unused here)
segments = torch.tensor(
    [[0, 0, 0, 1], [0, 1, 1, 1], [1, 1, 1, 0], [1, 0, 0, 0]], device=device)
# segments2 must be a tensor of the form [[x1,y1,t1,x2,y2,t2],...]; it lists
# the 12 edges joining the 8 vertices of the unit cube
segments2 = torch.tensor([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [
    0, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1], [1, 1, 0, 1, 1, 1], [1, 0, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1]], device=device)


# renamed instance: the original bound the name PINN, shadowing the class
pinn = PINN(segments2, N_neurons=16, N_layers=2)
N_points = 20
x = torch.linspace(0.01, 0.99, N_points, device=device).unsqueeze(1)
y = torch.linspace(0.01, 0.99, N_points, device=device).unsqueeze(1)
t = torch.linspace(0.01, 0.99, N_points, device=device).unsqueeze(1)

# NOTE: these tensors are built but never used; train() below receives the
# raw diagonal linspaces (see the make_grid sketch above for a full grid)
x_train = x.repeat(N_points, 1)
y_train = y.repeat(N_points, 1)
t_train = t.repeat(N_points, 1).t().reshape(-1, 1)

x_val = torch.linspace(0.01, 0.99, 100, device=device).unsqueeze(1)
y_val = torch.linspace(0.01, 0.99, 100, device=device).unsqueeze(1)
t_val = torch.linspace(0.01, 0.99, 100, device=device).unsqueeze(1)

loss_history, loss_history_val = pinn.train(
    x, y, t, x_val, y_val, t_val, epochs=10000)

plt.plot(loss_history)
plt.plot(loss_history_val)
plt.show()
--------------------------------------------------------------------------------