├── Burgers
│   ├── Burgers_Noise_free
│   │   ├── Burgers.py
│   │   ├── Initial.png
│   │   ├── __pycache__
│   │   │   ├── models.cpython-36.pyc
│   │   │   └── plotting.cpython-36.pyc
│   │   ├── burgers_shock.mat
│   │   ├── models.py
│   │   └── plotting.py
│   └── Burgers_Noisy
│       ├── Burgers.py
│       ├── Initial.png
│       ├── __pycache__
│       │   ├── models.cpython-36.pyc
│       │   └── plotting.cpython-36.pyc
│       ├── burgers_shock.mat
│       ├── models.py
│       └── plotting.py
├── Darcy
│   ├── Darcy_noise_free
│   │   ├── Darcy.py
│   │   ├── __pycache__
│   │   │   ├── models.cpython-36.pyc
│   │   │   └── plotting.cpython-36.pyc
│   │   ├── models.py
│   │   ├── nonlinear2d_data.npz
│   │   └── plotting.py
│   └── Darcy_noisy
│       ├── Darcy.py
│       ├── __pycache__
│       │   ├── models.cpython-36.pyc
│       │   └── plotting.cpython-36.pyc
│       ├── models.py
│       ├── nonlinear2d_data.npz
│       └── plotting.py
├── ODE
│   ├── ODE.py
│   ├── ODE2000.mat
│   ├── __pycache__
│   │   ├── models.cpython-36.pyc
│   │   └── plotting.cpython-36.pyc
│   ├── models.py
│   └── plotting.py
├── README.md
└── Tutorial
    ├── Entropic.png
    ├── GANs.png
    ├── Mode_Collapse.png
    ├── ODE2000.mat
    ├── UQPINNs_tutorial.ipynb
    ├── __pycache__
    │   └── plotting.cpython-36.pyc
    └── plotting.py

--------------------------------------------------------------------------------
/Burgers/Burgers_Noise_free/Burgers.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Wed Nov 2018
3 | 
4 | @author: Yibo Yang
5 | """
6 | 
7 | import sys
8 | sys.path.insert(0, './Utilities/')
9 | import os
10 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
11 | 
12 | import matplotlib
13 | pgf_with_rc_fonts = {"pgf.texsystem": "pdflatex"}
14 | matplotlib.rcParams.update(pgf_with_rc_fonts)
15 | 
16 | import numpy as np
17 | import matplotlib.pyplot as plt
18 | plt.switch_backend('agg')
19 | from pyDOE import lhs
20 | from models import Burgers_UQPINN
21 | 
22 | import scipy.io
23 | from scipy.interpolate import griddata
24 | from pyDOE import lhs
25 | from plotting import newfig, savefig
26 | from mpl_toolkits.mplot3d import Axes3D
27 | import time
28 | import matplotlib.gridspec as gridspec
29 | from mpl_toolkits.axes_grid1 import make_axes_locatable
30 | 
31 | np.random.seed(1234)
32 | 
33 | if __name__ == "__main__":
34 | 
35 |     # Number of collocation points
36 |     N_f = 10000
37 | 
38 |     # Number of training data on the boundary (boundary condition)
39 |     N_b = 100
40 | 
41 |     # Number of training data for the initial condition
42 |     N_i = 50
43 |     N_u = N_b + N_i
44 | 
45 |     # Dimension of input, output and latent variable
46 |     X_dim = 1
47 |     Y_dim = 1
48 |     T_dim = 1
49 |     Z_dim = 1
50 | 
51 |     # Noise level (zero in this noise-free case)
52 |     noise = 0.
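    # [Editor's aside, not part of the original file] The PDE enforced at the
    # collocation points (see get_r in models.py below) is the viscous Burgers
    # equation
    #     u_t + u * u_x - (0.01 / pi) * u_xx = 0,   x in [-1, 1], t in [0, 1],
    # with initial condition u(0, x) = -sin(pi * x) and boundary data
    # u(t, -1) = u(t, 1) = 0.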
53 | 
54 |     # Noise-free initial condition
55 |     def f_initial(X):
56 |         return - np.sin(np.pi * X)
57 | 
58 |     # x and t on the initial condition
59 |     X_i = -1 + 2*np.random.random((N_i))[:,None]
60 |     X_i = np.sort(X_i, axis=0)
61 |     T_i = np.zeros((N_i))[:,None]
62 | 
63 |     # x and t on the boundary condition
64 |     X_b1 = np.ones((N_b // 2))[:,None]
65 |     X_b2 = - np.ones((N_b // 2))[:,None]
66 |     T_b1 = np.random.random(N_b // 2)[:,None]
67 |     T_b2 = np.random.random(N_b // 2)[:,None]
68 |     X_b = np.vstack((X_b1, X_b2))
69 |     T_b = np.vstack((T_b1, T_b2))
70 | 
71 |     # x and t for training points (initial condition + boundary condition)
72 |     X_u = np.vstack((X_i, X_b))
73 |     T_u = np.vstack((T_i, T_b))
74 | 
75 |     # x and t for collocation points
76 |     X_f = -1 + 2*np.random.random((N_f))[:,None]
77 |     T_f = np.random.random((N_f))[:,None]
78 | 
79 |     # Generate the initial-condition data and the exact initial condition
80 |     X_ii = np.linspace(-1., 1., 500)
81 |     Y_ii = f_initial(X_ii)
82 |     Y_i = f_initial(X_i)
83 |     Y_refi = Y_i
84 |     Y_i = Y_i + noise * np.std(Y_i)*np.random.randn(N_i,Y_dim)
85 | 
86 |     # Plot the exact initial condition with the data for the initial condition
87 |     plt.figure(1, figsize=(6, 4))
88 |     plt.xticks(fontsize=11)
89 |     plt.yticks(fontsize=11)
90 |     plt.plot(X_ii, Y_ii, 'b-', label = "Exact", linewidth=2)
91 |     plt.plot(X_i, Y_i, 'kx', label = "Noise-free initial condition", alpha = 1.)
92 |     plt.legend(loc='upper right', frameon=False, prop={'size': 11})
93 |     ax = plt.gca()
94 |     plt.xlim(-1.0, 1.0)
95 |     plt.xlabel('$x$',fontsize=11)
96 |     plt.ylabel('$u(0, x)$',fontsize=11)
97 |     plt.savefig('./Initial.png', dpi = 600)
98 | 
99 |     # Data for training
100 |     Y_b = np.zeros((N_b))[:,None]
101 |     Y_u = np.vstack((Y_i, Y_b))
102 | 
103 |     # Load the reference solution of the Burgers equation
104 |     x = np.linspace(-1., 1., 256)[:,None]
105 |     t = np.linspace(0., 1., 100)[:,None]
106 |     X, T = np.meshgrid(x,t)
107 |     XT = np.hstack((X.flatten()[:,None], T.flatten()[:,None]))
108 |     X_star = XT[:,0][:,None]
109 |     T_star = XT[:,1][:,None]
110 | 
111 |     data = scipy.io.loadmat('./burgers_shock.mat')
112 |     Exact = np.real(data['usol']).T
113 | 
114 |     # Model creation
115 |     layers_P = np.array([X_dim+T_dim+Z_dim,50,50,50,50,Y_dim])
116 |     layers_Q = np.array([X_dim+T_dim+Y_dim,50,50,50,50,Z_dim])
117 |     layers_T = np.array([X_dim+T_dim+Y_dim,50,50,50,1])
118 | 
119 |     model = Burgers_UQPINN(X_f, T_f, X_u, T_u, Y_u, layers_P, layers_Q, layers_T, lam = 1.5, beta = 1.)
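    # [Editor's aside, not part of the original file] A minimal sketch of what
    # model.train(...) below does, based on Burgers_UQPINN.train in models.py:
    # per iteration the discriminator is updated k1 = 1 time and the generator
    # k2 = 5 times on the same mini-batch,
    #
    #     for it in range(nIter):
    #         X_u_b, T_u_b, X_f_b, T_f_b, Y_u_b = model.fetch_minibatch(...)
    #         Z_u = np.random.randn(batch_size_u, 1)   # latent samples, z ~ N(0, I)
    #         Z_f = np.random.randn(batch_size_f, 1)
    #         for _ in range(model.k1):                # discriminator step(s)
    #             model.sess.run(model.train_op_T, tf_dict)
    #         for _ in range(model.k2):                # generator steps
    #             model.sess.run(model.train_op_KL, tf_dict)
    #
    # where train_op_KL minimizes G_loss = KL + (1 - lam) * log_q + beta * loss_f,
    # so with lam = 1.5 the entropic term enters with weight (1 - lam) = -0.5.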
120 | 
121 |     model.train(nIter = 30000, N_u = N_u, N_f = N_f, batch_size_u = N_u, batch_size_f = N_f)
122 | 
123 |     # Prediction
124 |     N_samples = 500
125 |     samples_mean = np.zeros((X_star.shape[0], N_samples))
126 |     for i in range(0, N_samples):
127 |         samples_mean[:,i:i+1] = model.generate_sample(X_star, T_star)
128 | 
129 |     # Use the mean and variance of the predicted samples as the prediction and its uncertainty
130 |     U_pred = np.mean(samples_mean, axis = 1)
131 |     U_pred = griddata(XT, U_pred.flatten(), (X, T), method='cubic')
132 |     Sigma_pred = np.var(samples_mean, axis = 1)
133 |     Sigma_pred = griddata(XT, Sigma_pred.flatten(), (X, T), method='cubic')
134 | 
135 |     # Compute the relative L2 error between the prediction and the reference solution
136 |     error_u = np.linalg.norm(Exact-U_pred,2)/np.linalg.norm(Exact,2)
137 |     print('Error u: %e' % (error_u))
138 |     np.save('L2_error.npy', error_u)
139 | 
140 |     ######################################################################
141 |     ############################# Plotting ###############################
142 |     ######################################################################
143 | 
144 |     fig, ax = newfig(1.0, 1.1)
145 |     ax.axis('off')
146 | 
147 |     ####### Row 0: u(t,x) ##################
148 |     gs0 = gridspec.GridSpec(1, 2)
149 |     gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
150 |     ax = plt.subplot(gs0[:, :])
151 | 
152 |     h = ax.imshow(U_pred.T, interpolation='nearest', cmap='rainbow',
153 |                   extent=[t.min(), t.max(), x.min(), x.max()],
154 |                   origin='lower', aspect='auto')
155 |     divider = make_axes_locatable(ax)
156 |     cax = divider.append_axes("right", size="5%", pad=0.05)
157 |     fig.colorbar(h, cax=cax)
158 | 
159 |     ax.plot(T_u, X_u, 'kx', label = 'Data (%d points)' % (Y_u.shape[0]), markersize = 4, clip_on = False)
160 | 
161 |     line = np.linspace(x.min(), x.max(), 2)[:,None]
162 |     ax.plot(t[25]*np.ones((2,1)), line, 'w-', linewidth = 1)
163 |     ax.plot(t[50]*np.ones((2,1)), line, 'w-', linewidth = 1)
164 |     ax.plot(t[75]*np.ones((2,1)), line, 'w-', linewidth = 1)
165 | 
166 |     ax.set_xlabel('$t$')
167 |     ax.set_ylabel('$x$')
168 |     ax.legend(frameon=False, loc = 'best')
169 |     ax.set_title('$u(t,x)$', fontsize = 10)
170 | 
171 | 
172 |     ####### Row 1: u(t,x) slices ##################
173 |     gs1 = gridspec.GridSpec(1, 3)
174 |     gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5)
175 | 
176 |     ax = plt.subplot(gs1[0, 0])
177 |     ax.plot(x,Exact[25,:], 'b-', linewidth = 2, label = 'Exact')
178 |     ax.plot(x,U_pred[25,:], 'r--', linewidth = 2, label = 'Prediction')
179 |     lower = U_pred[25,:] - 2.0*np.sqrt(Sigma_pred[25,:])
180 |     upper = U_pred[25,:] + 2.0*np.sqrt(Sigma_pred[25,:])
181 |     plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(),
182 |                      facecolor='orange', alpha=0.5, label="Two std band")
183 |     ax.set_xlabel('$x$')
184 |     ax.set_ylabel('$u(t,x)$')
185 |     ax.set_title('$t = 0.25$', fontsize = 10)
186 |     ax.axis('square')
187 |     ax.set_xlim([-1.1,1.1])
188 |     ax.set_ylim([-1.1,1.1])
189 | 
190 |     ax = plt.subplot(gs1[0, 1])
191 |     ax.plot(x,Exact[50,:], 'b-', linewidth = 2, label = 'Exact')
192 |     ax.plot(x,U_pred[50,:], 'r--', linewidth = 2, label = 'Prediction')
193 |     lower = U_pred[50,:] - 2.0*np.sqrt(Sigma_pred[50,:])
194 |     upper = U_pred[50,:] + 2.0*np.sqrt(Sigma_pred[50,:])
195 |     plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(),
196 |                      facecolor='orange', alpha=0.5, label="Two std band")
197 |     ax.set_xlabel('$x$')
198 |     ax.set_ylabel('$u(t,x)$')
199 |     ax.axis('square')
200 |     ax.set_xlim([-1.1,1.1])
201 |
ax.set_ylim([-1.1,1.1]) 202 | ax.set_title('$t = 0.50$', fontsize = 10) 203 | ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.35), ncol=5, frameon=False) 204 | 205 | ax = plt.subplot(gs1[0, 2]) 206 | ax.plot(x,Exact[75,:], 'b-', linewidth = 2, label = 'Exact') 207 | ax.plot(x,U_pred[75,:], 'r--', linewidth = 2, label = 'Prediction') 208 | lower = U_pred[75,:] - 2.0*np.sqrt(Sigma_pred[75,:]) 209 | upper = U_pred[75,:] + 2.0*np.sqrt(Sigma_pred[75,:]) 210 | plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(), 211 | facecolor='orange', alpha=0.5, label="Two std band") 212 | ax.set_xlabel('$x$') 213 | ax.set_ylabel('$u(t,x)$') 214 | ax.axis('square') 215 | ax.set_xlim([-1.1,1.1]) 216 | ax.set_ylim([-1.1,1.1]) 217 | ax.set_title('$t = 0.75$', fontsize = 10) 218 | savefig('./Prediction') 219 | 220 | 221 | fig, ax = newfig(1.0) 222 | ax.axis('off') 223 | 224 | ############# Uncertainty ################## 225 | gs2 = gridspec.GridSpec(1, 2) 226 | gs2.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0) 227 | ax = plt.subplot(gs2[:, :]) 228 | 229 | h = ax.imshow(Sigma_pred.T, interpolation='nearest', cmap='rainbow', 230 | extent=[t.min(), t.max(), x.min(), x.max()], 231 | origin='lower', aspect='auto') 232 | divider = make_axes_locatable(ax) 233 | cax = divider.append_axes("right", size="5%", pad=0.05) 234 | fig.colorbar(h, cax=cax) 235 | ax.set_xlabel('$t$') 236 | ax.set_ylabel('$x$') 237 | ax.legend(frameon=False, loc = 'best') 238 | ax.set_title('Variance of $u(t,x)$', fontsize = 10) 239 | savefig('./Variance') 240 | 241 | 242 | 243 | 244 | 245 | -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/Initial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noise_free/Initial.png -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noise_free/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noise_free/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/burgers_shock.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noise_free/burgers_shock.mat -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import timeit 10 | 11 | class Burgers_UQPINN: 12 | # Initialize the class 13 | def __init__(self, X_f, T_f, X_u, T_u, 
Y_u, layers_P, layers_Q, layers_T, lam = 1.0, beta = 1.0): 14 | 15 | # Normalize data 16 | self.Xmean, self.Xstd = X_f.mean(0), X_f.std(0) 17 | self.Tmean, self.Tstd = T_f.mean(0), T_f.std(0) 18 | self.Ymean, self.Ystd = Y_u.mean(0), Y_u.std(0) 19 | X_f = (X_f - self.Xmean) / self.Xstd 20 | X_u = (X_u - self.Xmean) / self.Xstd 21 | T_f = (T_f - self.Tmean) / self.Tstd 22 | T_u = (T_u - self.Tmean) / self.Tstd 23 | 24 | # Jacobian of the PDE because of normalization 25 | self.Jacobian_X = 1 / self.Xstd 26 | self.Jacobian_T = 1 / self.Tstd 27 | 28 | 29 | self.X_f = X_f 30 | self.X_u = X_u 31 | self.T_f = T_f 32 | self.T_u = T_u 33 | self.Y_u = Y_u 34 | 35 | self.layers_P = layers_P 36 | self.layers_Q = layers_Q 37 | self.layers_T = layers_T 38 | 39 | self.X_dim = X_u.shape[1] 40 | self.T_dim = T_u.shape[1] 41 | self.Y_dim = Y_u.shape[1] 42 | self.Z_dim = layers_Q[-1] 43 | self.lam = lam 44 | self.beta = beta 45 | 46 | self.k1 = 1 47 | self.k2 = 5 48 | 49 | # Initialize network weights and biases 50 | self.weights_P, self.biases_P = self.initialize_NN(layers_P) 51 | self.weights_Q, self.biases_Q = self.initialize_NN(layers_Q) 52 | self.weights_T, self.biases_T = self.initialize_NN(layers_T) 53 | 54 | # Define Tensorflow session 55 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 56 | 57 | # Define placeholders and computational graph 58 | self.X_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 59 | self.X_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 60 | self.T_u_tf = tf.placeholder(tf.float32, shape=(None, self.T_dim)) 61 | self.T_f_tf = tf.placeholder(tf.float32, shape=(None, self.T_dim)) 62 | self.Y_u_tf = tf.placeholder(tf.float32, shape=(None, self.Y_dim)) 63 | self.Z_u_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 64 | self.Z_f_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 65 | 66 | self.Y_u_pred = self.net_P(self.X_u_tf, self.T_u_tf, self.Z_u_tf) 67 | self.Y_f_pred = self.get_r(self.X_f_tf, self.T_f_tf, self.Z_f_tf) 68 | 69 | # Generator loss (to be minimized) 70 | self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss = self.compute_generator_loss(self.X_u_tf, self.T_u_tf, self.Y_u_tf, 71 | self.Y_u_pred, self.X_f_tf, self.T_f_tf, self.Y_f_pred, self.Z_u_tf, self.Z_f_tf) 72 | 73 | # Discriminator loss (to be minimized) 74 | self.T_loss = self.compute_discriminator_loss(self.X_u_tf, self.T_u_tf, self.Y_u_tf, self.Z_u_tf) 75 | 76 | # Generator samples of y given x and t 77 | self.sample = self.sample_generator(self.X_u_tf, self.T_u_tf, self.Z_u_tf) 78 | 79 | # Define optimizer 80 | self.optimizer_KL = tf.train.AdamOptimizer(1e-4) 81 | self.optimizer_T = tf.train.AdamOptimizer(1e-4) 82 | 83 | # Define train Ops 84 | self.train_op_KL = self.optimizer_KL.minimize(self.G_loss, 85 | var_list = [self.weights_P, self.biases_P, 86 | self.weights_Q, self.biases_Q]) 87 | 88 | self.train_op_T = self.optimizer_T.minimize(self.T_loss, 89 | var_list = [self.weights_T, self.biases_T]) 90 | 91 | # Initialize Tensorflow variables 92 | init = tf.global_variables_initializer() 93 | self.sess.run(init) 94 | 95 | 96 | # Initialize network weights and biases using Xavier initialization 97 | def initialize_NN(self, layers): 98 | # Xavier initialization 99 | def xavier_init(size): 100 | in_dim = size[0] 101 | out_dim = size[1] 102 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
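            # [Editor's note] i.e. W ~ N(0, 2/(in_dim + out_dim)), the Xavier/Glorot scale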
103 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, dtype=tf.float32) 104 | 105 | weights = [] 106 | biases = [] 107 | num_layers = len(layers) 108 | for l in range(0,num_layers-1): 109 | W = xavier_init(size=[layers[l], layers[l+1]]) 110 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 111 | weights.append(W) 112 | biases.append(b) 113 | return weights, biases 114 | 115 | 116 | # Evaluates the forward pass 117 | def forward_pass(self, H, layers, weights, biases): 118 | num_layers = len(layers) 119 | for l in range(0,num_layers-2): 120 | W = weights[l] 121 | b = biases[l] 122 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 123 | W = weights[-1] 124 | b = biases[-1] 125 | H = tf.add(tf.matmul(H, W), b) 126 | return H 127 | 128 | # right hand side terms of the PDE (in this case are zero) 129 | def f(self, X_normalized): 130 | return tf.zeros_like(X_normalized) 131 | 132 | # Decoder: p(y|x,z) 133 | def net_P(self, X, T, Z): 134 | Y = self.forward_pass(tf.concat([X, T, Z], 1), 135 | self.layers_P, 136 | self.weights_P, 137 | self.biases_P) 138 | return Y 139 | 140 | # Encoder: q(z|x,y) 141 | def net_Q(self, X, T, Y): 142 | Z = self.forward_pass(tf.concat([X, T, Y], 1), 143 | self.layers_Q, 144 | self.weights_Q, 145 | self.biases_Q) 146 | return Z 147 | 148 | # Discriminator 149 | def net_T(self, X, T, Y): 150 | T = self.forward_pass(tf.concat([X, T, Y], 1), 151 | self.layers_T, 152 | self.weights_T, 153 | self.biases_T) 154 | return T 155 | 156 | # Physics-Informed neural network prediction 157 | def get_u(self, X, T, Z): 158 | z_prior = Z 159 | u = self.net_P(X, T, z_prior) 160 | return u 161 | 162 | # Physics-Informed residual on the collocation points 163 | def get_r(self, X, T, Z): 164 | z_prior = Z 165 | u = self.net_P(X, T, z_prior) 166 | u_t = tf.gradients(u, T)[0] 167 | u_x = tf.gradients(u, X)[0] 168 | u_xx = tf.gradients(u_x, X)[0] 169 | f = self.f(X) 170 | r = (self.Jacobian_T) * u_t + (self.Jacobian_X) * u * u_x - (0.01/np.pi) * (self.Jacobian_X ** 2) * u_xx - f 171 | return r 172 | 173 | # Compute the generator loss 174 | def compute_generator_loss(self, X_u, T_u, Y_u, Y_u_pred, X_f, T_f, Y_f_pred, Z_u, Z_f): 175 | # Prior: 176 | z_u_prior = Z_u 177 | z_f_prior = Z_f 178 | # Encoder: q(z|x,y) 179 | z_u_encoder = self.net_Q(X_u, T_u, Y_u_pred) 180 | z_f_encoder = self.net_Q(X_f, T_f, Y_f_pred) 181 | # Discriminator loss 182 | Y_pred = self.net_P(X_u, T_u, Z_u) 183 | T_pred = self.net_T(X_u, T_u, Y_pred) 184 | 185 | # KL-divergence between the data distribution and the model distribution 186 | KL = tf.reduce_mean(T_pred) 187 | 188 | # Entropic regularization 189 | log_q = - tf.reduce_mean(tf.square(z_u_prior-z_u_encoder)) 190 | 191 | # Physics-informed loss 192 | loss_f = tf.reduce_mean(tf.square(Y_f_pred)) 193 | 194 | # Generator loss 195 | loss = KL + (1.0-self.lam)*log_q + self.beta * loss_f 196 | 197 | return loss, KL, (1.0-self.lam)*log_q, self.beta * loss_f 198 | 199 | # Compute the discriminator loss 200 | def compute_discriminator_loss(self, X, T, Y, Z): 201 | # Prior: p(z) 202 | z_prior = Z 203 | # Decoder: p(y|x,z) 204 | Y_pred = self.net_P(X, T, z_prior) 205 | 206 | # Discriminator loss 207 | T_real = self.net_T(X, T, Y) 208 | T_fake = self.net_T(X, T, Y_pred) 209 | 210 | T_real = tf.sigmoid(T_real) 211 | T_fake = tf.sigmoid(T_fake) 212 | 213 | T_loss = -tf.reduce_mean(tf.log(1.0 - T_real + 1e-8) + \ 214 | tf.log(T_fake + 1e-8)) 215 | 216 | return T_loss 217 | 218 | 219 | # Fetches a mini-batch of data 
220 | def fetch_minibatch(self, X_u, T_u, X_f, T_f, Y_u, N_batch_u, N_batch_f): 221 | N_u = X_u.shape[0] 222 | N_f = X_f.shape[0] 223 | idx_u = np.random.choice(N_u, N_batch_u, replace=False) 224 | idx_f = np.random.choice(N_f, N_batch_f, replace=False) 225 | X_u_batch = X_u[idx_u,:] 226 | T_u_batch = T_u[idx_u,:] 227 | X_f_batch = X_f[idx_f,:] 228 | T_f_batch = T_f[idx_f,:] 229 | Y_u_batch = Y_u[idx_u,:] 230 | return X_u_batch, T_u_batch, X_f_batch, T_f_batch, Y_u_batch 231 | 232 | 233 | # Trains the model 234 | def train(self, nIter = 20000, N_u = 300, N_f = 5000, batch_size_u = 50, batch_size_f = 500): 235 | 236 | start_time = timeit.default_timer() 237 | 238 | # Store generator loss and discriminator loss as function of iteration 239 | iteration = [] 240 | loss_D = [] 241 | loss_G = [] 242 | for it in range(nIter): 243 | # Fetch a mini-batch of data 244 | X_u_batch, T_u_batch, X_f_batch, T_f_batch, Y_u_batch = self.fetch_minibatch(self.X_u, self.T_u, self.X_f, self.T_f, 245 | self.Y_u, batch_size_u, batch_size_f) 246 | 247 | Z_u = np.random.randn(batch_size_u, 1) 248 | Z_f = np.random.randn(batch_size_f, 1) 249 | 250 | # Define a dictionary for associating placeholders with data 251 | tf_dict = {self.X_u_tf: X_u_batch, self.T_u_tf: T_u_batch, self.Y_u_tf: Y_u_batch, self.X_f_tf: X_f_batch, 252 | self.T_f_tf: T_f_batch, self.Z_u_tf: Z_u, self.Z_f_tf: Z_f} 253 | 254 | # Run the Tensorflow session to minimize the loss 255 | for i in range(self.k1): 256 | self.sess.run(self.train_op_T, tf_dict) 257 | for j in range(self.k2): 258 | self.sess.run(self.train_op_KL, tf_dict) 259 | 260 | # Print 261 | if it % 100 == 0: 262 | elapsed = timeit.default_timer() - start_time 263 | loss_GG, loss_KL_value, reconv, loss_PDE = self.sess.run([self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss], tf_dict) 264 | loss_T_value = self.sess.run(self.T_loss, tf_dict) 265 | 266 | iteration.append(it) 267 | loss_D.append(loss_T_value) 268 | loss_G.append(loss_GG) 269 | print('It: %d, KL_loss: %.2e, Recon_loss: %.2e, PDE_loss: %.2e, T_loss: %.2e, Time: %.2f' % 270 | (it, loss_KL_value, reconv, loss_PDE, loss_T_value, elapsed)) 271 | start_time = timeit.default_timer() 272 | 273 | # Generate samples of y given x by sampling from the latent space z 274 | def sample_generator(self, X, T, Z): 275 | # Prior: 276 | z_prior = Z 277 | # Decoder: p(y|x,z) 278 | Y_pred = self.net_P(X, T, z_prior) 279 | return Y_pred 280 | 281 | # Predict y given x 282 | def generate_sample(self, X_star, T_star): 283 | X_star = (X_star - self.Xmean) / self.Xstd 284 | T_star = (T_star - self.Tmean) / self.Tstd 285 | Z = np.random.randn(X_star.shape[0], 1) 286 | tf_dict = {self.X_u_tf: X_star, self.T_u_tf: T_star, self.Z_u_tf: Z} 287 | Y_star = self.sess.run(self.sample, tf_dict) 288 | Y_star = Y_star 289 | return Y_star 290 | 291 | def predict_f(self, X_star, T_star): 292 | # Center around the origin 293 | X_star = (X_star - self.Xmean) / self.Xstd 294 | T_star = (T_star - self.Tmean) / self.Tstd 295 | # Predict 296 | z_f = np.random.randn(X_star.shape[0], self.Z_dim) 297 | tf_dict = {self.X_f_tf: X_star[:,0:1], self.T_f_tf: T_star, self.Z_f_tf: z_f} 298 | f_star = self.sess.run(self.Y_f_pred, tf_dict) 299 | return f_star 300 | 301 | -------------------------------------------------------------------------------- /Burgers/Burgers_Noise_free/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib as mpl 3 | #mpl.use('pgf') 4 | 5 | def figsize(scale, nplots 
= 1):
6 |     fig_width_pt = 390.0                            # Get this from LaTeX using \the\textwidth
7 |     inches_per_pt = 1.0/72.27                       # Convert pt to inch
8 |     golden_mean = (np.sqrt(5.0)-1.0)/2.0            # Aesthetic ratio (you could change this)
9 |     fig_width = fig_width_pt*inches_per_pt*scale    # width in inches
10 |     fig_height = nplots*fig_width*golden_mean      # height in inches
11 |     fig_size = [fig_width,fig_height]
12 |     return fig_size
13 | 
14 | pgf_with_latex = {                      # setup matplotlib to use latex for output
15 |     "pgf.texsystem": "pdflatex",        # change this if using xetex or luatex
16 |     "text.usetex": True,                # use LaTeX to write all text
17 |     "font.family": "serif",
18 |     "font.serif": [],                   # blank entries should cause plots to inherit fonts from the document
19 |     "font.sans-serif": [],
20 |     "font.monospace": [],
21 |     "axes.labelsize": 10,               # LaTeX default is 10pt font.
22 |     "font.size": 10,
23 |     "legend.fontsize": 8,               # Make the legend/label fonts a little smaller
24 |     "xtick.labelsize": 8,
25 |     "ytick.labelsize": 8,
26 |     "figure.figsize": figsize(1.0),     # default fig size of 1.0 textwidth
27 |     "pgf.preamble": [
28 |         r"\usepackage[utf8x]{inputenc}",    # use utf8 fonts because your computer can handle it :)
29 |         r"\usepackage[T1]{fontenc}",        # plots will be generated using this preamble
30 |         ]
31 |     }
32 | mpl.rcParams.update(pgf_with_latex)
33 | 
34 | import matplotlib.pyplot as plt
35 | 
36 | # I make my own newfig and savefig functions
37 | def newfig(width, nplots = 1):
38 |     fig = plt.figure(figsize=figsize(width, nplots))
39 |     ax = fig.add_subplot(111)
40 |     return fig, ax
41 | 
42 | def savefig(filename, crop = True):
43 |     if crop == True:
44 | #        plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0)
45 |         plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0)
46 |         plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0)
47 |     else:
48 | #        plt.savefig('{}.pgf'.format(filename))
49 |         plt.savefig('{}.pdf'.format(filename))
50 |         plt.savefig('{}.eps'.format(filename))
51 | 
52 | ## Simple plot
53 | #fig, ax  = newfig(1.0)
54 | #
55 | #def ema(y, a):
56 | #    s = []
57 | #    s.append(y[0])
58 | #    for t in range(1, len(y)):
59 | #        s.append(a * y[t] + (1-a) * s[t-1])
60 | #    return np.array(s)
61 | #
62 | #y = [0]*200
63 | #y.extend([20]*(1000-len(y)))
64 | #s = ema(y, 0.01)
65 | #
66 | #ax.plot(s)
67 | #ax.set_xlabel('X Label')
68 | #ax.set_ylabel('EMA')
69 | #
70 | #savefig('ema')

--------------------------------------------------------------------------------
/Burgers/Burgers_Noisy/Burgers.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Wed Nov 2018
3 | 
4 | @author: Yibo Yang
5 | """
6 | 
7 | import sys
8 | sys.path.insert(0, './Utilities/')
9 | import os
10 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
11 | 
12 | import matplotlib
13 | pgf_with_rc_fonts = {"pgf.texsystem": "pdflatex"}
14 | matplotlib.rcParams.update(pgf_with_rc_fonts)
15 | 
16 | import numpy as np
17 | import matplotlib.pyplot as plt
18 | plt.switch_backend('agg')
19 | from pyDOE import lhs
20 | 
21 | from models import Burgers_UQPINN
22 | 
23 | import scipy.io
24 | from scipy.interpolate import griddata
25 | from pyDOE import lhs
26 | from plotting import newfig, savefig
27 | from mpl_toolkits.mplot3d import Axes3D
28 | import time
29 | import matplotlib.gridspec as gridspec
30 | from mpl_toolkits.axes_grid1 import make_axes_locatable
31 | 
32 | np.random.seed(1234)
33 | 
34 | if __name__ == "__main__":
35 | 
36 |     # Number of collocation points
37 |     N_f = 10000
38 | 
39 |     # Number of training data on the boundary (boundary condition)
40 |     N_b = 100
41 | 
42 |     # Number of training data for the initial condition
43 |     N_i = 50
44 |     N_u = N_b + N_i
45 | 
46 |     # Dimension of input, output and latent variable
47 |     X_dim = 1
48 |     Y_dim = 1
49 |     T_dim = 1
50 |     Z_dim = 1
51 | 
52 |     # Noise level (standard deviation of the noise added to the initial condition)
53 |     err_var = 0.1
54 | 
55 |     # x and t on the initial condition
56 |     X_i = -1 + 2*np.random.random((N_i))[:,None]
57 |     X_i = np.sort(X_i, axis=0)
58 |     T_i = np.zeros((N_i))[:,None]
59 | 
60 |     # x and t on the boundary condition
61 |     X_b1 = np.ones((N_b // 2))[:,None]
62 |     X_b2 = - np.ones((N_b // 2))[:,None]
63 |     T_b1 = np.random.random(N_b // 2)[:,None]
64 |     T_b2 = np.random.random(N_b // 2)[:,None]
65 |     X_b = np.vstack((X_b1, X_b2))
66 |     T_b = np.vstack((T_b1, T_b2))
67 | 
68 |     # x and t for training points (initial condition + boundary condition)
69 |     X_u = np.vstack((X_i, X_b))
70 |     T_u = np.vstack((T_i, T_b))
71 | 
72 |     # x and t for collocation points
73 |     X_f = -1 + 2*np.random.random((N_f))[:,None]
74 |     T_f = np.random.random((N_f))[:,None]
75 | 
76 |     # Noisy initial condition
77 |     def f_initial(X):
78 |         return - np.sin(np.pi * X)
79 |     error = 1.0/np.exp(3.0*(abs(X_i)))*np.random.normal(0,err_var,X_i.size)[:,None]
80 |     def ff(X, error):
81 |         return -np.sin(np.pi*(X+2*error))+error
82 | 
83 |     X_ii = np.linspace(-1,1,500)
84 |     Y_ii = f_initial(X_ii)
85 |     Y_i = ff(X_i,error)
86 | 
87 |     # Plot the exact initial condition with the noisy data for the initial condition
88 |     plt.figure(1, figsize=(6, 4))
89 |     plt.xticks(fontsize=11)
90 |     plt.yticks(fontsize=11)
91 |     plt.plot(X_ii, Y_ii, 'b-', label = "Exact", linewidth=2)
92 |     plt.plot(X_i, Y_i, 'kx', label = "Noisy initial condition", alpha = 1.)
93 |     plt.legend(loc='upper right', frameon=False, prop={'size': 11})
94 |     ax = plt.gca()
95 |     plt.xlim(-1.0, 1.0)
96 |     plt.xlabel('$x$',fontsize=11)
97 |     plt.ylabel('$u(0, x)$',fontsize=11)
98 |     plt.savefig('./Initial.png', dpi = 600)
99 | 
100 |     # Data for training
101 |     Y_b = np.zeros((N_b))[:,None]
102 |     Y_u = np.vstack((Y_i, Y_b))
103 | 
104 |     # Load the reference solution of the Burgers equation
105 |     x = np.linspace(-1., 1., 256)[:,None]
106 |     t = np.linspace(0., 1., 100)[:,None]
107 |     X, T = np.meshgrid(x,t)
108 |     XT = np.hstack((X.flatten()[:,None], T.flatten()[:,None]))
109 |     X_star = XT[:,0][:,None]
110 |     T_star = XT[:,1][:,None]
111 | 
112 |     data = scipy.io.loadmat('./burgers_shock.mat')
113 |     Exact = np.real(data['usol']).T
114 | 
115 |     # Model creation
116 |     layers_P = np.array([X_dim+T_dim+Z_dim,50,50,50,50,Y_dim])
117 |     layers_Q = np.array([X_dim+T_dim+Y_dim,50,50,50,50,Z_dim])
118 |     layers_T = np.array([X_dim+T_dim+Y_dim,50,50,50,1])
119 | 
120 |     model = Burgers_UQPINN(X_f, T_f, X_u, T_u, Y_u, layers_P, layers_Q, layers_T, lam = 1.5, beta = 1.)
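    # [Editor's aside, not part of the original file] The noisy data above
    # perturb the exact initial condition in both input and output:
    # u(0, x) = -sin(pi * (x + 2*eps)) + eps, with eps = exp(-3|x|) * N(0, err_var**2),
    # so the noise is strongest near x = 0 and decays towards the boundaries.
    # A quick sanity check of the pointwise noise scale under these assumptions:
    #
    #     noise_scale = err_var / np.exp(3.0 * np.abs(X_i))
    #     print(noise_scale.max(), noise_scale.min())   # ~0.1 near x = 0, ~0.005 at |x| = 1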
121 | 
122 |     model.train(nIter = 30000, N_u = N_u, N_f = N_f, batch_size_u = N_u, batch_size_f = N_f)
123 | 
124 |     # Prediction
125 |     N_samples = 500
126 |     samples_mean = np.zeros((X_star.shape[0], N_samples))
127 |     for i in range(0, N_samples):
128 |         samples_mean[:,i:i+1] = model.generate_sample(X_star, T_star)
129 | 
130 |     # Use the mean and variance of the predicted samples as the prediction and its uncertainty
131 |     U_pred = np.mean(samples_mean, axis = 1)
132 |     U_pred = griddata(XT, U_pred.flatten(), (X, T), method='cubic')
133 |     Sigma_pred = np.var(samples_mean, axis = 1)
134 |     Sigma_pred = griddata(XT, Sigma_pred.flatten(), (X, T), method='cubic')
135 | 
136 |     # Compute the relative L2 error between the prediction and the reference solution
137 |     error_u = np.linalg.norm(Exact-U_pred,2)/np.linalg.norm(Exact,2)
138 |     print('Error u: %e' % (error_u))
139 |     np.save('L2_error.npy', error_u)
140 | 
141 |     ######################################################################
142 |     ############################# Plotting ###############################
143 |     ######################################################################
144 | 
145 |     fig, ax = newfig(1.0, 1.1)
146 |     ax.axis('off')
147 | 
148 |     ####### Row 0: u(t,x) ##################
149 |     gs0 = gridspec.GridSpec(1, 2)
150 |     gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
151 |     ax = plt.subplot(gs0[:, :])
152 | 
153 |     h = ax.imshow(U_pred.T, interpolation='nearest', cmap='rainbow',
154 |                   extent=[t.min(), t.max(), x.min(), x.max()],
155 |                   origin='lower', aspect='auto')
156 |     divider = make_axes_locatable(ax)
157 |     cax = divider.append_axes("right", size="5%", pad=0.05)
158 |     fig.colorbar(h, cax=cax)
159 | 
160 |     ax.plot(T_u, X_u, 'kx', label = 'Data (%d points)' % (Y_u.shape[0]), markersize = 4, clip_on = False)
161 | 
162 |     line = np.linspace(x.min(), x.max(), 2)[:,None]
163 |     ax.plot(t[25]*np.ones((2,1)), line, 'w-', linewidth = 1)
164 |     ax.plot(t[50]*np.ones((2,1)), line, 'w-', linewidth = 1)
165 |     ax.plot(t[75]*np.ones((2,1)), line, 'w-', linewidth = 1)
166 | 
167 |     ax.set_xlabel('$t$')
168 |     ax.set_ylabel('$x$')
169 |     ax.legend(frameon=False, loc = 'best')
170 |     ax.set_title('$u(t,x)$', fontsize = 10)
171 | 
172 | 
173 |     ####### Row 1: u(t,x) slices ##################
174 |     gs1 = gridspec.GridSpec(1, 3)
175 |     gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5)
176 | 
177 |     ax = plt.subplot(gs1[0, 0])
178 |     ax.plot(x,Exact[25,:], 'b-', linewidth = 2, label = 'Exact')
179 |     ax.plot(x,U_pred[25,:], 'r--', linewidth = 2, label = 'Prediction')
180 |     lower = U_pred[25,:] - 2.0*np.sqrt(Sigma_pred[25,:])
181 |     upper = U_pred[25,:] + 2.0*np.sqrt(Sigma_pred[25,:])
182 |     plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(),
183 |                      facecolor='orange', alpha=0.5, label="Two std band")
184 |     ax.set_xlabel('$x$')
185 |     ax.set_ylabel('$u(t,x)$')
186 |     ax.set_title('$t = 0.25$', fontsize = 10)
187 |     ax.axis('square')
188 |     ax.set_xlim([-1.1,1.1])
189 |     ax.set_ylim([-1.1,1.1])
190 | 
191 |     ax = plt.subplot(gs1[0, 1])
192 |     ax.plot(x,Exact[50,:], 'b-', linewidth = 2, label = 'Exact')
193 |     ax.plot(x,U_pred[50,:], 'r--', linewidth = 2, label = 'Prediction')
194 |     lower = U_pred[50,:] - 2.0*np.sqrt(Sigma_pred[50,:])
195 |     upper = U_pred[50,:] + 2.0*np.sqrt(Sigma_pred[50,:])
196 |     plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(),
197 |                      facecolor='orange', alpha=0.5, label="Two std band")
198 |     ax.set_xlabel('$x$')
199 |     ax.set_ylabel('$u(t,x)$')
200 |     ax.axis('square')
201 |     ax.set_xlim([-1.1,1.1])
202 |
ax.set_ylim([-1.1,1.1]) 203 | ax.set_title('$t = 0.50$', fontsize = 10) 204 | ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.35), ncol=5, frameon=False) 205 | 206 | ax = plt.subplot(gs1[0, 2]) 207 | ax.plot(x,Exact[75,:], 'b-', linewidth = 2, label = 'Exact') 208 | ax.plot(x,U_pred[75,:], 'r--', linewidth = 2, label = 'Prediction') 209 | lower = U_pred[75,:] - 2.0*np.sqrt(Sigma_pred[75,:]) 210 | upper = U_pred[75,:] + 2.0*np.sqrt(Sigma_pred[75,:]) 211 | plt.fill_between(x.flatten(), lower.flatten(), upper.flatten(), 212 | facecolor='orange', alpha=0.5, label="Two std band") 213 | ax.set_xlabel('$x$') 214 | ax.set_ylabel('$u(t,x)$') 215 | ax.axis('square') 216 | ax.set_xlim([-1.1,1.1]) 217 | ax.set_ylim([-1.1,1.1]) 218 | ax.set_title('$t = 0.75$', fontsize = 10) 219 | savefig('./Prediction') 220 | 221 | 222 | fig, ax = newfig(1.0) 223 | ax.axis('off') 224 | 225 | ############# Uncertainty ################## 226 | gs2 = gridspec.GridSpec(1, 2) 227 | gs2.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0) 228 | ax = plt.subplot(gs2[:, :]) 229 | 230 | h = ax.imshow(Sigma_pred.T, interpolation='nearest', cmap='rainbow', 231 | extent=[t.min(), t.max(), x.min(), x.max()], 232 | origin='lower', aspect='auto') 233 | divider = make_axes_locatable(ax) 234 | cax = divider.append_axes("right", size="5%", pad=0.05) 235 | fig.colorbar(h, cax=cax) 236 | ax.set_xlabel('$t$') 237 | ax.set_ylabel('$x$') 238 | ax.legend(frameon=False, loc = 'best') 239 | ax.set_title('Variance of $u(t,x)$', fontsize = 10) 240 | savefig('./Variance') 241 | 242 | 243 | 244 | -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/Initial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noisy/Initial.png -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noisy/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noisy/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/burgers_shock.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Burgers/Burgers_Noisy/burgers_shock.mat -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import timeit 10 | 11 | class Burgers_UQPINN: 12 | # Initialize the class 13 | def __init__(self, X_f, T_f, X_u, T_u, Y_u, layers_P, layers_Q, layers_T, lam = 1.0, beta = 1.0): 
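        # [Editor's note: descriptive comments added for clarity; not in the original]
        # X_f, T_f       -- collocation points where the PDE residual is enforced
        # X_u, T_u, Y_u  -- locations and values of the initial/boundary training data
        # layers_P/Q/T   -- layer sizes of the generator p(y|x,t,z), the encoder
        #                   q(z|x,t,y) and the discriminator, respectively
        # lam, beta      -- weights of the entropic regularization and PDE residual terms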
14 | 15 | # Normalize data 16 | self.Xmean, self.Xstd = X_f.mean(0), X_f.std(0) 17 | self.Tmean, self.Tstd = T_f.mean(0), T_f.std(0) 18 | self.Ymean, self.Ystd = Y_u.mean(0), Y_u.std(0) 19 | X_f = (X_f - self.Xmean) / self.Xstd 20 | X_u = (X_u - self.Xmean) / self.Xstd 21 | T_f = (T_f - self.Tmean) / self.Tstd 22 | T_u = (T_u - self.Tmean) / self.Tstd 23 | 24 | # Jacobian of the PDE because of normalization 25 | self.Jacobian_X = 1 / self.Xstd 26 | self.Jacobian_T = 1 / self.Tstd 27 | 28 | 29 | self.X_f = X_f 30 | self.X_u = X_u 31 | self.T_f = T_f 32 | self.T_u = T_u 33 | self.Y_u = Y_u 34 | 35 | self.layers_P = layers_P 36 | self.layers_Q = layers_Q 37 | self.layers_T = layers_T 38 | 39 | self.X_dim = X_u.shape[1] 40 | self.T_dim = T_u.shape[1] 41 | self.Y_dim = Y_u.shape[1] 42 | self.Z_dim = layers_Q[-1] 43 | self.lam = lam 44 | self.beta = beta 45 | 46 | self.k1 = 1 47 | self.k2 = 5 48 | 49 | # Initialize network weights and biases 50 | self.weights_P, self.biases_P = self.initialize_NN(layers_P) 51 | self.weights_Q, self.biases_Q = self.initialize_NN(layers_Q) 52 | self.weights_T, self.biases_T = self.initialize_NN(layers_T) 53 | 54 | # Define Tensorflow session 55 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 56 | 57 | # Define placeholders and computational graph 58 | self.X_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 59 | self.X_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 60 | self.T_u_tf = tf.placeholder(tf.float32, shape=(None, self.T_dim)) 61 | self.T_f_tf = tf.placeholder(tf.float32, shape=(None, self.T_dim)) 62 | self.Y_u_tf = tf.placeholder(tf.float32, shape=(None, self.Y_dim)) 63 | self.Z_u_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 64 | self.Z_f_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 65 | 66 | self.Y_u_pred = self.net_P(self.X_u_tf, self.T_u_tf, self.Z_u_tf) 67 | self.Y_f_pred = self.get_r(self.X_f_tf, self.T_f_tf, self.Z_f_tf) 68 | 69 | # Generator loss (to be minimized) 70 | self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss = self.compute_generator_loss(self.X_u_tf, self.T_u_tf, self.Y_u_tf, 71 | self.Y_u_pred, self.X_f_tf, self.T_f_tf, self.Y_f_pred, self.Z_u_tf, self.Z_f_tf) 72 | 73 | # Discriminator loss (to be minimized) 74 | self.T_loss = self.compute_discriminator_loss(self.X_u_tf, self.T_u_tf, self.Y_u_tf, self.Z_u_tf) 75 | 76 | # Generator samples of y given x and t 77 | self.sample = self.sample_generator(self.X_u_tf, self.T_u_tf, self.Z_u_tf) 78 | 79 | # Define optimizer 80 | self.optimizer_KL = tf.train.AdamOptimizer(1e-4) 81 | self.optimizer_T = tf.train.AdamOptimizer(1e-4) 82 | 83 | # Define train Ops 84 | self.train_op_KL = self.optimizer_KL.minimize(self.G_loss, 85 | var_list = [self.weights_P, self.biases_P, 86 | self.weights_Q, self.biases_Q]) 87 | 88 | self.train_op_T = self.optimizer_T.minimize(self.T_loss, 89 | var_list = [self.weights_T, self.biases_T]) 90 | 91 | # Initialize Tensorflow variables 92 | init = tf.global_variables_initializer() 93 | self.sess.run(init) 94 | 95 | 96 | # Initialize network weights and biases using Xavier initialization 97 | def initialize_NN(self, layers): 98 | # Xavier initialization 99 | def xavier_init(size): 100 | in_dim = size[0] 101 | out_dim = size[1] 102 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
103 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, dtype=tf.float32) 104 | 105 | weights = [] 106 | biases = [] 107 | num_layers = len(layers) 108 | for l in range(0,num_layers-1): 109 | W = xavier_init(size=[layers[l], layers[l+1]]) 110 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 111 | weights.append(W) 112 | biases.append(b) 113 | return weights, biases 114 | 115 | 116 | # Evaluates the forward pass 117 | def forward_pass(self, H, layers, weights, biases): 118 | num_layers = len(layers) 119 | for l in range(0,num_layers-2): 120 | W = weights[l] 121 | b = biases[l] 122 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 123 | W = weights[-1] 124 | b = biases[-1] 125 | H = tf.add(tf.matmul(H, W), b) 126 | return H 127 | 128 | # right hand side terms of the PDE (in this case are zero) 129 | def f(self, X_normalized): 130 | return tf.zeros_like(X_normalized) 131 | 132 | # Decoder: p(y|x,z) 133 | def net_P(self, X, T, Z): 134 | Y = self.forward_pass(tf.concat([X, T, Z], 1), 135 | self.layers_P, 136 | self.weights_P, 137 | self.biases_P) 138 | return Y 139 | 140 | # Encoder: q(z|x,y) 141 | def net_Q(self, X, T, Y): 142 | Z = self.forward_pass(tf.concat([X, T, Y], 1), 143 | self.layers_Q, 144 | self.weights_Q, 145 | self.biases_Q) 146 | return Z 147 | 148 | # Discriminator 149 | def net_T(self, X, T, Y): 150 | T = self.forward_pass(tf.concat([X, T, Y], 1), 151 | self.layers_T, 152 | self.weights_T, 153 | self.biases_T) 154 | return T 155 | 156 | # Physics-Informed neural network prediction 157 | def get_u(self, X, T, Z): 158 | z_prior = Z 159 | u = self.net_P(X, T, z_prior) 160 | return u 161 | 162 | # Physics-Informed residual on the collocation points 163 | def get_r(self, X, T, Z): 164 | z_prior = Z 165 | u = self.net_P(X, T, z_prior) 166 | u_t = tf.gradients(u, T)[0] 167 | u_x = tf.gradients(u, X)[0] 168 | u_xx = tf.gradients(u_x, X)[0] 169 | f = self.f(X) 170 | r = (self.Jacobian_T) * u_t + (self.Jacobian_X) * u * u_x - (0.01/np.pi) * (self.Jacobian_X ** 2) * u_xx - f 171 | return r 172 | 173 | # Compute the generator loss 174 | def compute_generator_loss(self, X_u, T_u, Y_u, Y_u_pred, X_f, T_f, Y_f_pred, Z_u, Z_f): 175 | # Prior: 176 | z_u_prior = Z_u 177 | z_f_prior = Z_f 178 | # Encoder: q(z|x,y) 179 | z_u_encoder = self.net_Q(X_u, T_u, Y_u_pred) 180 | z_f_encoder = self.net_Q(X_f, T_f, Y_f_pred) 181 | # Discriminator loss 182 | Y_pred = self.net_P(X_u, T_u, Z_u) 183 | T_pred = self.net_T(X_u, T_u, Y_pred) 184 | 185 | # KL-divergence between the data distribution and the model distribution 186 | KL = tf.reduce_mean(T_pred) 187 | 188 | # Entropic regularization 189 | log_q = - tf.reduce_mean(tf.square(z_u_prior-z_u_encoder)) 190 | 191 | # Physics-informed loss 192 | loss_f = tf.reduce_mean(tf.square(Y_f_pred)) 193 | 194 | # Generator loss 195 | loss = KL + (1.0-self.lam)*log_q + self.beta * loss_f 196 | 197 | return loss, KL, (1.0-self.lam)*log_q, self.beta * loss_f 198 | 199 | # Compute the discriminator loss 200 | def compute_discriminator_loss(self, X, T, Y, Z): 201 | # Prior: p(z) 202 | z_prior = Z 203 | # Decoder: p(y|x,z) 204 | Y_pred = self.net_P(X, T, z_prior) 205 | 206 | # Discriminator loss 207 | T_real = self.net_T(X, T, Y) 208 | T_fake = self.net_T(X, T, Y_pred) 209 | 210 | T_real = tf.sigmoid(T_real) 211 | T_fake = tf.sigmoid(T_fake) 212 | 213 | T_loss = -tf.reduce_mean(tf.log(1.0 - T_real + 1e-8) + \ 214 | tf.log(T_fake + 1e-8)) 215 | 216 | return T_loss 217 | 218 | 219 | # Fetches a mini-batch of data 
220 | def fetch_minibatch(self, X_u, T_u, X_f, T_f, Y_u, N_batch_u, N_batch_f): 221 | N_u = X_u.shape[0] 222 | N_f = X_f.shape[0] 223 | idx_u = np.random.choice(N_u, N_batch_u, replace=False) 224 | idx_f = np.random.choice(N_f, N_batch_f, replace=False) 225 | X_u_batch = X_u[idx_u,:] 226 | T_u_batch = T_u[idx_u,:] 227 | X_f_batch = X_f[idx_f,:] 228 | T_f_batch = T_f[idx_f,:] 229 | Y_u_batch = Y_u[idx_u,:] 230 | return X_u_batch, T_u_batch, X_f_batch, T_f_batch, Y_u_batch 231 | 232 | 233 | # Trains the model 234 | def train(self, nIter = 20000, N_u = 300, N_f = 5000, batch_size_u = 50, batch_size_f = 500): 235 | 236 | start_time = timeit.default_timer() 237 | 238 | # Store generator loss and discriminator loss as function of iteration 239 | iteration = [] 240 | loss_D = [] 241 | loss_G = [] 242 | for it in range(nIter): 243 | # Fetch a mini-batch of data 244 | X_u_batch, T_u_batch, X_f_batch, T_f_batch, Y_u_batch = self.fetch_minibatch(self.X_u, self.T_u, self.X_f, self.T_f, 245 | self.Y_u, batch_size_u, batch_size_f) 246 | 247 | Z_u = np.random.randn(batch_size_u, 1) 248 | Z_f = np.random.randn(batch_size_f, 1) 249 | 250 | # Define a dictionary for associating placeholders with data 251 | tf_dict = {self.X_u_tf: X_u_batch, self.T_u_tf: T_u_batch, self.Y_u_tf: Y_u_batch, self.X_f_tf: X_f_batch, 252 | self.T_f_tf: T_f_batch, self.Z_u_tf: Z_u, self.Z_f_tf: Z_f} 253 | 254 | # Run the Tensorflow session to minimize the loss 255 | for i in range(self.k1): 256 | self.sess.run(self.train_op_T, tf_dict) 257 | for j in range(self.k2): 258 | self.sess.run(self.train_op_KL, tf_dict) 259 | 260 | # Print 261 | if it % 100 == 0: 262 | elapsed = timeit.default_timer() - start_time 263 | loss_GG, loss_KL_value, reconv, loss_PDE = self.sess.run([self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss], tf_dict) 264 | loss_T_value = self.sess.run(self.T_loss, tf_dict) 265 | 266 | iteration.append(it) 267 | loss_D.append(loss_T_value) 268 | loss_G.append(loss_GG) 269 | print('It: %d, KL_loss: %.2e, Recon_loss: %.2e, PDE_loss: %.2e, T_loss: %.2e, Time: %.2f' % 270 | (it, loss_KL_value, reconv, loss_PDE, loss_T_value, elapsed)) 271 | start_time = timeit.default_timer() 272 | 273 | # Generate samples of y given x by sampling from the latent space z 274 | def sample_generator(self, X, T, Z): 275 | # Prior: 276 | z_prior = Z 277 | # Decoder: p(y|x,z) 278 | Y_pred = self.net_P(X, T, z_prior) 279 | return Y_pred 280 | 281 | # Predict y given x 282 | def generate_sample(self, X_star, T_star): 283 | X_star = (X_star - self.Xmean) / self.Xstd 284 | T_star = (T_star - self.Tmean) / self.Tstd 285 | Z = np.random.randn(X_star.shape[0], 1) 286 | tf_dict = {self.X_u_tf: X_star, self.T_u_tf: T_star, self.Z_u_tf: Z} 287 | Y_star = self.sess.run(self.sample, tf_dict) 288 | Y_star = Y_star 289 | return Y_star 290 | 291 | def predict_f(self, X_star, T_star): 292 | # Center around the origin 293 | X_star = (X_star - self.Xmean) / self.Xstd 294 | T_star = (T_star - self.Tmean) / self.Tstd 295 | # Predict 296 | z_f = np.random.randn(X_star.shape[0], self.Z_dim) 297 | tf_dict = {self.X_f_tf: X_star[:,0:1], self.T_f_tf: T_star, self.Z_f_tf: z_f} 298 | f_star = self.sess.run(self.Y_f_pred, tf_dict) 299 | return f_star 300 | 301 | -------------------------------------------------------------------------------- /Burgers/Burgers_Noisy/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib as mpl 3 | #mpl.use('pgf') 4 | 5 | def figsize(scale, nplots = 
1):
6 |     fig_width_pt = 390.0                            # Get this from LaTeX using \the\textwidth
7 |     inches_per_pt = 1.0/72.27                       # Convert pt to inch
8 |     golden_mean = (np.sqrt(5.0)-1.0)/2.0            # Aesthetic ratio (you could change this)
9 |     fig_width = fig_width_pt*inches_per_pt*scale    # width in inches
10 |     fig_height = nplots*fig_width*golden_mean      # height in inches
11 |     fig_size = [fig_width,fig_height]
12 |     return fig_size
13 | 
14 | pgf_with_latex = {                      # setup matplotlib to use latex for output
15 |     "pgf.texsystem": "pdflatex",        # change this if using xetex or luatex
16 |     "text.usetex": True,                # use LaTeX to write all text
17 |     "font.family": "serif",
18 |     "font.serif": [],                   # blank entries should cause plots to inherit fonts from the document
19 |     "font.sans-serif": [],
20 |     "font.monospace": [],
21 |     "axes.labelsize": 10,               # LaTeX default is 10pt font.
22 |     "font.size": 10,
23 |     "legend.fontsize": 8,               # Make the legend/label fonts a little smaller
24 |     "xtick.labelsize": 8,
25 |     "ytick.labelsize": 8,
26 |     "figure.figsize": figsize(1.0),     # default fig size of 1.0 textwidth
27 |     "pgf.preamble": [
28 |         r"\usepackage[utf8x]{inputenc}",    # use utf8 fonts because your computer can handle it :)
29 |         r"\usepackage[T1]{fontenc}",        # plots will be generated using this preamble
30 |         ]
31 |     }
32 | mpl.rcParams.update(pgf_with_latex)
33 | 
34 | import matplotlib.pyplot as plt
35 | 
36 | # I make my own newfig and savefig functions
37 | def newfig(width, nplots = 1):
38 |     fig = plt.figure(figsize=figsize(width, nplots))
39 |     ax = fig.add_subplot(111)
40 |     return fig, ax
41 | 
42 | def savefig(filename, crop = True):
43 |     if crop == True:
44 | #        plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0)
45 |         plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0)
46 |         plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0)
47 |     else:
48 | #        plt.savefig('{}.pgf'.format(filename))
49 |         plt.savefig('{}.pdf'.format(filename))
50 |         plt.savefig('{}.eps'.format(filename))
51 | 
52 | ## Simple plot
53 | #fig, ax  = newfig(1.0)
54 | #
55 | #def ema(y, a):
56 | #    s = []
57 | #    s.append(y[0])
58 | #    for t in range(1, len(y)):
59 | #        s.append(a * y[t] + (1-a) * s[t-1])
60 | #    return np.array(s)
61 | #
62 | #y = [0]*200
63 | #y.extend([20]*(1000-len(y)))
64 | #s = ema(y, 0.01)
65 | #
66 | #ax.plot(s)
67 | #ax.set_xlabel('X Label')
68 | #ax.set_ylabel('EMA')
69 | #
70 | #savefig('ema')

--------------------------------------------------------------------------------
/Darcy/Darcy_noise_free/Darcy.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Wed Nov 2018
3 | 
4 | @author: Yibo Yang
5 | """
6 | 
7 | import os
8 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | plt.switch_backend('agg')
12 | from pyDOE import lhs
13 | 
14 | from models import UQ_PINN
15 | 
16 | import scipy.io
17 | from scipy.interpolate import griddata
18 | from pyDOE import lhs
19 | from plotting import newfig, savefig
20 | from mpl_toolkits.mplot3d import Axes3D
21 | import time
22 | import matplotlib.gridspec as gridspec
23 | from mpl_toolkits.axes_grid1 import make_axes_locatable
24 | 
25 | np.random.seed(1234)
26 | 
27 | if __name__ == "__main__":
28 | 
29 |     # Load the data
30 |     data = np.load('./nonlinear2d_data.npz')
31 |     X = data['X']
32 |     K = data['k']
33 |     U = data['u']
34 | 
35 |     # Exact relation between k and u
36 |     def k_vanGenuchten(u):
37 |         alpha = 0.1
38 |         n = 1.885
39 |         m = 1.0 - 1.0/n
40 |         s = (1.0 +
(alpha*np.abs(u))**n)**(-m) 41 | k = np.sqrt(s)*(1.0 - (1.0 - s**(1.0/m))**m)**2 42 | return k 43 | 44 | 45 | N = 10000 46 | N_f = N 47 | N_u = 200 48 | N_b = 100 # for one boundary 49 | 50 | X_dim = 1 51 | Y_dim = 1 52 | Z_dim = 2 53 | 54 | L1 = 10. 55 | L2 = 10. 56 | 57 | X_u = np.zeros((N_u,2)) 58 | Y_u = np.zeros((N_u,1)) 59 | X_f = np.zeros((N_f,2)) 60 | 61 | # Boundary points 62 | x1_b1 = np.zeros(N_b)[:,None] 63 | x2_b1 = L2 * np.random.random(N_b)[:,None] 64 | X_b1 = np.hstack((x1_b1, x2_b1)) 65 | x1_b2 = L1 * np.random.random(N_b)[:,None] 66 | x2_b2 = np.zeros(N_b)[:,None] 67 | X_b2 = np.hstack((x1_b2, x2_b2)) 68 | x1_b3 = L1 * np.ones(N_b)[:,None] 69 | x2_b3 = L2 * np.random.random(N_b)[:,None] 70 | X_b3 = np.hstack((x1_b3, x2_b3)) 71 | x1_b4 = L1 * np.random.random(N_b)[:,None] 72 | x2_b4 = L2 * np.ones(N_b)[:,None] 73 | X_b4 = np.hstack((x1_b4, x2_b4)) 74 | X_b = np.hstack((X_b1, X_b2)) 75 | X_b = np.hstack((X_b, X_b3)) 76 | X_b = np.hstack((X_b, X_b4)) 77 | 78 | # Collocation points 79 | X1_f = L1 * np.random.random(N_f)[:,None] 80 | X2_f = L2 * np.random.random(N_f)[:,None] 81 | X_f = np.hstack((X1_f, X2_f)) 82 | 83 | U_data = U 84 | X_data = X 85 | 86 | idx_u = np.random.choice(N, N_u, replace=False) 87 | for i in range(N_u): 88 | X_u[i,:] = X_data[idx_u[i],:] 89 | Y_u[i,:] = U_data[idx_u[i]] 90 | 91 | 92 | # Model creation 93 | layers_P_u = np.array([X_dim+X_dim+Z_dim,50,50,50,50,Y_dim]) 94 | layers_Q = np.array([X_dim+X_dim+Y_dim,50,50,50,50,Z_dim]) 95 | layers_T = np.array([X_dim+X_dim+Y_dim,50,50,50,1]) 96 | layers_P_k = np.array([Y_dim,50,50,50,50,Y_dim]) 97 | 98 | model = UQ_PINN(X_u, X_b, Y_u, X_f, layers_P_u, layers_P_k, layers_Q, layers_T, lam = 1.5, beta = 1.0, q = 1., u_0 = - 10.) 99 | 100 | model.train(nIter = 30000) 101 | 102 | X_star = X 103 | k_star = K.T 104 | u_star = U.T 105 | 106 | # Domain bounds 107 | lb, ub = X.min(0), X.max(0) 108 | # Plot 109 | nn = 200 110 | x = np.linspace(lb[0], ub[0], nn) 111 | y = np.linspace(lb[1], ub[1], nn) 112 | XX, YY = np.meshgrid(x,y) 113 | 114 | K_plot = griddata(X_star, k_star.flatten(), (XX, YY), method='cubic') 115 | U_plot = griddata(X_star, u_star.flatten(), (XX, YY), method='cubic') 116 | 117 | N_samples = 500 118 | kkk = np.zeros((X_star.shape[0], N_samples)) 119 | uuu = np.zeros((X_star.shape[0], N_samples)) 120 | fff = np.zeros((X_star.shape[0], N_samples)) 121 | for i in range(0, N_samples): 122 | kkk[:,i:i+1] = model.predict_k(X_star) 123 | uuu[:,i:i+1] = model.predict_u(X_star) 124 | fff[:,i:i+1] = model.predict_f(X_star) 125 | 126 | np.save('uuu0.npy', uuu) 127 | np.save('kkk0.npy', kkk) 128 | 129 | kkk_mu_pred = np.mean(kkk, axis = 1) 130 | kkk_Sigma_pred = np.var(kkk, axis = 1) 131 | uuu_mu_pred = np.mean(uuu, axis = 1) 132 | uuu_Sigma_pred = np.var(uuu, axis = 1) 133 | fff_mu_pred = np.mean(fff, axis = 1) 134 | fff_Sigma_pred = np.var(fff, axis = 1) 135 | 136 | 137 | K_mu_plot = griddata(X_star, kkk_mu_pred.flatten(), (XX, YY), method='cubic') 138 | U_mu_plot = griddata(X_star, uuu_mu_pred.flatten(), (XX, YY), method='cubic') 139 | F_mu_plot = griddata(X_star, fff_mu_pred.flatten(), (XX, YY), method='cubic') 140 | K_Sigma_plot = griddata(X_star, kkk_Sigma_pred.flatten(), (XX, YY), method='cubic') 141 | U_Sigma_plot = griddata(X_star, uuu_Sigma_pred.flatten(), (XX, YY), method='cubic') 142 | F_Sigma_plot = griddata(X_star, fff_Sigma_pred.flatten(), (XX, YY), method='cubic') 143 | 144 | fig = plt.figure(2,figsize=(12,12)) 145 | plt.subplot(2,2,1) 146 | plt.xticks(fontsize=15) 147 | plt.yticks(fontsize=15) 
148 | plt.pcolor(XX, YY, K_plot, cmap='viridis') 149 | plt.colorbar().ax.tick_params(labelsize=15) 150 | plt.xlabel('$x_1$', fontsize=15) 151 | plt.ylabel('$x_2$', fontsize=15) 152 | plt.title('Exact $k(x_1,x_2)$', fontsize=15) 153 | 154 | plt.subplot(2,2,2) 155 | plt.xticks(fontsize=15) 156 | plt.yticks(fontsize=15) 157 | plt.pcolor(XX, YY, K_mu_plot, cmap='viridis') 158 | plt.colorbar().ax.tick_params(labelsize=15) 159 | plt.xlabel('$x_1$', fontsize=15) 160 | plt.ylabel('$x_2$', fontsize=15) 161 | plt.title('Prediction $k(x_1,x_2)$', fontsize=15) 162 | 163 | plt.subplot(2,2,3) 164 | plt.xticks(fontsize=15) 165 | plt.yticks(fontsize=15) 166 | plt.pcolor(XX, YY, np.abs(K_plot - K_mu_plot), cmap='viridis') 167 | plt.colorbar().ax.tick_params(labelsize=15) 168 | plt.xlabel('$x_1$', fontsize=15) 169 | plt.ylabel('$x_2$', fontsize=15) 170 | plt.title('Error of $k(x_1,x_2)$', fontsize=15) 171 | 172 | plt.subplot(2,2,4) 173 | plt.xticks(fontsize=15) 174 | plt.yticks(fontsize=15) 175 | plt.pcolor(XX, YY, np.abs(K_plot - K_mu_plot) / K_plot, cmap='viridis') 176 | plt.colorbar().ax.tick_params(labelsize=15) 177 | plt.xlabel('$x_1$', fontsize=15) 178 | plt.ylabel('$x_2$', fontsize=15) 179 | plt.title('Relative error of $k(x_1,x_2)$', fontsize=15) 180 | plt.savefig('./reconstruction.png', dpi = 600) 181 | 182 | u = np.load('uuu0.npy') 183 | k = np.load('kkk0.npy') 184 | u_mu = np.mean(u, axis = 1) 185 | u = np.zeros((10000, 500)) 186 | for i in range(500): 187 | u[:,i] = u_mu 188 | 189 | u = u.reshape(1,-1) 190 | k = k.reshape(1,-1) 191 | idx = np.random.choice(5000000, 1000, replace=False) 192 | u_p = u[:,idx] 193 | k_p = k[:,idx] 194 | 195 | 196 | u = np.linspace(-10.,-4., 1000) 197 | k = k_vanGenuchten(u) 198 | 199 | plt.figure(10, figsize=(6, 4)) 200 | plt.xticks(fontsize=11) 201 | plt.yticks(fontsize=11) 202 | plt.plot(u_p,k_p, 'bo') 203 | plt.plot(u,k, 'r-', label = "Exact", linewidth=2) 204 | ax = plt.gca() 205 | plt.xlabel('$u$',fontsize=11) 206 | plt.ylabel('$K(u)$',fontsize=11) 207 | plt.savefig('./UK.png', dpi = 600) 208 | 209 | 210 | 211 | -------------------------------------------------------------------------------- /Darcy/Darcy_noise_free/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noise_free/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /Darcy/Darcy_noise_free/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noise_free/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /Darcy/Darcy_noise_free/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import timeit 10 | 11 | 12 | class UQ_PINN: 13 | # Initialize the class 14 | def __init__(self, X_u, X_b, Y_u, X_f, layers_P_u, layers_P_k, layers_Q, layers_T, lam = 1.5, beta = 1.0, q = 1, u_0 = - 10.): 15 | 16 | # Normalize data 17 | self.lb = np.array([0.0, 0.0]) 18 | self.ub = np.array([10.0, 10.0]) 19 | self.lbb = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) 
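        # [Editor's note] lbb/ubb repeat the domain bounds for the four boundary
        # segments, whose (x1, x2) coordinate pairs are stacked column-wise in X_b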
20 | self.ubb = np.array([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]) 21 | X_u = (X_u - self.lb) - 0.5*(self.ub - self.lb) 22 | X_b = (X_b - self.lbb) - 0.5*(self.ubb - self.lbb) 23 | X_f = (X_f - self.lb) - 0.5*(self.ub - self.lb) 24 | 25 | 26 | self.q = q 27 | self.u_0 = u_0 28 | self.ksat = 10. 29 | 30 | self.x1_u = X_u[:,0:1] # dimension N_u x 1 31 | self.x2_u = X_u[:,1:2] # dimension N_u x 1 32 | self.y_u = Y_u # dimension N_u 33 | 34 | self.x1_f = X_f[:,0:1] # dimension N_f x 1 35 | self.x2_f = X_f[:,1:2] # dimension N_f x 1 36 | 37 | # Position of the boundary 38 | self.x1_b1 = X_b[:,0:1] 39 | self.x2_b1 = X_b[:,1:2] 40 | self.x1_b2 = X_b[:,2:3] 41 | self.x2_b2 = X_b[:,3:4] 42 | self.x1_b3 = X_b[:,4:5] 43 | self.x2_b3 = X_b[:,5:6] 44 | self.x1_b4 = X_b[:,6:7] 45 | self.x2_b4 = X_b[:,7:8] 46 | 47 | # Layers of the neural networks 48 | self.layers_P_u = layers_P_u 49 | self.layers_Q = layers_Q 50 | self.layers_T = layers_T 51 | self.layers_P_k = layers_P_k 52 | 53 | # Dimensions of the inputs, outputs, latent variables 54 | self.X_dim = self.x1_u.shape[1] 55 | self.Y_u_dim = self.y_u.shape[1] 56 | self.Y_k_dim = self.y_u.shape[1] 57 | self.Y_f_dim = self.y_u.shape[1] 58 | self.Z_dim = layers_Q[-1] 59 | 60 | # Regularization parameters 61 | self.lam = lam 62 | self.beta = beta 63 | 64 | # Ratio of training for generator and discriminator 65 | self.k1 = 1 66 | self.k2 = 5 67 | 68 | # Initialize network weights and biases 69 | self.weights_P_u, self.biases_P_u = self.initialize_NN(layers_P_u) 70 | self.weights_Q, self.biases_Q = self.initialize_NN(layers_Q) 71 | self.weights_T, self.biases_T = self.initialize_NN(layers_T) 72 | self.weights_P_k, self.biases_P_k = self.initialize_NN(layers_P_k) 73 | 74 | # Define Tensorflow session 75 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 76 | 77 | # Define placeholders and computational graph 78 | self.x1_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 79 | self.x2_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 80 | self.x1_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 81 | self.x2_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 82 | self.y_u_tf = tf.placeholder(tf.float32, shape=(None, self.Y_u_dim)) 83 | self.y_k_tf = tf.placeholder(tf.float32, shape=(None, self.Y_k_dim)) 84 | self.y_f_tf = tf.placeholder(tf.float32, shape=(None, self.Y_f_dim)) 85 | 86 | self.x1_b1_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 87 | self.x2_b1_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 88 | self.x1_b2_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 89 | self.x2_b2_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 90 | self.x1_b3_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 91 | self.x2_b3_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 92 | self.x1_b4_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 93 | self.x2_b4_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 94 | 95 | self.z_b1_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 96 | self.z_b2_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 97 | self.z_b3_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 98 | self.z_b4_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 99 | self.z_u_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 100 | self.z_f_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 101 | 102 | self.y_u_pred = self.net_P_u(self.x1_u_tf, self.x2_u_tf, self.z_u_tf) 103 | 
self.y_b1_pred = self.get_b1(self.x1_b1_tf, self.x2_b1_tf, self.z_b1_tf) 104 | self.y_b2_pred = self.get_b2(self.x1_b2_tf, self.x2_b2_tf, self.z_b2_tf) 105 | self.y_b3_pred = self.get_b3(self.x1_b3_tf, self.x2_b3_tf, self.z_b3_tf) 106 | self.y_b4_pred = self.get_b4(self.x1_b4_tf, self.x2_b4_tf, self.z_b4_tf) 107 | self.y_k_pred = self.net_P_k(self.y_u_pred) 108 | self.y_f_pred = self.get_f(self.x1_f_tf, self.x2_f_tf, self.z_f_tf) 109 | 110 | # Generator loss (to be minimized) 111 | self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss = self.compute_generator_loss(self.x1_u_tf, self.x2_u_tf, 112 | self.y_u_pred, self.y_f_pred, self.y_b1_pred, self.y_b2_pred, 113 | self.y_b3_pred, self.y_b4_pred, self.z_u_tf) 114 | 115 | # Discriminator loss (to be minimized) 116 | self.T_loss = self.compute_discriminator_loss(self.x1_u_tf, self.x2_u_tf, self.y_u_tf, self.z_u_tf) 117 | 118 | # Define optimizer 119 | self.optimizer_KL = tf.train.AdamOptimizer(1e-4) 120 | self.optimizer_T = tf.train.AdamOptimizer(1e-4) 121 | 122 | # Define train Ops 123 | self.train_op_KL = self.optimizer_KL.minimize(self.G_loss, 124 | var_list = [self.weights_P_u, self.biases_P_u, self.weights_P_k, self.biases_P_k, 125 | self.weights_Q, self.biases_Q]) 126 | 127 | self.train_op_T = self.optimizer_T.minimize(self.T_loss, 128 | var_list = [self.weights_T, self.biases_T]) 129 | 130 | # Initialize Tensorflow variables 131 | init = tf.global_variables_initializer() 132 | self.sess.run(init) 133 | 134 | 135 | # Initialize network weights and biases using Xavier initialization 136 | def initialize_NN(self, layers): 137 | # Xavier initialization 138 | def xavier_init(size): 139 | in_dim = size[0] 140 | out_dim = size[1] 141 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 142 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, dtype=tf.float32) 143 | 144 | weights = [] 145 | biases = [] 146 | num_layers = len(layers) 147 | for l in range(0,num_layers-1): 148 | W = xavier_init(size=[layers[l], layers[l+1]]) 149 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 150 | weights.append(W) 151 | biases.append(b) 152 | return weights, biases 153 | 154 | 155 | # Evaluates the forward pass 156 | def forward_pass(self, H, layers, weights, biases): 157 | num_layers = len(layers) 158 | for l in range(0,num_layers-2): 159 | W = weights[l] 160 | b = biases[l] 161 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 162 | W = weights[-1] 163 | b = biases[-1] 164 | H = tf.add(tf.matmul(H, W), b) 165 | return H 166 | 167 | def f(self, X_normalized): 168 | return tf.zeros_like(X_normalized) 169 | 170 | # Decoder: p(y|x,z) 171 | def net_P_u(self, X1, X2, Z): 172 | Y = self.forward_pass(tf.concat([X1, X2, Z], 1), 173 | self.layers_P_u, 174 | self.weights_P_u, 175 | self.biases_P_u) 176 | return Y 177 | 178 | # Encoder: q(z|x,y) 179 | def net_Q(self, X1, X2, Y): 180 | Z = self.forward_pass(tf.concat([X1, X2, Y], 1), 181 | self.layers_Q, 182 | self.weights_Q, 183 | self.biases_Q) 184 | return Z 185 | 186 | # Discriminator 187 | def net_T(self, X1, X2, Y): 188 | T = self.forward_pass(tf.concat([X1, X2, Y], 1), 189 | self.layers_T, 190 | self.weights_T, 191 | self.biases_T) 192 | return T 193 | 194 | # Decoder: p(y|x,z) 195 | def net_P_k(self, U): 196 | Y = self.forward_pass(U, 197 | self.layers_P_k, 198 | self.weights_P_k, 199 | self.biases_P_k) 200 | return self.ksat * tf.exp(Y) 201 | 202 | 203 | def get_u(self, X1, X2, Z): 204 | z_prior = Z 205 | u = self.net_P_u(X1, X2, z_prior) 206 
| return u 207 | 208 | def get_k(self, U): 209 | u = self.net_P_k(U) 210 | return u 211 | 212 | def get_b1(self, X1, X2, Z): 213 | z_prior = Z 214 | u = self.net_P_u(X1, X2, z_prior) 215 | u_x1 = tf.gradients(u, X1)[0] 216 | k = self.net_P_k(u) 217 | temp = self.q + k * u_x1 218 | return temp 219 | 220 | def get_b2(self, X1, X2, Z): 221 | z_prior = Z 222 | u = self.net_P_u(X1, X2, z_prior) 223 | u_x2 = tf.gradients(u, X2)[0] 224 | return u_x2 225 | 226 | def get_b3(self, X1, X2, Z): 227 | z_prior = Z 228 | u = self.net_P_u(X1, X2, z_prior) 229 | temp = u - self.u_0 230 | return temp 231 | 232 | def get_b4(self, X1, X2, Z): 233 | z_prior = Z 234 | u = self.net_P_u(X1, X2, z_prior) 235 | u_x2 = tf.gradients(u, X2)[0] 236 | return u_x2 237 | 238 | def get_f(self, X1, X2, Z_u): 239 | u = self.net_P_u(X1, X2, Z_u) 240 | k = self.net_P_k(u) 241 | u_x1 = tf.gradients(u, X1)[0] 242 | u_x2 = tf.gradients(u, X2)[0] 243 | f_1 = tf.gradients(k*u_x1, X1)[0] 244 | f_2 = tf.gradients(k*u_x2, X2)[0] 245 | f = f_1 + f_2 246 | return f 247 | 248 | def compute_generator_loss(self, x1_u, x2_u, y_u_pred, y_f_pred, y_b1_pred, y_b2_pred, y_b3_pred, y_b4_pred, z_u): 249 | # Encoder: q(z|x,y) 250 | z_u_prior = z_u 251 | 252 | z_u_encoder = self.net_Q(x1_u, x2_u, y_u_pred) 253 | 254 | y_u_pred = self.net_P_u(x1_u, x2_u, z_u) 255 | T_pred = self.net_T(x1_u, x2_u, y_u_pred) 256 | 257 | # KL-divergence between the data and the generator samples 258 | KL = tf.reduce_mean(T_pred) 259 | 260 | # Entropic regularization 261 | log_q = - tf.reduce_mean(tf.square(z_u_prior-z_u_encoder)) 262 | 263 | # Physics-informed loss 264 | loss_f = tf.reduce_mean(tf.square(y_f_pred)) + tf.reduce_mean(tf.square(y_b1_pred)) +\ 265 | tf.reduce_mean(tf.square(y_b2_pred)) + tf.reduce_mean(tf.square(y_b3_pred)) + tf.reduce_mean(tf.square(y_b4_pred)) 266 | 267 | # Generator loss 268 | loss = KL + (1.0-self.lam)*log_q + self.beta * loss_f 269 | 270 | return loss, KL, (1.0-self.lam)*log_q, self.beta * loss_f 271 | 272 | 273 | def compute_discriminator_loss(self, X1, X2, Y, Z): 274 | # Prior: p(z) 275 | z_prior = Z 276 | # Decoder: p(y|x,z) 277 | Y_pred = self.net_P_u(X1, X2, z_prior) 278 | 279 | # Discriminator loss 280 | T_real = self.net_T(X1, X2, Y) 281 | T_fake = self.net_T(X1, X2, Y_pred) 282 | 283 | T_real = tf.sigmoid(T_real) 284 | T_fake = tf.sigmoid(T_fake) 285 | 286 | T_loss = -tf.reduce_mean(tf.log(1.0 - T_real + 1e-8) + \ 287 | tf.log(T_fake + 1e-8)) 288 | 289 | return T_loss 290 | 291 | # Trains the model 292 | def train(self, nIter = 20000): 293 | 294 | start_time = timeit.default_timer() 295 | for it in range(nIter): 296 | 297 | # Sampling from latent spaces 298 | z_u = np.random.randn(self.x1_u.shape[0], self.Z_dim) 299 | z_f = np.random.randn(self.x1_f.shape[0], self.Z_dim) 300 | z_b1 = np.random.randn(self.x1_b1.shape[0], self.Z_dim) 301 | z_b2 = np.random.randn(self.x1_b2.shape[0], self.Z_dim) 302 | z_b3 = np.random.randn(self.x1_b3.shape[0], self.Z_dim) 303 | z_b4 = np.random.randn(self.x1_b4.shape[0], self.Z_dim) 304 | 305 | # Define a dictionary for associating placeholders with data 306 | tf_dict = {self.x1_u_tf: self.x1_u, self.x2_u_tf: self.x2_u, self.x1_f_tf: self.x1_f, self.x2_f_tf: self.x2_f, 307 | self.y_u_tf: self.y_u, self.x1_b1_tf: self.x1_b1, self.x2_b1_tf: self.x2_b1, self.x1_b2_tf: self.x1_b2, self.x2_b2_tf: self.x2_b2, 308 | self.x1_b3_tf: self.x1_b3, self.x2_b3_tf: self.x2_b3, self.x1_b4_tf: self.x1_b4, self.x2_b4_tf: self.x2_b4, 309 | self.z_u_tf: z_u, self.z_f_tf: z_f, self.z_b1_tf: z_b1, self.z_b2_tf: z_b2, 
self.z_b3_tf: z_b3, self.z_b4_tf: z_b4} 310 | 311 | # Run the Tensorflow session to minimize the loss 312 | for i in range(self.k1): 313 | self.sess.run(self.train_op_T, tf_dict) 314 | for j in range(self.k2): 315 | self.sess.run(self.train_op_KL, tf_dict) 316 | 317 | # Print 318 | if it % 100 == 0: 319 | elapsed = timeit.default_timer() - start_time 320 | loss_KL_value, reconv, loss_PDE = self.sess.run([self.KL_loss, self.recon_loss, self.PDE_loss], tf_dict) 321 | loss_T_value = self.sess.run(self.T_loss, tf_dict) 322 | print('It: %d, KL_loss: %.2e, Recon_loss: %.2e, PDE_loss: %.2e, T_loss: %.2e, Time: %.2f' % 323 | (it, loss_KL_value, reconv, loss_PDE, loss_T_value, elapsed)) 324 | start_time = timeit.default_timer() 325 | 326 | 327 | # Evaluates predictions at test points 328 | def predict_k(self, X_star): 329 | # Center around the origin 330 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 331 | # Predict 332 | z_u = np.random.randn(X_star.shape[0], self.Z_dim) 333 | tf_dict = {self.x1_u_tf: X_star[:,0:1], self.x2_u_tf: X_star[:,1:2], self.z_u_tf: z_u} 334 | k_star = self.sess.run(self.y_k_pred, tf_dict) 335 | return k_star / self.ksat 336 | 337 | # Evaluates predictions at test points 338 | def predict_u(self, X_star): 339 | # Center around the origin 340 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 341 | # Predict 342 | z_u = np.random.randn(X_star.shape[0], self.Z_dim) 343 | tf_dict = {self.x1_u_tf: X_star[:,0:1], self.x2_u_tf: X_star[:,1:2], self.z_u_tf: z_u} 344 | u_star = self.sess.run(self.y_u_pred, tf_dict) 345 | return u_star 346 | 347 | # Evaluates predictions at test points 348 | def predict_f(self, X_star): 349 | # Center around the origin 350 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 351 | # Predict 352 | z_f = np.random.randn(X_star.shape[0], self.Z_dim) 353 | tf_dict = {self.x1_f_tf: X_star[:,0:1], self.x2_f_tf: X_star[:,1:2], self.z_f_tf: z_f} 354 | f_star = self.sess.run(self.y_f_pred, tf_dict) 355 | return f_star 356 | 357 | # Predict the k as function of u 358 | def predict_k_from_u(self, u): 359 | tf_dict = {self.y_u_pred: u} 360 | k_star = self.sess.run(self.y_k_pred, tf_dict) 361 | return k_star / self.ksat 362 | 363 | 364 | 365 | -------------------------------------------------------------------------------- /Darcy/Darcy_noise_free/nonlinear2d_data.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noise_free/nonlinear2d_data.npz -------------------------------------------------------------------------------- /Darcy/Darcy_noise_free/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib as mpl 3 | #mpl.use('pgf') 4 | 5 | def figsize(scale, nplots = 1): 6 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 7 | inches_per_pt = 1.0/72.27 # Convert pt to inch 8 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 9 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 10 | fig_height = nplots*fig_width*golden_mean # height in inches 11 | fig_size = [fig_width,fig_height] 12 | return fig_size 13 | 14 | pgf_with_latex = { # setup matplotlib to use latex for output 15 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 16 | "text.usetex": True, # use LaTeX to write all text 17 | "font.family": "serif", 18 | "font.serif": [], # blank 
entries should cause plots to inherit fonts from the document 19 | "font.sans-serif": [], 20 | "font.monospace": [], 21 | "axes.labelsize": 10, # LaTeX default is 10pt font. 22 | "font.size": 10, 23 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 24 | "xtick.labelsize": 8, 25 | "ytick.labelsize": 8, 26 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 27 | "pgf.preamble": [ 28 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 29 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 30 | ] 31 | } 32 | mpl.rcParams.update(pgf_with_latex) 33 | 34 | import matplotlib.pyplot as plt 35 | 36 | # I make my own newfig and savefig functions 37 | def newfig(width, nplots = 1): 38 | fig = plt.figure(figsize=figsize(width, nplots)) 39 | ax = fig.add_subplot(111) 40 | return fig, ax 41 | 42 | def savefig(filename, crop = True): 43 | if crop == True: 44 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 45 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 46 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 47 | else: 48 | # plt.savefig('{}.pgf'.format(filename)) 49 | plt.savefig('{}.pdf'.format(filename)) 50 | plt.savefig('{}.eps'.format(filename)) 51 | 52 | ## Simple plot 53 | #fig, ax = newfig(1.0) 54 | # 55 | #def ema(y, a): 56 | # s = [] 57 | # s.append(y[0]) 58 | # for t in range(1, len(y)): 59 | # s.append(a * y[t] + (1-a) * s[t-1]) 60 | # return np.array(s) 61 | # 62 | #y = [0]*200 63 | #y.extend([20]*(1000-len(y))) 64 | #s = ema(y, 0.01) 65 | # 66 | #ax.plot(s) 67 | #ax.set_xlabel('X Label') 68 | #ax.set_ylabel('EMA') 69 | # 70 | #savefig('ema') -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/Darcy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import os 8 | os.environ['KMP_DUPLICATE_LIB_OK']='True' 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | plt.switch_backend('agg') 12 | from pyDOE import lhs 13 | 14 | from models import UQ_PINN 15 | 16 | import scipy.io 17 | from scipy.interpolate import griddata 18 | from pyDOE import lhs 19 | from plotting import newfig, savefig 20 | from mpl_toolkits.mplot3d import Axes3D 21 | import time 22 | import matplotlib.gridspec as gridspec 23 | from mpl_toolkits.axes_grid1 import make_axes_locatable 24 | 25 | np.random.seed(1234) 26 | 27 | if __name__ == "__main__": 28 | 29 | # Load the data 30 | data = np.load('./nonlinear2d_data.npz') 31 | X = data['X'] 32 | K = data['k'] 33 | U = data['u'] 34 | 35 | # Exact relation between k and u 36 | def k_vanGenuchten(u): 37 | alpha = 0.1 38 | n = 1.885 39 | m = 1.0 - 1.0/n 40 | s = (1.0 + (alpha*np.abs(u))**n)**(-m) 41 | k = np.sqrt(s)*(1.0 - (1.0 - s**(1.0/m))**m)**2 42 | return k 43 | 44 | N = 10000 45 | N_f = N 46 | N_u = 1000 47 | N_b = 100 # for one boundary 48 | 49 | X_dim = 1 50 | Y_dim = 1 51 | Z_dim = 2 52 | 53 | L1 = 10. 54 | L2 = 10. 
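    # Noise level for the observations: Y_u is later corrupted by zero-mean Gaussian noise with std = noise * std(Y_u)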
55 | noise = 0.05 56 | 57 | X_u = np.zeros((N_u,2)) 58 | Y_u = np.zeros((N_u,1)) 59 | X_f = np.zeros((N_f,2)) 60 | 61 | # Boundary points 62 | x1_b1 = np.zeros(N_b)[:,None] 63 | x2_b1 = L2 * np.random.random(N_b)[:,None] 64 | X_b1 = np.hstack((x1_b1, x2_b1)) 65 | x1_b2 = L1 * np.random.random(N_b)[:,None] 66 | x2_b2 = np.zeros(N_b)[:,None] 67 | X_b2 = np.hstack((x1_b2, x2_b2)) 68 | x1_b3 = L1 * np.ones(N_b)[:,None] 69 | x2_b3 = L2 * np.random.random(N_b)[:,None] 70 | X_b3 = np.hstack((x1_b3, x2_b3)) 71 | x1_b4 = L1 * np.random.random(N_b)[:,None] 72 | x2_b4 = L2 * np.ones(N_b)[:,None] 73 | X_b4 = np.hstack((x1_b4, x2_b4)) 74 | X_b = np.hstack((X_b1, X_b2)) 75 | X_b = np.hstack((X_b, X_b3)) 76 | X_b = np.hstack((X_b, X_b4)) 77 | 78 | # Collocation points 79 | X1_f = L1 * np.random.random(N_f)[:,None] 80 | X2_f = L2 * np.random.random(N_f)[:,None] 81 | X_f = np.hstack((X1_f, X2_f)) 82 | 83 | U_data = U 84 | X_data = X 85 | 86 | idx_u = np.random.choice(N, N_u, replace=False) 87 | for i in range(N_u): 88 | X_u[i,:] = X_data[idx_u[i],:] 89 | Y_u[i,:] = U_data[idx_u[i]] 90 | 91 | # Corrupt the training data by noise 92 | Y_u = Y_u + noise * np.std(Y_u) * np.random.randn(N_u,Y_dim) 93 | # Model creation 94 | layers_P_u = np.array([X_dim+X_dim+Z_dim,50,50,50,50,Y_dim]) 95 | layers_Q = np.array([X_dim+X_dim+Y_dim,50,50,50,50,Z_dim]) 96 | layers_T = np.array([X_dim+X_dim+Y_dim,50,50,50,1]) 97 | layers_P_k = np.array([Y_dim,50,50,50,50,Y_dim]) 98 | 99 | model = UQ_PINN(X_u, X_b, Y_u, X_f, layers_P_u, layers_P_k, layers_Q, layers_T, lam = 1.5, beta = 1.0, q = 1., u_0 = - 10.) 100 | 101 | model.train(nIter = 30000) 102 | 103 | 104 | X_star = X 105 | k_star = K.T 106 | u_star = U.T 107 | 108 | # Domain bounds 109 | lb, ub = X.min(0), X.max(0) 110 | # Plot 111 | nn = 200 112 | x = np.linspace(lb[0], ub[0], nn) 113 | y = np.linspace(lb[1], ub[1], nn) 114 | XX, YY = np.meshgrid(x,y) 115 | 116 | K_plot = griddata(X_star, k_star.flatten(), (XX, YY), method='cubic') 117 | U_plot = griddata(X_star, u_star.flatten(), (XX, YY), method='cubic') 118 | 119 | 120 | N_samples = 500 121 | kkk = np.zeros((X_star.shape[0], N_samples)) 122 | uuu = np.zeros((X_star.shape[0], N_samples)) 123 | fff = np.zeros((X_star.shape[0], N_samples)) 124 | for i in range(0, N_samples): 125 | kkk[:,i:i+1] = model.predict_k(X_star) 126 | uuu[:,i:i+1] = model.predict_u(X_star) 127 | fff[:,i:i+1] = model.predict_f(X_star) 128 | 129 | np.save('uuu5.npy', uuu) 130 | np.save('kkk5.npy', kkk) 131 | 132 | kkk_mu_pred = np.mean(kkk, axis = 1) 133 | kkk_Sigma_pred = np.var(kkk, axis = 1) 134 | uuu_mu_pred = np.mean(uuu, axis = 1) 135 | uuu_Sigma_pred = np.var(uuu, axis = 1) 136 | fff_mu_pred = np.mean(fff, axis = 1) 137 | fff_Sigma_pred = np.var(fff, axis = 1) 138 | 139 | 140 | K_mu_plot = griddata(X_star, kkk_mu_pred.flatten(), (XX, YY), method='cubic') 141 | U_mu_plot = griddata(X_star, uuu_mu_pred.flatten(), (XX, YY), method='cubic') 142 | F_mu_plot = griddata(X_star, fff_mu_pred.flatten(), (XX, YY), method='cubic') 143 | K_Sigma_plot = griddata(X_star, kkk_Sigma_pred.flatten(), (XX, YY), method='cubic') 144 | U_Sigma_plot = griddata(X_star, uuu_Sigma_pred.flatten(), (XX, YY), method='cubic') 145 | F_Sigma_plot = griddata(X_star, fff_Sigma_pred.flatten(), (XX, YY), method='cubic') 146 | 147 | 148 | fig = plt.figure(2,figsize=(12,12)) 149 | plt.subplot(2,2,1) 150 | plt.xticks(fontsize=15) 151 | plt.yticks(fontsize=15) 152 | plt.pcolor(XX, YY, K_plot, cmap='viridis') 153 | plt.colorbar().ax.tick_params(labelsize=15) 154 | plt.xlabel('$x_1$', 
fontsize=15) 155 | plt.ylabel('$x_2$', fontsize=15) 156 | plt.title('Exact $k(x_1,x_2)$', fontsize=15) 157 | 158 | plt.subplot(2,2,2) 159 | plt.xticks(fontsize=15) 160 | plt.yticks(fontsize=15) 161 | plt.pcolor(XX, YY, K_mu_plot, cmap='viridis') 162 | plt.colorbar().ax.tick_params(labelsize=15) 163 | plt.xlabel('$x_1$', fontsize=15) 164 | plt.ylabel('$x_2$', fontsize=15) 165 | plt.title('Prediction $k(x_1,x_2)$', fontsize=15) 166 | 167 | plt.subplot(2,2,3) 168 | plt.xticks(fontsize=15) 169 | plt.yticks(fontsize=15) 170 | plt.pcolor(XX, YY, np.abs(K_plot - K_mu_plot), cmap='viridis') 171 | plt.colorbar().ax.tick_params(labelsize=15) 172 | plt.xlabel('$x_1$', fontsize=15) 173 | plt.ylabel('$x_2$', fontsize=15) 174 | plt.title('Error of $k(x_1,x_2)$', fontsize=15) 175 | 176 | plt.subplot(2,2,4) 177 | plt.xticks(fontsize=15) 178 | plt.yticks(fontsize=15) 179 | plt.pcolor(XX, YY, np.abs(K_plot - K_mu_plot) / K_plot, cmap='viridis') 180 | plt.colorbar().ax.tick_params(labelsize=15) 181 | plt.xlabel('$x_1$', fontsize=15) 182 | plt.ylabel('$x_2$', fontsize=15) 183 | plt.title('Relative error of $k(x_1,x_2)$', fontsize=15) 184 | plt.savefig('./reconstruction.png', dpi = 600) 185 | 186 | u = np.load('uuu5.npy') 187 | k = np.load('kkk5.npy') 188 | u_mu = np.mean(u, axis = 1) 189 | u = np.zeros((10000, 500)) 190 | for i in range(500): 191 | u[:,i] = u_mu 192 | 193 | u = u.reshape(1,-1) 194 | k = k.reshape(1,-1) 195 | idx = np.random.choice(5000000, 1000, replace=False) 196 | u_p = u[:,idx] 197 | k_p = k[:,idx] 198 | 199 | 200 | 201 | u = np.linspace(-10.,-4., 1000) 202 | k = k_vanGenuchten(u) 203 | 204 | plt.figure(10, figsize=(6, 4)) 205 | plt.xticks(fontsize=11) 206 | plt.yticks(fontsize=11) 207 | plt.plot(u_p,k_p, 'bo') 208 | plt.plot(u,k, 'r-', label = "Exact", linewidth=2) 209 | ax = plt.gca() 210 | plt.xlabel('$u$',fontsize=11) 211 | plt.ylabel('$K(u)$',fontsize=11) 212 | plt.savefig('./UK.png', dpi = 600) 213 | 214 | 215 | 216 | -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noisy/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noisy/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import timeit 10 | 11 | 12 | class UQ_PINN: 13 | # Initialize the class 14 | def __init__(self, X_u, X_b, Y_u, X_f, layers_P_u, layers_P_k, layers_Q, layers_T, lam = 1.5, beta = 1.0, q = 1, u_0 = - 10.): 15 | 16 | # Normalize data 17 | self.lb = np.array([0.0, 0.0]) 18 | self.ub = np.array([10.0, 10.0]) 19 | self.lbb = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) 20 | self.ubb = np.array([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]) 21 | X_u = (X_u - self.lb) - 0.5*(self.ub - self.lb) 22 | X_b = (X_b 
- self.lbb) - 0.5*(self.ubb - self.lbb) 23 | X_f = (X_f - self.lb) - 0.5*(self.ub - self.lb) 24 | 25 | 26 | self.q = q 27 | self.u_0 = u_0 28 | self.ksat = 10. 29 | 30 | self.x1_u = X_u[:,0:1] # dimension N_u x 1 31 | self.x2_u = X_u[:,1:2] # dimension N_u x 1 32 | self.y_u = Y_u # dimension N_u 33 | 34 | self.x1_f = X_f[:,0:1] # dimension N_f x 1 35 | self.x2_f = X_f[:,1:2] # dimension N_f x 1 36 | 37 | # Position of the boundary 38 | self.x1_b1 = X_b[:,0:1] 39 | self.x2_b1 = X_b[:,1:2] 40 | self.x1_b2 = X_b[:,2:3] 41 | self.x2_b2 = X_b[:,3:4] 42 | self.x1_b3 = X_b[:,4:5] 43 | self.x2_b3 = X_b[:,5:6] 44 | self.x1_b4 = X_b[:,6:7] 45 | self.x2_b4 = X_b[:,7:8] 46 | 47 | # Layers of the neural networks 48 | self.layers_P_u = layers_P_u 49 | self.layers_Q = layers_Q 50 | self.layers_T = layers_T 51 | self.layers_P_k = layers_P_k 52 | 53 | # Dimensions of the inputs, outputs, latent variables 54 | self.X_dim = self.x1_u.shape[1] 55 | self.Y_u_dim = self.y_u.shape[1] 56 | self.Y_k_dim = self.y_u.shape[1] 57 | self.Y_f_dim = self.y_u.shape[1] 58 | self.Z_dim = layers_Q[-1] 59 | 60 | # Regularization parameters 61 | self.lam = lam 62 | self.beta = beta 63 | 64 | # Ratio of training for generator and discriminator 65 | self.k1 = 1 66 | self.k2 = 5 67 | 68 | # Initialize network weights and biases 69 | self.weights_P_u, self.biases_P_u = self.initialize_NN(layers_P_u) 70 | self.weights_Q, self.biases_Q = self.initialize_NN(layers_Q) 71 | self.weights_T, self.biases_T = self.initialize_NN(layers_T) 72 | self.weights_P_k, self.biases_P_k = self.initialize_NN(layers_P_k) 73 | 74 | # Define Tensorflow session 75 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 76 | 77 | # Define placeholders and computational graph 78 | self.x1_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 79 | self.x2_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 80 | self.x1_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 81 | self.x2_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 82 | self.y_u_tf = tf.placeholder(tf.float32, shape=(None, self.Y_u_dim)) 83 | self.y_k_tf = tf.placeholder(tf.float32, shape=(None, self.Y_k_dim)) 84 | self.y_f_tf = tf.placeholder(tf.float32, shape=(None, self.Y_f_dim)) 85 | 86 | self.x1_b1_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 87 | self.x2_b1_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 88 | self.x1_b2_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 89 | self.x2_b2_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 90 | self.x1_b3_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 91 | self.x2_b3_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 92 | self.x1_b4_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 93 | self.x2_b4_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 94 | 95 | self.z_b1_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 96 | self.z_b2_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 97 | self.z_b3_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 98 | self.z_b4_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 99 | self.z_u_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 100 | self.z_f_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 101 | 102 | self.y_u_pred = self.net_P_u(self.x1_u_tf, self.x2_u_tf, self.z_u_tf) 103 | self.y_b1_pred = self.get_b1(self.x1_b1_tf, self.x2_b1_tf, self.z_b1_tf) 104 | self.y_b2_pred = self.get_b2(self.x1_b2_tf, self.x2_b2_tf, self.z_b2_tf) 105 
| self.y_b3_pred = self.get_b3(self.x1_b3_tf, self.x2_b3_tf, self.z_b3_tf) 106 | self.y_b4_pred = self.get_b4(self.x1_b4_tf, self.x2_b4_tf, self.z_b4_tf) 107 | self.y_k_pred = self.net_P_k(self.y_u_pred) 108 | self.y_f_pred = self.get_f(self.x1_f_tf, self.x2_f_tf, self.z_f_tf) 109 | 110 | # Generator loss (to be minimized) 111 | self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss = self.compute_generator_loss(self.x1_u_tf, self.x2_u_tf, 112 | self.y_u_pred, self.y_f_pred, self.y_b1_pred, self.y_b2_pred, 113 | self.y_b3_pred, self.y_b4_pred, self.z_u_tf) 114 | 115 | # Discriminator loss (to be minimized) 116 | self.T_loss = self.compute_discriminator_loss(self.x1_u_tf, self.x2_u_tf, self.y_u_tf, self.z_u_tf) 117 | 118 | # Define optimizer 119 | self.optimizer_KL = tf.train.AdamOptimizer(1e-4) 120 | self.optimizer_T = tf.train.AdamOptimizer(1e-4) 121 | 122 | # Define train Ops 123 | self.train_op_KL = self.optimizer_KL.minimize(self.G_loss, 124 | var_list = [self.weights_P_u, self.biases_P_u, self.weights_P_k, self.biases_P_k, 125 | self.weights_Q, self.biases_Q]) 126 | 127 | self.train_op_T = self.optimizer_T.minimize(self.T_loss, 128 | var_list = [self.weights_T, self.biases_T]) 129 | 130 | # Initialize Tensorflow variables 131 | init = tf.global_variables_initializer() 132 | self.sess.run(init) 133 | 134 | 135 | # Initialize network weights and biases using Xavier initialization 136 | def initialize_NN(self, layers): 137 | # Xavier initialization 138 | def xavier_init(size): 139 | in_dim = size[0] 140 | out_dim = size[1] 141 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 142 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, dtype=tf.float32) 143 | 144 | weights = [] 145 | biases = [] 146 | num_layers = len(layers) 147 | for l in range(0,num_layers-1): 148 | W = xavier_init(size=[layers[l], layers[l+1]]) 149 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 150 | weights.append(W) 151 | biases.append(b) 152 | return weights, biases 153 | 154 | 155 | # Evaluates the forward pass 156 | def forward_pass(self, H, layers, weights, biases): 157 | num_layers = len(layers) 158 | for l in range(0,num_layers-2): 159 | W = weights[l] 160 | b = biases[l] 161 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 162 | W = weights[-1] 163 | b = biases[-1] 164 | H = tf.add(tf.matmul(H, W), b) 165 | return H 166 | 167 | def f(self, X_normalized): 168 | return tf.zeros_like(X_normalized) 169 | 170 | # Decoder: p(y|x,z) 171 | def net_P_u(self, X1, X2, Z): 172 | Y = self.forward_pass(tf.concat([X1, X2, Z], 1), 173 | self.layers_P_u, 174 | self.weights_P_u, 175 | self.biases_P_u) 176 | return Y 177 | 178 | # Encoder: q(z|x,y) 179 | def net_Q(self, X1, X2, Y): 180 | Z = self.forward_pass(tf.concat([X1, X2, Y], 1), 181 | self.layers_Q, 182 | self.weights_Q, 183 | self.biases_Q) 184 | return Z 185 | 186 | # Discriminator 187 | def net_T(self, X1, X2, Y): 188 | T = self.forward_pass(tf.concat([X1, X2, Y], 1), 189 | self.layers_T, 190 | self.weights_T, 191 | self.biases_T) 192 | return T 193 | 194 | # Decoder: p(y|x,z) 195 | def net_P_k(self, U): 196 | Y = self.forward_pass(U, 197 | self.layers_P_k, 198 | self.weights_P_k, 199 | self.biases_P_k) 200 | return self.ksat * tf.exp(Y) 201 | 202 | 203 | def get_u(self, X1, X2, Z): 204 | z_prior = Z 205 | u = self.net_P_u(X1, X2, z_prior) 206 | return u 207 | 208 | def get_k(self, U): 209 | u = self.net_P_k(U) 210 | return u 211 | 212 | def get_b1(self, X1, X2, Z): 213 | z_prior = Z 214 | u = 
self.net_P_u(X1, X2, z_prior) 215 | u_x1 = tf.gradients(u, X1)[0] 216 | k = self.net_P_k(u) 217 | temp = self.q + k * u_x1 218 | return temp 219 | 220 | def get_b2(self, X1, X2, Z): 221 | z_prior = Z 222 | u = self.net_P_u(X1, X2, z_prior) 223 | u_x2 = tf.gradients(u, X2)[0] 224 | return u_x2 225 | 226 | def get_b3(self, X1, X2, Z): 227 | z_prior = Z 228 | u = self.net_P_u(X1, X2, z_prior) 229 | temp = u - self.u_0 230 | return temp 231 | 232 | def get_b4(self, X1, X2, Z): 233 | z_prior = Z 234 | u = self.net_P_u(X1, X2, z_prior) 235 | u_x2 = tf.gradients(u, X2)[0] 236 | return u_x2 237 | 238 | def get_f(self, X1, X2, Z_u): 239 | u = self.net_P_u(X1, X2, Z_u) 240 | k = self.net_P_k(u) 241 | u_x1 = tf.gradients(u, X1)[0] 242 | u_x2 = tf.gradients(u, X2)[0] 243 | f_1 = tf.gradients(k*u_x1, X1)[0] 244 | f_2 = tf.gradients(k*u_x2, X2)[0] 245 | f = f_1 + f_2 246 | return f 247 | 248 | def compute_generator_loss(self, x1_u, x2_u, y_u_pred, y_f_pred, y_b1_pred, y_b2_pred, y_b3_pred, y_b4_pred, z_u): 249 | # Encoder: q(z|x,y) 250 | z_u_prior = z_u 251 | 252 | z_u_encoder = self.net_Q(x1_u, x2_u, y_u_pred) 253 | 254 | y_u_pred = self.net_P_u(x1_u, x2_u, z_u) 255 | T_pred = self.net_T(x1_u, x2_u, y_u_pred) 256 | 257 | # KL-divergence between the data and the generator samples 258 | KL = tf.reduce_mean(T_pred) 259 | 260 | # Entropic regularization 261 | log_q = - tf.reduce_mean(tf.square(z_u_prior-z_u_encoder)) 262 | 263 | # Physics-informed loss 264 | loss_f = tf.reduce_mean(tf.square(y_f_pred)) + tf.reduce_mean(tf.square(y_b1_pred)) +\ 265 | tf.reduce_mean(tf.square(y_b2_pred)) + tf.reduce_mean(tf.square(y_b3_pred)) + tf.reduce_mean(tf.square(y_b4_pred)) 266 | 267 | # Generator loss 268 | loss = KL + (1.0-self.lam)*log_q + self.beta * loss_f 269 | 270 | return loss, KL, (1.0-self.lam)*log_q, self.beta * loss_f 271 | 272 | 273 | def compute_discriminator_loss(self, X1, X2, Y, Z): 274 | # Prior: p(z) 275 | z_prior = Z 276 | # Decoder: p(y|x,z) 277 | Y_pred = self.net_P_u(X1, X2, z_prior) 278 | 279 | # Discriminator loss 280 | T_real = self.net_T(X1, X2, Y) 281 | T_fake = self.net_T(X1, X2, Y_pred) 282 | 283 | T_real = tf.sigmoid(T_real) 284 | T_fake = tf.sigmoid(T_fake) 285 | 286 | T_loss = -tf.reduce_mean(tf.log(1.0 - T_real + 1e-8) + \ 287 | tf.log(T_fake + 1e-8)) 288 | 289 | return T_loss 290 | 291 | # Trains the model 292 | def train(self, nIter = 20000): 293 | 294 | start_time = timeit.default_timer() 295 | for it in range(nIter): 296 | 297 | # Sampling from latent spaces 298 | z_u = np.random.randn(self.x1_u.shape[0], self.Z_dim) 299 | z_f = np.random.randn(self.x1_f.shape[0], self.Z_dim) 300 | z_b1 = np.random.randn(self.x1_b1.shape[0], self.Z_dim) 301 | z_b2 = np.random.randn(self.x1_b2.shape[0], self.Z_dim) 302 | z_b3 = np.random.randn(self.x1_b3.shape[0], self.Z_dim) 303 | z_b4 = np.random.randn(self.x1_b4.shape[0], self.Z_dim) 304 | 305 | # Define a dictionary for associating placeholders with data 306 | tf_dict = {self.x1_u_tf: self.x1_u, self.x2_u_tf: self.x2_u, self.x1_f_tf: self.x1_f, self.x2_f_tf: self.x2_f, 307 | self.y_u_tf: self.y_u, self.x1_b1_tf: self.x1_b1, self.x2_b1_tf: self.x2_b1, self.x1_b2_tf: self.x1_b2, self.x2_b2_tf: self.x2_b2, 308 | self.x1_b3_tf: self.x1_b3, self.x2_b3_tf: self.x2_b3, self.x1_b4_tf: self.x1_b4, self.x2_b4_tf: self.x2_b4, 309 | self.z_u_tf: z_u, self.z_f_tf: z_f, self.z_b1_tf: z_b1, self.z_b2_tf: z_b2, self.z_b3_tf: z_b3, self.z_b4_tf: z_b4} 310 | 311 | # Run the Tensorflow session to minimize the loss 312 | for i in range(self.k1): 313 | 
self.sess.run(self.train_op_T, tf_dict) 314 | for j in range(self.k2): 315 | self.sess.run(self.train_op_KL, tf_dict) 316 | 317 | # Print 318 | if it % 100 == 0: 319 | elapsed = timeit.default_timer() - start_time 320 | loss_KL_value, reconv, loss_PDE = self.sess.run([self.KL_loss, self.recon_loss, self.PDE_loss], tf_dict) 321 | loss_T_value = self.sess.run(self.T_loss, tf_dict) 322 | print('It: %d, KL_loss: %.2e, Recon_loss: %.2e, PDE_loss: %.2e, T_loss: %.2e, Time: %.2f' % 323 | (it, loss_KL_value, reconv, loss_PDE, loss_T_value, elapsed)) 324 | start_time = timeit.default_timer() 325 | 326 | 327 | # Evaluates predictions at test points 328 | def predict_k(self, X_star): 329 | # Center around the origin 330 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 331 | # Predict 332 | z_u = np.random.randn(X_star.shape[0], self.Z_dim) 333 | tf_dict = {self.x1_u_tf: X_star[:,0:1], self.x2_u_tf: X_star[:,1:2], self.z_u_tf: z_u} 334 | k_star = self.sess.run(self.y_k_pred, tf_dict) 335 | return k_star / self.ksat 336 | 337 | # Evaluates predictions at test points 338 | def predict_u(self, X_star): 339 | # Center around the origin 340 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 341 | # Predict 342 | z_u = np.random.randn(X_star.shape[0], self.Z_dim) 343 | tf_dict = {self.x1_u_tf: X_star[:,0:1], self.x2_u_tf: X_star[:,1:2], self.z_u_tf: z_u} 344 | u_star = self.sess.run(self.y_u_pred, tf_dict) 345 | return u_star 346 | 347 | # Evaluates predictions at test points 348 | def predict_f(self, X_star): 349 | # Center around the origin 350 | X_star = (X_star - self.lb) - 0.5*(self.ub - self.lb) 351 | # Predict 352 | z_f = np.random.randn(X_star.shape[0], self.Z_dim) 353 | tf_dict = {self.x1_f_tf: X_star[:,0:1], self.x2_f_tf: X_star[:,1:2], self.z_f_tf: z_f} 354 | f_star = self.sess.run(self.y_f_pred, tf_dict) 355 | return f_star 356 | 357 | # Predict the k as function of u 358 | def predict_k_from_u(self, u): 359 | tf_dict = {self.y_u_pred: u} 360 | k_star = self.sess.run(self.y_k_pred, tf_dict) 361 | return k_star / self.ksat 362 | 363 | 364 | 365 | -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/nonlinear2d_data.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Darcy/Darcy_noisy/nonlinear2d_data.npz -------------------------------------------------------------------------------- /Darcy/Darcy_noisy/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib as mpl 3 | #mpl.use('pgf') 4 | 5 | def figsize(scale, nplots = 1): 6 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 7 | inches_per_pt = 1.0/72.27 # Convert pt to inch 8 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 9 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 10 | fig_height = nplots*fig_width*golden_mean # height in inches 11 | fig_size = [fig_width,fig_height] 12 | return fig_size 13 | 14 | pgf_with_latex = { # setup matplotlib to use latex for output 15 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 16 | "text.usetex": True, # use LaTeX to write all text 17 | "font.family": "serif", 18 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 19 | "font.sans-serif": [], 20 | "font.monospace": [], 21 | "axes.labelsize": 10, # LaTeX 
default is 10pt font. 22 | "font.size": 10, 23 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 24 | "xtick.labelsize": 8, 25 | "ytick.labelsize": 8, 26 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 27 | "pgf.preamble": [ 28 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 29 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 30 | ] 31 | } 32 | mpl.rcParams.update(pgf_with_latex) 33 | 34 | import matplotlib.pyplot as plt 35 | 36 | # I make my own newfig and savefig functions 37 | def newfig(width, nplots = 1): 38 | fig = plt.figure(figsize=figsize(width, nplots)) 39 | ax = fig.add_subplot(111) 40 | return fig, ax 41 | 42 | def savefig(filename, crop = True): 43 | if crop == True: 44 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 45 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 46 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 47 | else: 48 | # plt.savefig('{}.pgf'.format(filename)) 49 | plt.savefig('{}.pdf'.format(filename)) 50 | plt.savefig('{}.eps'.format(filename)) 51 | 52 | ## Simple plot 53 | #fig, ax = newfig(1.0) 54 | # 55 | #def ema(y, a): 56 | # s = [] 57 | # s.append(y[0]) 58 | # for t in range(1, len(y)): 59 | # s.append(a * y[t] + (1-a) * s[t-1]) 60 | # return np.array(s) 61 | # 62 | #y = [0]*200 63 | #y.extend([20]*(1000-len(y))) 64 | #s = ema(y, 0.01) 65 | # 66 | #ax.plot(s) 67 | #ax.set_xlabel('X Label') 68 | #ax.set_ylabel('EMA') 69 | # 70 | #savefig('ema') -------------------------------------------------------------------------------- /ODE/ODE.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import os 8 | os.environ['KMP_DUPLICATE_LIB_OK']='True' 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | plt.switch_backend('agg') 12 | from pyDOE import lhs 13 | 14 | from models import ODE_UQPINN 15 | 16 | import scipy.io 17 | from scipy.interpolate import griddata 18 | from pyDOE import lhs 19 | from plotting import newfig, savefig 20 | from mpl_toolkits.mplot3d import Axes3D 21 | import time 22 | import matplotlib.gridspec as gridspec 23 | from mpl_toolkits.axes_grid1 import make_axes_locatable 24 | from scipy import stats 25 | 26 | np.random.seed(1234) 27 | 28 | if __name__ == "__main__": 29 | 30 | # Number of collocation points 31 | N_f = 100 32 | 33 | # Number of testing points 34 | N_ff = 200 35 | 36 | # Number of the training data (in this example on the boundary) 37 | N_u = 20 38 | 39 | # Define the input, output, latent variable dimension 40 | X_dim = 1 41 | Y_dim = 1 42 | Z_dim = 1 43 | 44 | # Right handside of the ODE 45 | def f(X): 46 | return np.sin(np.pi * X) 47 | 48 | # Position of the collocation points 49 | X_f = np.linspace(-1.,1.,N_f)[:,None] 50 | 51 | # Position of the boundary of the problem 52 | X_ut = np.linspace(-1.,1.,2)[:,None] 53 | X_u = X_ut 54 | for i in range(N_u-1): 55 | X_u = np.vstack((X_u, X_ut)) 56 | 57 | # Generate stochastic boundary condition 58 | Y_ut = f(X_ut) 59 | Y_u = Y_ut + 0.05 * np.random.randn(2, Y_dim) 60 | for i in range(N_u-1): 61 | Y_ut = 0.05 * np.random.randn(2, Y_dim) 62 | Y_u = np.vstack((Y_u, Y_ut)) 63 | 64 | # Reference solution on the testing points 65 | X_ff = np.linspace(-1.,1.,N_ff)[:,None] 66 | Y_reff = f(X_ff) 67 | 68 | # Load the reference solution of the stochastic ODE generated by Monte 
Carlo 69 | data = scipy.io.loadmat('./ODE2000.mat') 70 | Exact = np.real(data['U']).T 71 | 72 | # Model creation 73 | layers_P = np.array([X_dim+Z_dim,50,50,50,50,Y_dim]) 74 | layers_Q = np.array([X_dim+Y_dim,50,50,50,50,Z_dim]) 75 | layers_T = np.array([X_dim+Y_dim,50,50,1]) 76 | model = ODE_UQPINN(X_f, X_u, Y_u, layers_P, layers_Q, layers_T, lam = 1.5, beta = 1.) 77 | 78 | # Train the model 79 | model.train(nIter = 30000, N_u = 2*N_u, N_f = N_f) 80 | 81 | # Prediction 82 | plt.figure(1) 83 | N_samples = 2000 84 | samples_mean = np.zeros((X_ff.shape[0], N_samples)) 85 | for i in range(0, N_samples): 86 | samples_mean[:,i:i+1] = model.generate_sample(X_ff) 87 | plt.plot(X_ff, samples_mean[:,i:i+1],'k.', alpha = 0.005) 88 | plt.plot(X_ff, Y_reff, 'r*',alpha = 0.2) 89 | 90 | # Compute the mean and the variance of the prediction 91 | mu_pred = np.mean(samples_mean, axis = 1) 92 | Sigma_pred = np.var(samples_mean, axis = 1) 93 | 94 | 95 | # Plot the prediction with the uncertainty versus the reference solution 96 | ax = plt.figure(2,figsize=(7,5)) 97 | plt.xticks(fontsize=13) 98 | plt.yticks(fontsize=13) 99 | plt.plot(X_u, Y_u, 'kx', markersize = 4, label = "Boundary points") 100 | lower = mu_pred - 2.0*np.sqrt(Sigma_pred) 101 | upper = mu_pred + 2.0*np.sqrt(Sigma_pred) 102 | plt.fill_between(X_ff.flatten(), lower.flatten(), upper.flatten(), 103 | facecolor='orange', alpha=0.5, label="Two std band") 104 | plt.plot(X_ff,Y_reff,'b-', label = "Exact", linewidth=2) 105 | plt.plot(X_ff, mu_pred, 'r--', label = "Prediction", linewidth=2) 106 | plt.xlabel('$x$',fontsize=13) 107 | plt.ylabel('$u(x)$',fontsize=13) 108 | plt.legend(loc='upper left', frameon=False, prop={'size': 10}) 109 | plt.savefig('./ODEnew1.png', dpi = 600) 110 | 111 | # Compute the prediction relative error 112 | mu_pred = mu_pred[:,None] 113 | error_u = np.linalg.norm(Y_reff-mu_pred,2)/np.linalg.norm(Y_reff,2) 114 | print('Error u: %e' % (error_u)) 115 | np.save('L2_error.npy', error_u) 116 | 117 | 118 | ######### Compare the uncertainty at x = -0.5 and x = 0.5 ######## 119 | E1 = Exact[50,:][:, None] 120 | E3 = Exact[150,:][:, None] 121 | M1 = samples_mean[50,:][:, None] 122 | M3 = samples_mean[150,:][:, None] 123 | 124 | 125 | ######## Probability density kernel estimation ######## 126 | xmin, xmax = E1.min(), E1.max() 127 | X_marginal_1 = np.linspace(xmin, xmax, 100)[:,None] 128 | positions_marginal_1 = X_marginal_1.flatten() 129 | values_marginal_1 = E1.flatten() 130 | gkde = stats.gaussian_kde(values_marginal_1) 131 | KDE_marginal_1 = gkde.evaluate(positions_marginal_1) 132 | 133 | xmin, xmax = E3.min(), E3.max() 134 | X_marginal_3 = np.linspace(xmin, xmax, 100)[:,None] 135 | positions_marginal_3 = X_marginal_3.flatten() 136 | values_marginal_3 = E3.flatten() 137 | gkde = stats.gaussian_kde(values_marginal_3) 138 | KDE_marginal_3 = gkde.evaluate(positions_marginal_3) 139 | 140 | xmin, xmax = M1.min(), M1.max() 141 | X_marginal_4 = np.linspace(xmin, xmax, 100)[:,None] 142 | positions_marginal_4 = X_marginal_4.flatten() 143 | values_marginal_4 = M1.flatten() 144 | gkde = stats.gaussian_kde(values_marginal_4) 145 | KDE_marginal_4 = gkde.evaluate(positions_marginal_4) 146 | 147 | xmin, xmax = M3.min(), M3.max() 148 | X_marginal_6 = np.linspace(xmin, xmax, 100)[:,None] 149 | positions_marginal_6 = X_marginal_6.flatten() 150 | values_marginal_6 = M3.flatten() 151 | gkde = stats.gaussian_kde(values_marginal_6) 152 | KDE_marginal_6 = gkde.evaluate(positions_marginal_6) 153 | 154 | 155 | ax = plt.figure(3,figsize=(6,4.7)) 156 | 
plt.xticks(fontsize=13) 157 | plt.yticks(fontsize=13) 158 | plt.hist(Exact[50,:], bins = 50, density=True, alpha = 0.6, color='blue') 159 | plt.hist(samples_mean[50,:], bins = 50, density=True, alpha = 0.6, color='red') 160 | plt.plot(X_marginal_1, KDE_marginal_1, 'b-', label = 'Exact') 161 | plt.plot(X_marginal_4, KDE_marginal_4, 'r-', label = 'Prediction') 162 | plt.xlabel('$u(x = - 0.5)$',fontsize=13) 163 | plt.ylabel('$p(u)$',fontsize=13) 164 | plt.legend(loc='upper left', frameon=False, prop={'size': 13}) 165 | plt.savefig('./ODE_x50.png', dpi = 600) 166 | 167 | ax = plt.figure(5,figsize=(6,4.7)) 168 | plt.xticks(fontsize=13) 169 | plt.yticks(fontsize=13) 170 | plt.hist(Exact[150,:], bins = 50, density=True, alpha = 0.6, color='blue') 171 | plt.hist(samples_mean[150,:], bins = 50, density=True, alpha = 0.6, color='red') 172 | plt.plot(X_marginal_3, KDE_marginal_3, 'b-', label = 'Exact') 173 | plt.plot(X_marginal_6, KDE_marginal_6, 'r-', label = 'Prediction') 174 | plt.xlabel('$u(x = 0.5)$',fontsize=13) 175 | plt.ylabel('$p(u)$',fontsize=13) 176 | plt.legend(loc='upper left', frameon=False, prop={'size': 13}) 177 | plt.savefig('./ODE_x150.png', dpi = 600) 178 | 179 | 180 | -------------------------------------------------------------------------------- /ODE/ODE2000.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/ODE/ODE2000.mat -------------------------------------------------------------------------------- /ODE/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/ODE/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /ODE/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/ODE/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /ODE/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Nov 2018 3 | 4 | @author: Yibo Yang 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import timeit 10 | 11 | 12 | class ODE_UQPINN: 13 | # Initialize the class 14 | def __init__(self, X_f, X_u, Y_u, layers_P, layers_Q, layers_T, lam = 1.0, beta = 1.0): 15 | 16 | # Normalize data 17 | self.Xmean, self.Xstd = X_f.mean(0), X_f.std(0) 18 | self.Ymean, self.Ystd = Y_u.mean(0), Y_u.std(0) 19 | X_f = (X_f - self.Xmean) / self.Xstd 20 | X_u = (X_u - self.Xmean) / self.Xstd 21 | self.Jacobian = 1 / self.Xstd 22 | 23 | self.X_f = X_f 24 | self.X_u = X_u 25 | self.Y_u = Y_u 26 | 27 | self.layers_P = layers_P 28 | self.layers_Q = layers_Q 29 | self.layers_T = layers_T 30 | 31 | self.X_dim = X_u.shape[1] 32 | self.Y_dim = Y_u.shape[1] 33 | self.Z_dim = layers_Q[-1] 34 | self.lam = lam 35 | self.beta = beta 36 | 37 | self.k1 = 1 38 | self.k2 = 5 39 | 40 | # Initialize network weights and biases 41 | self.weights_P, self.biases_P = self.initialize_NN(layers_P) 42 | self.weights_Q, self.biases_Q = self.initialize_NN(layers_Q) 43 | self.weights_T, self.biases_T = self.initialize_NN(layers_T) 44 | 45 | # Define Tensorflow 
session 46 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 47 | 48 | # Define placeholders and computational graph 49 | self.X_u_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 50 | self.X_f_tf = tf.placeholder(tf.float32, shape=(None, self.X_dim)) 51 | self.Y_u_tf = tf.placeholder(tf.float32, shape=(None, self.Y_dim)) 52 | self.Z_u_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 53 | self.Z_f_tf = tf.placeholder(tf.float32, shape=(None, self.Z_dim)) 54 | 55 | self.Y_u_pred = self.net_P(self.X_u_tf, self.Z_u_tf) 56 | self.Y_f_pred = self.get_r(self.X_f_tf, self.Z_f_tf) 57 | 58 | # Generator loss (to be minimized) 59 | self.G_loss, self.KL_loss, self.recon_loss, self.PDE_loss = self.compute_generator_loss(self.X_u_tf, self.Y_u_tf, self.Y_u_pred, 60 | self.X_f_tf, self.Y_f_pred, self.Z_u_tf, self.Z_f_tf) 61 | 62 | # Discriminator loss (to be minimized) 63 | self.T_loss = self.compute_discriminator_loss(self.X_u_tf, self.Y_u_tf, self.Z_u_tf) 64 | 65 | # Generate samples of y given x 66 | self.sample = self.sample_generator(self.X_u_tf, self.Z_u_tf) 67 | 68 | # Compute the posterior of latent variable z 69 | self.z_posterior = self.get_z(self.X_u_tf, self.Z_u_tf) 70 | 71 | # Define optimizer 72 | self.optimizer_KL = tf.train.AdamOptimizer(1e-4) 73 | self.optimizer_T = tf.train.AdamOptimizer(1e-4) 74 | 75 | # Define train Ops 76 | self.train_op_KL = self.optimizer_KL.minimize(self.G_loss, 77 | var_list = [self.weights_P, self.biases_P, 78 | self.weights_Q, self.biases_Q]) 79 | 80 | self.train_op_T = self.optimizer_T.minimize(self.T_loss, 81 | var_list = [self.weights_T, self.biases_T]) 82 | 83 | # Initialize Tensorflow variables 84 | init = tf.global_variables_initializer() 85 | self.sess.run(init) 86 | 87 | 88 | # Initialize network weights and biases using Xavier initialization 89 | def initialize_NN(self, layers): 90 | # Xavier initialization 91 | def xavier_init(size): 92 | in_dim = size[0] 93 | out_dim = size[1] 94 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
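            # Equivalent to std = sqrt(2 / (fan_in + fan_out)); this factor scales the standard-normal draw on the next line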
95 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, dtype=tf.float32) 96 | 97 | weights = [] 98 | biases = [] 99 | num_layers = len(layers) 100 | for l in range(0,num_layers-1): 101 | W = xavier_init(size=[layers[l], layers[l+1]]) 102 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 103 | weights.append(W) 104 | biases.append(b) 105 | return weights, biases 106 | 107 | 108 | # Evaluates the forward pass 109 | def forward_pass(self, H, layers, weights, biases): 110 | num_layers = len(layers) 111 | for l in range(0,num_layers-2): 112 | W = weights[l] 113 | b = biases[l] 114 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 115 | W = weights[-1] 116 | b = biases[-1] 117 | H = tf.add(tf.matmul(H, W), b) 118 | return H 119 | 120 | # Forcing term (right hand of the ODE) 121 | def f(self, X_normalized): # 122 | X = self.Xstd * X_normalized + self.Xmean 123 | return - np.pi ** 2 * tf.sin(np.pi*X) - np.pi * tf.cos(np.pi * X) * tf.sin(np.pi * X) ** 2 124 | 125 | # Decoder: p(y|x,z) 126 | def net_P(self, X, Z): 127 | Y = self.forward_pass(tf.concat([X, Z], 1), 128 | self.layers_P, 129 | self.weights_P, 130 | self.biases_P) 131 | return Y 132 | 133 | # Encoder: q(z|x,y) 134 | def net_Q(self, X, Y): 135 | Z = self.forward_pass(tf.concat([X, Y], 1), 136 | self.layers_Q, 137 | self.weights_Q, 138 | self.biases_Q) 139 | return Z 140 | 141 | # Discriminator 142 | def net_T(self, X, Y): 143 | T = self.forward_pass(tf.concat([X, Y], 1), 144 | self.layers_T, 145 | self.weights_T, 146 | self.biases_T) 147 | return T 148 | 149 | # Physics-Informed neural network prediction 150 | def get_u(self, X, Z): 151 | z_prior = Z 152 | u = self.net_P(X, z_prior) 153 | return u 154 | 155 | # Physics-Informed residual on the collocation points 156 | def get_r(self, X, Z): 157 | z_prior = Z 158 | u = self.net_P(X, z_prior) 159 | u_x = tf.gradients(u, X)[0] 160 | u_xx = tf.gradients(u_x, X)[0] 161 | f = self.f(X) 162 | r = (self.Jacobian ** 2) * u_xx - (self.Jacobian) * (u ** 2) *u_x - f 163 | return r 164 | 165 | # Compute the generator loss 166 | def compute_generator_loss(self, X_u, Y_u, Y_u_pred, X_f, Y_f_pred, Z_u, Z_f): 167 | # Prior: 168 | z_u_prior = Z_u 169 | z_f_prior = Z_f 170 | # Encoder: q(z|x,y) 171 | z_u_encoder = self.net_Q(X_u, Y_u_pred) 172 | z_f_encoder = self.net_Q(X_f, Y_f_pred) 173 | # Discriminator loss 174 | Y_pred = self.net_P(X_u, Z_u) 175 | T_pred = self.net_T(X_u, Y_pred) 176 | 177 | # KL-divergence between the data distribution and the model distribution 178 | KL = tf.reduce_mean(T_pred) 179 | 180 | # Entropic regularization 181 | log_q = - tf.reduce_mean(tf.square(z_u_prior-z_u_encoder)) 182 | 183 | # Physics-informed loss 184 | loss_f = tf.reduce_mean(tf.square(Y_f_pred)) 185 | 186 | # Generator loss 187 | loss = KL + (1.0-self.lam)*log_q + self.beta * loss_f 188 | 189 | return loss, KL, (1.0-self.lam)*log_q, self.beta * loss_f 190 | 191 | # Compute the discriminator loss 192 | def compute_discriminator_loss(self, X, Y, Z): 193 | # Prior: p(z) 194 | z_prior = Z 195 | # Decoder: p(y|x,z) 196 | Y_pred = self.net_P(X, z_prior) 197 | 198 | # Discriminator loss 199 | T_real = self.net_T(X, Y) 200 | T_fake = self.net_T(X, Y_pred) 201 | 202 | T_real = tf.sigmoid(T_real) 203 | T_fake = tf.sigmoid(T_fake) 204 | 205 | T_loss = -tf.reduce_mean(tf.log(1.0 - T_real + 1e-8) + \ 206 | tf.log(T_fake + 1e-8)) 207 | 208 | return T_loss 209 | 210 | 211 | # Fetches a mini-batch of data 212 | def fetch_minibatch(self,X, Y, N_batch): 213 | N = 
X.shape[0] 214 | idx = np.random.choice(N, N_batch, replace=False) 215 | X_batch = X[idx,:] 216 | Y_batch = Y[idx,:] 217 | return X_batch, Y_batch 218 | 219 | 220 | # Trains the model 221 | def train(self, nIter = 20000, N_u = 2, N_f = 100): 222 | 223 | start_time = timeit.default_timer() 224 | for it in range(nIter): 225 | 226 | # Sampling from the latent space for data and collocation points 227 | Z_u = np.random.randn(N_u, self.Z_dim) 228 | Z_f = np.random.randn(N_f, self.Z_dim) 229 | 230 | # Define a dictionary for associating placeholders with data 231 | tf_dict = {self.X_u_tf: self.X_u, self.Y_u_tf: self.Y_u, self.X_f_tf: self.X_f, 232 | self.Z_u_tf: Z_u, self.Z_f_tf: Z_f} 233 | 234 | # Run the Tensorflow session to minimize the loss 235 | for i in range(self.k1): 236 | self.sess.run(self.train_op_T, tf_dict) 237 | for j in range(self.k2): 238 | self.sess.run(self.train_op_KL, tf_dict) 239 | 240 | # Print 241 | if it % 100 == 0: 242 | elapsed = timeit.default_timer() - start_time 243 | loss_KL_value, reconv, loss_PDE = self.sess.run([self.KL_loss, self.recon_loss, self.PDE_loss], tf_dict) 244 | loss_T_value = self.sess.run(self.T_loss, tf_dict) 245 | print('It: %d, KL_loss: %.2e, Recon_loss: %.2e, PDE_loss: %.2e, T_loss: %.2e, Time: %.2f' % 246 | (it, loss_KL_value, reconv, loss_PDE, loss_T_value, elapsed)) 247 | start_time = timeit.default_timer() 248 | 249 | # Generate samples of y given x by sampling from the latent space z 250 | def sample_generator(self, X, Z): 251 | # Prior: 252 | z_prior = Z 253 | # Decoder: p(y|x,z) 254 | Y_pred = self.net_P(X, z_prior) 255 | return Y_pred 256 | 257 | # Predict y given x 258 | def generate_sample(self, X_star): 259 | X_star = (X_star - self.Xmean) / self.Xstd 260 | Z = np.random.randn(X_star.shape[0], self.Z_dim) 261 | tf_dict = {self.X_u_tf: X_star, self.Z_u_tf: Z} 262 | Y_star = self.sess.run(self.sample, tf_dict) 263 | Y_star = Y_star 264 | return Y_star 265 | 266 | # Get the posterior of z over the latent space 267 | def get_z(self, X, Z): 268 | Y_pred = self.net_P(X, Z) 269 | z = self.net_Q(X, Y_pred) 270 | return z 271 | 272 | -------------------------------------------------------------------------------- /ODE/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib as mpl 3 | #mpl.use('pgf') 4 | 5 | def figsize(scale, nplots = 1): 6 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 7 | inches_per_pt = 1.0/72.27 # Convert pt to inch 8 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 9 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 10 | fig_height = nplots*fig_width*golden_mean # height in inches 11 | fig_size = [fig_width,fig_height] 12 | return fig_size 13 | 14 | pgf_with_latex = { # setup matplotlib to use latex for output 15 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 16 | "text.usetex": True, # use LaTeX to write all text 17 | "font.family": "serif", 18 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 19 | "font.sans-serif": [], 20 | "font.monospace": [], 21 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
22 | "font.size": 10, 23 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 24 | "xtick.labelsize": 8, 25 | "ytick.labelsize": 8, 26 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 27 | "pgf.preamble": [ 28 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 29 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 30 | ] 31 | } 32 | mpl.rcParams.update(pgf_with_latex) 33 | 34 | import matplotlib.pyplot as plt 35 | 36 | # I make my own newfig and savefig functions 37 | def newfig(width, nplots = 1): 38 | fig = plt.figure(figsize=figsize(width, nplots)) 39 | ax = fig.add_subplot(111) 40 | return fig, ax 41 | 42 | def savefig(filename, crop = True): 43 | if crop == True: 44 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 45 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 46 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 47 | else: 48 | # plt.savefig('{}.pgf'.format(filename)) 49 | plt.savefig('{}.pdf'.format(filename)) 50 | plt.savefig('{}.eps'.format(filename)) 51 | 52 | ## Simple plot 53 | #fig, ax = newfig(1.0) 54 | # 55 | #def ema(y, a): 56 | # s = [] 57 | # s.append(y[0]) 58 | # for t in range(1, len(y)): 59 | # s.append(a * y[t] + (1-a) * s[t-1]) 60 | # return np.array(s) 61 | # 62 | #y = [0]*200 63 | #y.extend([20]*(1000-len(y))) 64 | #s = ema(y, 0.01) 65 | # 66 | #ax.plot(s) 67 | #ax.set_xlabel('X Label') 68 | #ax.set_ylabel('EMA') 69 | # 70 | #savefig('ema') -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Adversarial Uncertainty Quantification in Physics-Informed Neural Networks 2 | We present a deep learning framework for quantifying and propagating uncertainty in systems governed by non-linear differential equations using physics-informed neural networks. Specifically, we employ latent variable models to construct probabilistic representations for the system states, and put forth an adversarial inference procedure for training them on data, while constraining their predictions to satisfy given physical laws expressed by partial differential equations. Such physics-informed constraints provide a regularization mechanism for effectively training deep generative models as surrogates of physical systems in which the cost of data acquisition is high, and training data-sets are typically small. This provides a flexible framework for characterizing uncertainty in the outputs of physical systems due to randomness in their inputs or noise in their observations that entirely bypasses the need for repeatedly sampling expensive experiments or numerical simulators. We demonstrate the effectiveness of our approach through a series of examples involving uncertainty propagation in non-linear conservation laws, and the discovery of constitutive laws for flow through porous media directly from noisy data. 3 | 4 | This paper is published on Journal of Computational Physics. 5 | 6 | - Yibo Yang, Paris Perdikaris, 7 | Adversarial uncertainty quantification in physics-informed neural networks, 8 | Journal of Computational Physics, 9 | 2019, 10 | ISSN 0021-9991, 11 | https://doi.org/10.1016/j.jcp.2019.05.027. 
23 | 
24 | ## Citation
25 | ```
26 | @article{yang2019adversarial,
27 |   title={Adversarial uncertainty quantification in physics-informed neural networks},
28 |   author={Yang, Yibo and Perdikaris, Paris},
29 |   journal={Journal of Computational Physics},
30 |   volume={394},
31 |   pages={136--152},
32 |   year={2019},
33 |   publisher={Elsevier}
34 | }
35 | ```
36 | 
--------------------------------------------------------------------------------
/Tutorial/Entropic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Tutorial/Entropic.png
--------------------------------------------------------------------------------
/Tutorial/GANs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Tutorial/GANs.png
--------------------------------------------------------------------------------
/Tutorial/Mode_Collapse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Tutorial/Mode_Collapse.png
--------------------------------------------------------------------------------
/Tutorial/ODE2000.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Tutorial/ODE2000.mat
--------------------------------------------------------------------------------
/Tutorial/__pycache__/plotting.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PredictiveIntelligenceLab/UQPINNs/06933d6c3656db1d2f7147185daa54ea408b1c04/Tutorial/__pycache__/plotting.cpython-36.pyc
--------------------------------------------------------------------------------
/Tutorial/plotting.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib as mpl
3 | #mpl.use('pgf')
4 | 
5 | def figsize(scale, nplots = 1):
6 |     fig_width_pt = 390.0                          # Get this from LaTeX using \the\textwidth
7 |     inches_per_pt = 1.0/72.27                     # Convert pt to inch
8 |     golden_mean = (np.sqrt(5.0)-1.0)/2.0          # Aesthetic ratio (you could change this)
9 |     fig_width = fig_width_pt*inches_per_pt*scale  # width in inches
10 |     fig_height = nplots*fig_width*golden_mean     # height in inches
11 |     fig_size = [fig_width,fig_height]
12 |     return fig_size
13 | 
14 | pgf_with_latex = {                      # setup matplotlib to use latex for output
15 |     "pgf.texsystem": "pdflatex",        # change this if using xelatex or lualatex
16 |     "text.usetex": True,                # use LaTeX to write all text
17 |     "font.family": "serif",
18 |     "font.serif": [],                   # blank entries should cause plots to inherit fonts from the document
19 |     "font.sans-serif": [],
20 |     "font.monospace": [],
21 |     "axes.labelsize": 10,               # LaTeX default is 10pt font.
22 | "font.size": 10, 23 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 24 | "xtick.labelsize": 8, 25 | "ytick.labelsize": 8, 26 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 27 | "pgf.preamble": [ 28 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 29 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 30 | ] 31 | } 32 | mpl.rcParams.update(pgf_with_latex) 33 | 34 | import matplotlib.pyplot as plt 35 | 36 | # I make my own newfig and savefig functions 37 | def newfig(width, nplots = 1): 38 | fig = plt.figure(figsize=figsize(width, nplots)) 39 | ax = fig.add_subplot(111) 40 | return fig, ax 41 | 42 | def savefig(filename, crop = True): 43 | if crop == True: 44 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 45 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 46 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 47 | else: 48 | # plt.savefig('{}.pgf'.format(filename)) 49 | plt.savefig('{}.pdf'.format(filename)) 50 | plt.savefig('{}.eps'.format(filename)) 51 | 52 | ## Simple plot 53 | #fig, ax = newfig(1.0) 54 | # 55 | #def ema(y, a): 56 | # s = [] 57 | # s.append(y[0]) 58 | # for t in range(1, len(y)): 59 | # s.append(a * y[t] + (1-a) * s[t-1]) 60 | # return np.array(s) 61 | # 62 | #y = [0]*200 63 | #y.extend([20]*(1000-len(y))) 64 | #s = ema(y, 0.01) 65 | # 66 | #ax.plot(s) 67 | #ax.set_xlabel('X Label') 68 | #ax.set_ylabel('EMA') 69 | # 70 | #savefig('ema') --------------------------------------------------------------------------------