├── README.md
├── Stefan 1D1P
│   ├── Stefan 1D1P.md
│   ├── Stefan1D_direct.py
│   ├── Stefan1D_inverse_II.py
│   ├── Stefan_1D_inverse_I.py
│   └── Stefan_models_tf.py
├── Stefan 1D2P
│   ├── Stefan 1D2P.md
│   ├── Stefan1D_2P_direct.py
│   ├── Stefan1D_2P_inverse_I.py
│   ├── Stefan1D_2P_inverse_II.py
│   ├── Stefan1D_2P_inverse_III.py
│   └── Stefan1D_2p_models_tf.py
└── Stefan 2D1P
    ├── Stefan 2D1P.md
    ├── Stefan2D_direct.py
    ├── Stefan2D_inverse_I.py
    ├── Stefan2D_inverse_II.py
    └── Stefan2D_models_tf.py
/README.md:
--------------------------------------------------------------------------------
1 | ## Deep learning of free boundary and Stefan problems
2 | 
3 | Code and data (available upon request) accompanying the manuscript "Deep learning of free boundary and Stefan problems", authored by Sifan Wang and Paris Perdikaris.
4 | 
5 | ## Abstract
6 | 
7 | Free boundary problems appear naturally in numerous areas of mathematics, science and engineering. These
8 | problems present a great computational challenge because they necessitate numerical methods that can yield an accurate approximation of free boundaries and complex dynamic interfaces. In this work, we propose a multi-network model based on physics-informed neural networks to tackle a general class of forward and inverse free boundary problems called Stefan problems. Specifically, we approximate the unknown solution as well as any moving boundaries by two deep neural networks. In addition, we formulate a new type of inverse Stefan problem that aims to reconstruct the solution and free boundaries directly from sparse and noisy measurements. We demonstrate the effectiveness of our approach in a series of benchmarks spanning different types of Stefan problems, and illustrate how the proposed framework can accurately recover solutions of partial differential equations with moving boundaries and dynamic interfaces. 
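## Benchmark example

For concreteness, the one-phase benchmark solved by `Stefan 1D1P/Stefan1D_direct.py` (read off directly from the script's `u`, `s`, `h`, and `g` definitions) is the heat equation posed on a domain with a moving boundary,

$$u_t = u_{xx}, \quad 0 < x < s(t), \quad 0 < t \leq 1,$$

subject to the free-boundary and Neumann conditions

$$u(s(t), t) = 0, \qquad u_x(s(t), t) = \sqrt{3 - 2t}, \qquad u_x(0, t) = 2, \qquad s(0) = 2 - \sqrt{3},$$

with exact solution $u(x, t) = -\tfrac{1}{2}x^2 + 2x - t - \tfrac{1}{2}$ and exact free boundary $s(t) = 2 - \sqrt{3 - 2t}$. One network approximates $u$ and a second approximates $s$; the losses assembled in `Stefan_models_tf.py` penalize the PDE residual together with the initial and (free-)boundary conditions above. Each script is self-contained and can be run directly.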
9 | 10 | ## Citation 11 | 12 | @article{wang2020deep, 13 | title={Deep learning of free boundary and Stefan problems}, 14 | author={Wang, Sifan and Perdikaris, Paris}, 15 | journal={arXiv preprint arXiv:2006.05311}, 16 | year={2020} 17 | } 18 | -------------------------------------------------------------------------------- /Stefan 1D1P/Stefan 1D1P.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Stefan 1D1P/Stefan1D_direct.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Feb 25 20:27:53 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from matplotlib.ticker import LogFormatter 12 | from scipy.interpolate import griddata 13 | import seaborn as sns 14 | from Stefan_models_tf import Sampler, DataSampler, Stefan1D_direct 15 | import pandas as pd 16 | import os 17 | 18 | 19 | if __name__ == '__main__': 20 | 21 | # Exact Solution 22 | def u(z): 23 | # z = (x, t) 24 | x = z[:, 0: 1] 25 | t = z[:, 1: 2] 26 | u = - 0.5 * x**2 + 2 * x - t - 0.5 27 | return u 28 | 29 | # Exact free boundary 30 | def s(x): 31 | t = x[:, 1: 2] 32 | s = 2 - np.sqrt(3 - 2 * t) 33 | return s 34 | 35 | def h(x): 36 | # du / dx (s(t), t) = h(t) Stefan Neumann condition 37 | t = x[:, 1: 2] 38 | h = np.sqrt(3 - 2 * t) 39 | return h 40 | 41 | def g(x): 42 | # du / dx (0, t) = g(t) Neumann condition at x = 0 43 | N = x.shape[0] 44 | return 2.0 * np.ones((N, 1)) 45 | 46 | def u_0(x): 47 | # Initial condition for u 48 | x = x[:, 0: 1] 49 | return - 0.5 * x**2 + 2 * x - 0.5 50 | 51 | def S_0(x): 52 | # Initial condition for s(0) 53 | S_0 = 2 - np.sqrt(3) 54 | N = x.shape[0] 55 | return S_0 * np.ones((N, 1)) 56 | 57 | # Domain boundaries 58 | ic_coords = np.array([[0.0, 0.0], 59 | [1.0, 0.0]]) 60 | Nc_coords = np.array([[0.0, 0.0], 61 | [0.0, 1.0]]) 62 | dom_coords = np.array([[0.0, 0.0], 63 | [1.0, 1.0]]) 64 | 65 | # Create boundary conditions samplers 66 | ics_sampler = Sampler(2, ic_coords, lambda x: u_0(x), name='Initial Condition') 67 | Ncs_sampler = Sampler(2, Nc_coords, lambda x: h(x), name='Stefan Neumann Boundary Condition') # h(t) = u_x(s(t), t); only the t-component of each sample is used 68 | 69 | # Create residual sampler 70 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 71 | 72 | # Define model 73 | layers_u = [2, 100, 100, 100, 1] 74 | layers_s = [1, 100, 100, 100, 1] # or we can map s to (t, s(t)) 75 | model = Stefan1D_direct(layers_u, layers_s, ics_sampler, Ncs_sampler, res_sampler) 76 | 77 | model.train(nIter=40000, batch_size=128) 78 | 79 | ### Save Model ### 80 | #################### 81 | # save path 82 | relative_path = '/results/' + 'Stefan1D_direct/' 83 | current_directory = os.getcwd() 84 | save_results_to = current_directory + relative_path 85 | if not os.path.exists(save_results_to): 86 | os.makedirs(save_results_to) 87 | 88 | model.saver.save(model.sess, save_results_to, global_step=model.global_step) 89 | 90 | # Test data 91 | nn = 200 92 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 93 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 94 | X, T = np.meshgrid(x, t) 95 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 96 | 97 | # Exact solutions 98 | u_star = u(X_star) 99 | s_star = s(X_star) 100 | 101 | # Predictions 102 | u_pred = model.predict_u(X_star) 103 | s_pred = model.predict_s(X_star)
104 | 105 | # Errors 106 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 107 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 108 | 109 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 110 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 111 | 112 | 113 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 114 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 115 | # Mask the region beyond the free boundary (x > s(t)) 116 | for i in range(nn): 117 | for j in range(nn): 118 | X_ij = np.array([X[i,j], T[i,j]]).reshape(1,2) 119 | u_ij = u(X_ij) 120 | s_ij = s(X_ij) 121 | if X[i,j] > s_ij: 122 | U_star[i,j] = np.nan 123 | U_pred[i,j] = np.nan 124 | 125 | t = np.linspace(0,1, 100)[:, None] 126 | x = np.zeros_like(t) 127 | x_star = np.concatenate((x,t), axis=1) 128 | 129 | s_star = s(x_star) 130 | s_pred = model.predict_s(x_star) 131 | error_s = np.abs(s_star - s_pred) 132 | 133 | fig_1 = plt.figure(1, figsize=(18, 5)) 134 | plt.subplot(1, 3, 1) 135 | plt.plot(s_star, t) 136 | plt.pcolor(X, T, U_star, cmap='jet') 137 | plt.colorbar() 138 | plt.xlabel(r'$x$') 139 | plt.ylabel(r'$t$') 140 | plt.title('Exact $u(x,t)$') 141 | 142 | plt.subplot(1, 3, 2) 143 | plt.pcolor(X, T, U_pred, cmap='jet') 144 | plt.plot(s_pred, t) 145 | plt.colorbar() 146 | plt.xlabel(r'$x$') 147 | plt.ylabel(r'$t$') 148 | plt.title('Predicted $u(x,t)$') 149 | 150 | plt.subplot(1, 3, 3) 151 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 152 | plt.colorbar(format='%.0e') 153 | plt.xlabel(r'$x$') 154 | plt.ylabel(r'$t$') 155 | plt.title('Absolute Error') 156 | 157 | plt.tight_layout() 158 | plt.show() 159 | 160 | fig_2 = plt.figure(2, figsize=(12, 5)) 161 | plt.subplot(1, 2, 1) 162 | plt.plot(t, s_star, label='Exact') 163 | plt.plot(t, s_pred, '--', label='Predicted') 164 | plt.xlabel(r'$t$') 165 | plt.ylabel(r'$s(t)$') 166 | plt.title('Moving Boundary') 167 | plt.legend() 168 | 169 | plt.subplot(1, 2, 2) 170 | plt.plot(t, error_s) 171 | plt.xlabel(r'$t$') 172 | plt.ylabel(r'Point-wise Error') 173 | plt.title('Absolute Error') 174 | plt.yscale('log') 175 | plt.tight_layout() 176 | plt.show() 177 | 178 | 179 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /Stefan 1D1P/Stefan1D_inverse_II.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from scipy.interpolate import griddata 5 | import seaborn as sns 6 | from Stefan_models_tf import Sampler, DataSampler, Stefan1D_inverse_II 7 | import pandas as pd 8 | import os 9 | 10 | if __name__ == '__main__': 11 | 12 | def u(z): 13 | # z = (x, t) 14 | # u(x, t) = -x^2/2 + 2x - t - 1/2 (same benchmark as the direct problem) 15 | x = z[:, 0: 1] 16 | t = z[:, 1: 2] 17 | u = - 0.5 * x**2 + 2 * x - t - 0.5 18 | return u 19 | 20 | def s(x): 21 | t = x[:, 1: 2] 22 | s = 2 - np.sqrt(3 - 2 * t) 23 | return s 24 | 25 | def g(x): 26 | t = x[:, 1: 2] # g(t) = u_x(s(t), t) 27 | g = np.sqrt(3 - 2 * t) 28 | 29 | return g 30 | 31 | def z(x): 32 | z = 2 - np.sqrt(3) # initial free-boundary position s(0) 33 | N = x.shape[0] 34 | return z * np.ones((N, 1)) 35 | 36 | 37 | # Domain boundaries 38 | bc_coords = np.array([[0.0, 0.0], 39 | [0.0, 0.0]]) 40 | Nc_coords = np.array([[0.0, 0.0], 41 | [0.0, 1.0]]) 42 | dom_coords = np.array([[0.0, 0.0], 43 | [1.0, 1.0]]) 44 | 45 | 
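# Note on the setup below (inverse Stefan problem, type II): both u and the
# free boundary s are recovered from (i) sparse interior measurements of u,
# (ii) the interface conditions u(s(t), t) = 0 and u_x(s(t), t) = g(t), and
# (iii) the initial interface position s(0) = 2 - sqrt(3); no initial or
# boundary data for u itself is imposed.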
46 | # Create boundary conditions samplers 47 | bcs_sampler = Sampler(2, bc_coords, lambda x: z(x), name='Boundary Condition') # samples the single point (0, 0); target is s(0) 48 | Ncs_sampler = Sampler(2, dom_coords, lambda x: g(x), name='Neumann Boundary Condition') # g(t) = u_x(s(t), t); only the t-component of each sample is used 49 | 50 | # Create residual sampler 51 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 52 | 53 | # Construct total data set 54 | data_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 55 | data_X, data_u = data_sampler.sample(10**5) # Sample enough points 56 | 57 | # Select data points inside the physical domain (x < s(t)) 58 | mask = data_X[:,0:1] < s(data_X) 59 | data_X = data_X[mask[:,0]] 60 | data_u = data_u[mask[:,0]] 61 | 62 | num = 10 63 | data_X, data_u = DataSampler(data_X, data_u).sample(num) 64 | data_sampler = DataSampler(data_X, data_u) 65 | 66 | # Define model 67 | layers_u = [2, 100, 100, 100, 1] 68 | layers_s = [1, 100, 100, 100, 1] # or we can map s to (t, s(t)) 69 | model = Stefan1D_inverse_II(layers_u, layers_s, bcs_sampler, Ncs_sampler, res_sampler, data_sampler) 70 | 71 | # Train the model 72 | model.train(nIter=40000, batch_size=128) 73 | 74 | # Test data 75 | nn = 200 76 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 77 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 78 | X, T = np.meshgrid(x, t) 79 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 80 | X_bc_star = np.hstack((np.zeros_like(t), t)) 81 | 82 | # Exact solutions 83 | u_star = u(X_star) 84 | s_star = s(X_star) 85 | u_bc_star = u(X_bc_star) 86 | 87 | # Predictions 88 | u_pred = model.predict_u(X_star) 89 | s_pred = model.predict_s(X_star) 90 | u_bc_pred = model.predict_u(X_bc_star) 91 | u_x_bc_pred = model.predict_u_x(X_bc_star) 92 | 93 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 94 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 95 | 96 | u_star_grid = U_star.copy() 97 | u_pred_grid = U_pred.copy() 98 | # Mask the region beyond the free boundary (x > s(t)) 99 | for i in range(nn): 100 | for j in range(nn): 101 | X_ij = np.array([X[i,j], T[i,j]]).reshape(1,2) 102 | u_ij = u(X_ij) 103 | s_ij = s(X_ij) 104 | if X[i,j] > s_ij: 105 | U_star[i,j] = np.nan 106 | U_pred[i,j] = np.nan 107 | u_star_grid[i,j] = 0 108 | u_pred_grid[i,j] = 0 109 | 110 | # Errors 111 | error_u = np.linalg.norm(u_star_grid - u_pred_grid, 2) / np.linalg.norm(u_star_grid, 2) 112 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 113 | 114 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 115 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 116 | 117 | # Plot 118 | t = np.linspace(0,1, nn)[:, None] 119 | x = np.zeros_like(t) 120 | x_star = np.concatenate((x,t), axis=1) 121 | 122 | s_star = s(x_star) 123 | s_pred = model.predict_s(x_star) 124 | error_s = np.abs(s_star - s_pred) 125 | 126 | # Plot for solution u 127 | fig_1 = plt.figure(1, figsize=(18, 5)) 128 | plt.subplot(1, 3, 1) 129 | plt.plot(s_star, t) 130 | plt.plot(data_X[:,0:1], data_X[:,1:2], 'x', color='black') 131 | plt.pcolor(X, T, U_star, cmap='jet') 132 | plt.colorbar() 133 | plt.xlabel(r'$x$') 134 | plt.ylabel(r'$t$') 135 | plt.title('Exact $u(x,t)$') 136 | 137 | plt.subplot(1, 3, 2) 138 | plt.pcolor(X, T, U_pred, cmap='jet') 139 | plt.plot(s_pred, t) 140 | plt.colorbar() 141 | plt.xlabel(r'$x$') 142 | plt.ylabel(r'$t$') 143 | plt.title('Predicted $u(x,t)$') 144 | 145 | plt.subplot(1, 3, 3) 146 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 147 | plt.colorbar(format='%.0e') 148 | plt.xlabel(r'$x$') 149 | plt.ylabel(r'$t$') 150 | plt.title('Absolute Error') 151 | 152 | plt.tight_layout() 153 | plt.show() 154 | 155 | 156 | # Plot for solution s 157 | 158 | t = np.linspace(0,1, nn)[:, None] 159 | x = np.zeros_like(t)
160 | x_star = np.concatenate((x,t), axis=1) 161 | 162 | s_star = s(x_star) 163 | s_pred = model.predict_s(x_star) 164 | 165 | fig_2 = plt.figure(2, figsize=(12, 5)) 166 | plt.subplot(1, 2, 1) 167 | plt.plot(t, s_star, label='Exact') 168 | plt.plot(t, s_pred, '--', label='Predicted') 169 | plt.xlabel(r'$t$') 170 | plt.ylabel(r'$s(t)$') 171 | plt.title('Moving Boundary') 172 | plt.legend() 173 | 174 | plt.subplot(1, 2, 2) 175 | plt.plot(t, error_s) 176 | plt.xlabel(r'$t$') 177 | plt.ylabel(r'Point-wise Error') 178 | plt.title('Absolute Error') 179 | plt.yscale('log') 180 | plt.tight_layout() 181 | plt.show() 182 | 183 | 184 | -------------------------------------------------------------------------------- /Stefan 1D1P/Stefan_1D_inverse_I.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Feb 25 20:27:53 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from scipy.interpolate import griddata 12 | import seaborn as sns 13 | from Stefan_models_tf import Sampler, DataSampler, Stefan1D_inverse_I 14 | import pandas as pd 15 | import os 16 | 17 | 18 | if __name__ == '__main__': 19 | 20 | def u(z): 21 | # z = (x, t) 22 | x = z[:, 0: 1] 23 | t = z[:, 1: 2] 24 | u = - 0.5 * x**2 + 2 * x - t - 0.5 25 | return u 26 | 27 | def s(x): 28 | t = x[:, 1: 2] 29 | s = 2 - np.sqrt(3 - 2 * t) 30 | return s 31 | 32 | def h(x): 33 | # du / dx (s(t), t) = h(t) Stefan Neumann condition 34 | t = x[:, 1: 2] 35 | h = np.sqrt(3 - 2 * t) 36 | return h 37 | 38 | def g(x): 39 | # du / dx (0, t) = g(t) Neumann condition at x = 0 40 | N = x.shape[0] 41 | return 2.0 * np.ones((N, 1)) 42 | 43 | def u_0(x): 44 | # Initial condition for u 45 | x = x[:, 0: 1] 46 | return - 0.5 * x**2 + 2 * x - 0.5 47 | 48 | def S_0(x): 49 | # Initial condition for s(0) 50 | S_0 = 2 - np.sqrt(3) 51 | N = x.shape[0] 52 | return S_0 * np.ones((N, 1)) 53 | 54 | # Domain boundaries 55 | ic_coords = np.array([[0.0, 0.0], 56 | [1.0, 0.0]]) 57 | Nc_coords = np.array([[0.0, 0.0], 58 | [0.0, 1.0]]) 59 | dom_coords = np.array([[0.0, 0.0], 60 | [1.0, 1.0]]) 61 | 62 | # Create boundary conditions samplers 63 | ics_sampler = Sampler(2, ic_coords, lambda x: u_0(x), name='Initial Condition') 64 | Ncs_sampler = Sampler(2, Nc_coords, lambda x: h(x), name='Stefan Neumann Boundary Condition') # h(t) = u_x(s(t), t); only the t-component of each sample is used 65 | 66 | # Create residual sampler 67 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 68 | 69 | # Define model 70 | layers_u = [2, 100, 100, 100, 1] 71 | layers_s = [1, 100, 100, 100, 1] # unused here: Stefan1D_inverse_I prescribes s(t) analytically 72 | 73 | model = Stefan1D_inverse_I(layers_u, ics_sampler, Ncs_sampler, res_sampler) 74 | 75 | # Train the model 76 | model.train(nIter=40000, batch_size=128) 77 | 78 | # Test data 79 | nn = 200 80 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 81 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 82 | 83 | X, T = np.meshgrid(x, t) 84 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 85 | X_bc_star = np.hstack((np.zeros_like(t), t)) 86 | 87 | # Exact solutions 88 | u_star = u(X_star) 89 | u_bc_star = u(X_bc_star) 90 | s_star = s(X_star) 91 | 92 | # Predictions 93 | u_pred = model.predict_u(X_star) 94 | u_bc_pred = model.predict_u(X_bc_star) 95 | u_x_bc_pred = model.predict_u_x(X_bc_star) 96 | 97 | # Errors 98 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 99 | error_u_bc = np.linalg.norm(u_bc_pred - 
u_bc_star, 2) / np.linalg.norm(u_bc_star, 2) 100 | error_u_x_bc = np.linalg.norm(u_x_bc_pred - 2, 2) \ 101 | / np.linalg.norm(2 * np.ones_like(u_x_bc_pred), 2) 102 | 103 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 104 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 105 | 106 | u_star_grid = U_star.copy() 107 | u_pred_grid = U_pred.copy() 108 | # Mask the region beyond the free boundary (x > s(t)) 109 | for i in range(nn): 110 | for j in range(nn): 111 | X_ij = np.array([X[i,j], T[i,j]]).reshape(1,2) 112 | u_ij = u(X_ij) 113 | s_ij = s(X_ij) 114 | if X[i,j] > s_ij: 115 | U_star[i,j] = np.nan 116 | U_pred[i,j] = np.nan 117 | u_star_grid[i,j] = 0 118 | u_pred_grid[i,j] = 0 119 | 120 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 121 | print('Relative L2 error_u_bc: {:.2e}'.format(error_u_bc)) 122 | print('Relative L2 error_u_x_bc: {:.2e}'.format(error_u_x_bc)) 123 | 124 | 125 | # Plots for u 126 | t = np.linspace(0,1, 100)[:, None] 127 | x = np.zeros_like(t) 128 | x_star = np.concatenate((x,t), axis=1) 129 | 130 | s_star = s(x_star) 131 | s_pred = model.predict_s(x_star) 132 | error_s = np.abs(s_star - s_pred) 133 | 134 | fig_1 = plt.figure(1, figsize=(18, 5)) 135 | plt.subplot(1, 3, 1) 136 | 137 | plt.plot(s_star, t, 'black') 138 | plt.pcolor(X, T, U_star, cmap='jet') 139 | plt.colorbar() 140 | plt.xlabel(r'$x$') 141 | plt.ylabel(r'$t$') 142 | plt.title('Exact $u(x,t)$') 143 | 144 | plt.subplot(1, 3, 2) 145 | plt.pcolor(X, T, U_pred, cmap='jet') 146 | plt.plot(s_star, t, 'black') 147 | plt.colorbar() 148 | plt.xlabel(r'$x$') 149 | plt.ylabel(r'$t$') 150 | plt.title('Predicted $u(x,t)$') 151 | 152 | plt.subplot(1, 3, 3) 153 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 154 | plt.colorbar(format='%.0e') 155 | plt.xlabel(r'$x$') 156 | plt.ylabel(r'$t$') 157 | plt.title('Absolute Error') 158 | 159 | plt.tight_layout() 160 | plt.show() 161 | 162 | 163 | # Plot for Neumann boundary condition 164 | X_bc_star = np.hstack((np.zeros_like(t), t)) 165 | u_bc_star = u(X_bc_star) 166 | 167 | u_bc_pred = model.predict_u(X_bc_star) 168 | u_x_bc_pred = model.predict_u_x(X_bc_star) 169 | 170 | fig_2 = plt.figure(2, figsize=(12,5)) 171 | plt.subplot(1, 2, 1) 172 | plt.plot(t, u_bc_star, label='Exact') 173 | plt.plot(t, u_bc_pred, '--', label='Predicted') 174 | plt.xlabel(r'$t$') 175 | plt.ylabel(r'$u(0,t)$') 176 | plt.title('Boundary Condition') 177 | plt.legend() 178 | 179 | plt.subplot(1, 2, 2) 180 | plt.plot(t, 2 * np.ones_like(t), label='Exact') 181 | plt.plot(t, u_x_bc_pred, '--', label='Predicted') 182 | plt.xlabel(r'$t$') 183 | plt.ylabel(r'$u_x(0,t)$') 184 | plt.ylim(1.5,2.5) 185 | plt.title('Neumann Condition') 186 | 187 | plt.legend() 188 | plt.tight_layout() 189 | plt.show() 190 | -------------------------------------------------------------------------------- /Stefan 1D1P/Stefan_models_tf.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import timeit 4 | 5 | class Sampler: 6 | # Initialize the class 7 | def __init__(self, dim, coords, func, name=None): 8 | self.dim = dim 9 | self.coords = coords 10 | self.func = func 11 | self.name = name 12 | 13 | def sample(self, N): 14 | x = self.coords[0:1, :] + (self.coords[1:2, :] - self.coords[0:1, :]) * np.random.uniform(0, 1, size=(N, self.dim)) 15 | y = self.func(x) 16 | return x, y 17 | 
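# Usage sketch for Sampler (hypothetical values): `coords` stacks the domain's
# min corner (row 0) and max corner (row 1); `sample` draws N points uniformly
# in that box and evaluates `func` on them, e.g.
#
#   dom = np.array([[0.0, 0.0], [1.0, 1.0]])
#   sampler = Sampler(2, dom, lambda x: np.zeros((x.shape[0], 1)))
#   X, Y = sampler.sample(128)  # X has shape (128, 2), Y has shape (128, 1)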
18 | class DataSampler: 19 | # Initialize the class 20 | def __init__(self, X, Y, name = None): 21 | self.X = X 22 | self.Y = Y 23 | self.N = self.X.shape[0] 24 | 25 | def sample(self, batch_size): 26 | idx = np.random.choice(self.N, batch_size, replace=True) 27 | X_batch = self.X[idx, :] 28 | Y_batch = self.Y[idx, :] 29 | return X_batch, Y_batch 30 | 31 | class Stefan1D_direct: 32 | def __init__(self, layers_u, layers_s, ics_sampler, Ncs_sampler, res_sampler): 33 | # Normalization constants 34 | X, _ = res_sampler.sample(np.int32(1e5)) 35 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 36 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 37 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 38 | 39 | # Samplers 40 | self.ics_sampler = ics_sampler 41 | self.Ncs_sampler = Ncs_sampler 42 | self.res_sampler = res_sampler 43 | 44 | # Initialize network weights and biases 45 | self.layers_u = layers_u 46 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 47 | 48 | self.layers_s = layers_s 49 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 50 | 51 | # Define Tensorflow session 52 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 53 | 54 | # Define placeholders and computational graph 55 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 56 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 57 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 58 | self.s_tf = tf.placeholder(tf.float32, shape=(None, 1)) 59 | 60 | self.x_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 61 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 62 | self.u_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 63 | 64 | self.x_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 65 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 66 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 67 | self.s_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 68 | 69 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 70 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 71 | 72 | # Evaluate predictions 73 | self.s_pred = self.net_s(self.t_u_tf) 74 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 75 | 76 | self.u_0_pred = self.net_u(self.x_0_tf, self.t_0_tf) 77 | self.u_Sbc_pred = self.net_u((self.s_pred - self.mu_x) / self.sigma_x, self.t_u_tf) 78 | self.s_0_pred = self.net_s(self.t_0_tf) 79 | self.u_Nc_pred = self.net_u_x(self.x_Nc_tf, self.t_Nc_tf) 80 | 81 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.t_r_tf) 82 | self.r_Nc_pred = self.net_r_Nc(self.t_Nc_tf) 83 | 84 | # Boundary loss and Neumann loss 85 | self.loss_u_0 = tf.reduce_mean(tf.square(self.u_0_pred - self.u_0_tf)) 86 | self.loss_Sbc = tf.reduce_mean(tf.square(self.u_Sbc_pred)) 87 | self.loss_s_0 = tf.reduce_mean(tf.square(self.s_0_pred - (2.0 - np.sqrt(3)))) 88 | self.loss_uNc = tf.reduce_mean(tf.square(self.u_Nc_pred - 2.0)) 89 | 90 | # Residual loss 91 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) 92 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred - self.s_Nc_tf)) 93 | 94 | # Total loss 95 | self.loss_ics = self.loss_s_0 + self.loss_u_0 96 | self.loss_bcs = self.loss_Sbc + self.loss_SNc + self.loss_uNc 97 | self.loss = self.loss_bcs + self.loss_ics + self.loss_res 98 | 99 | # Define optimizer with learning rate schedule 100 | self.global_step = tf.Variable(0, trainable=False) 101 | starter_learning_rate = 1e-3 102 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 103 | 1000, 0.9, staircase=False) 104 | # Passing global_step to minimize() 
will increment it at each step. 105 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 106 | 107 | self.loss_bcs_log = [] 108 | self.loss_ics_log = [] 109 | self.loss_res_log = [] 110 | self.saver = tf.train.Saver() 111 | 112 | # Initialize Tensorflow variables 113 | init = tf.global_variables_initializer() 114 | self.sess.run(init) 115 | 116 | # Xavier initialization 117 | def xavier_init(self, size): 118 | in_dim = size[0] 119 | out_dim = size[1] 120 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 121 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 122 | dtype=tf.float32) 123 | 124 | # Initialize network weights and biases using Xavier initialization 125 | def initialize_NN(self, layers): 126 | weights = [] 127 | biases = [] 128 | num_layers = len(layers) 129 | for l in range(0, num_layers - 1): 130 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 131 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 132 | weights.append(W) 133 | biases.append(b) 134 | return weights, biases 135 | 136 | # Evaluates the forward pass 137 | def forward_pass_u(self, H): 138 | num_layers = len(self.layers_u) 139 | for l in range(0, num_layers - 2): 140 | W = self.weights_u[l] 141 | b = self.biases_u[l] 142 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 143 | W = self.weights_u[-1] 144 | b = self.biases_u[-1] 145 | H = tf.add(tf.matmul(H, W), b) 146 | return H 147 | 148 | def forward_pass_s(self, H): 149 | num_layers = len(self.layers_s) 150 | for l in range(0, num_layers - 2): 151 | W = self.weights_s[l] 152 | b = self.biases_s[l] 153 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 154 | W = self.weights_s[-1] 155 | b = self.biases_s[-1] 156 | H = tf.add(tf.matmul(H, W), b) 157 | return H 158 | 159 | def net_u(self, x, t): 160 | u = self.forward_pass_u(tf.concat([x, t], 1)) 161 | return u 162 | 163 | def net_s(self, t): 164 | s = self.forward_pass_s(t) 165 | return s 166 | 167 | def net_u_x(self, x, t): 168 | u = self.net_u(x, t) 169 | u_x = tf.gradients(u, x)[0] / self.sigma_x 170 | return u_x 171 | 172 | # Forward pass for residual 173 | def net_r_u(self, x, t): 174 | u = self.net_u(x, t) 175 | u_t = tf.gradients(u, t)[0] / self.sigma_t 176 | u_x = tf.gradients(u, x)[0] / self.sigma_x 177 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 178 | residual = u_t - u_xx 179 | return residual 180 | 181 | def net_r_Nc(self, t): 182 | s = self.net_s(t) 183 | 184 | # Normalize s 185 | s = (s - self.mu_x) / self.sigma_x 186 | 187 | u_x = self.net_u_x(s, t) 188 | residual = u_x 189 | return residual 190 | 191 | def fetch_minibatch(self, sampler, N): 192 | X, Y = sampler.sample(N) 193 | X = (X - self.mu_X) / self.sigma_X 194 | return X, Y 195 | 196 | def train(self, nIter=10000, batch_size=128): 197 | start_time = timeit.default_timer() 198 | for it in range(nIter): 199 | # Fetch boundary and Neumann mini-batches 200 | X_ics_batch, u_ics_batch = self.fetch_minibatch(self.ics_sampler, batch_size) 201 | X_Ncs_batch, u_Ncs_batch = self.fetch_minibatch(self.Ncs_sampler, batch_size) 202 | 203 | # Fetch residual mini-batch 204 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 205 | 206 | # Define a dictionary for associating placeholders with data 207 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 208 | self.x_0_tf: X_ics_batch[:, 0:1], self.t_0_tf: X_ics_batch[:, 1:2], 209 | self.u_0_tf: u_ics_batch, 210 | self.x_Nc_tf: 
X_Ncs_batch[:, 0:1], self.t_Nc_tf: X_Ncs_batch[:, 1:2], 211 | self.s_Nc_tf: u_Ncs_batch, 212 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2]} 213 | 214 | # Run the Tensorflow session to minimize the loss 215 | self.sess.run(self.train_op, tf_dict) 216 | 217 | # Print 218 | if it % 10 == 0: 219 | elapsed = timeit.default_timer() - start_time 220 | loss_value = self.sess.run(self.loss, tf_dict) 221 | loss_ics_value, loss_bcs_value, loss_res_value = self.sess.run([self.loss_ics, self.loss_bcs, self.loss_res], tf_dict) 222 | self.loss_ics_log.append(loss_ics_value) 223 | self.loss_bcs_log.append(loss_bcs_value) 224 | self.loss_res_log.append(loss_res_value) 225 | 226 | print('It: %d, Loss: %.3e, Loss_ics: %.3e, Loss_bcs: %.3e, Loss_res: %.3e, Time: %.2f' % 227 | (it, loss_value, loss_ics_value, loss_bcs_value, loss_res_value, elapsed)) 228 | start_time = timeit.default_timer() 229 | 230 | # Predictions for u 231 | def predict_u(self, X_star): 232 | X_star = (X_star - self.mu_X) / self.sigma_X 233 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 234 | u_star = self.sess.run(self.u_pred, tf_dict) 235 | return u_star 236 | 237 | # Predictions for s 238 | def predict_s(self, X_star): 239 | X_star = (X_star - self.mu_X) / self.sigma_X 240 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 241 | s_star = self.sess.run(self.s_pred, tf_dict) 242 | return s_star 243 | 244 | def predict_r_u(self, X_star): 245 | X_star = (X_star - self.mu_X) / self.sigma_X 246 | tf_dict = {self.x_r_tf: X_star[:, 0:1], self.t_r_tf: X_star[:, 1:2]} # r_u_pred is built from the residual placeholders 247 | r_u_star = self.sess.run(self.r_u_pred, tf_dict) 248 | return r_u_star 249 | 250 | 251 | class Stefan1D_inverse_I: 252 | def __init__(self, layers_u, ics_sampler, Ncs_sampler, res_sampler): 253 | # Normalization constants 254 | X, _ = res_sampler.sample(np.int32(1e5)) 255 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 256 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 257 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 258 | 259 | # Samplers 260 | self.ics_sampler = ics_sampler 261 | self.Ncs_sampler = Ncs_sampler 262 | self.res_sampler = res_sampler 263 | 264 | # Initialize network weights and biases 265 | self.layers_u = layers_u 266 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 267 | 268 | # Define Tensorflow session 269 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 270 | 271 | # Define placeholders and computational graph 272 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 273 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 274 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 275 | 276 | self.x_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 277 | self.t_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 278 | self.u_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 279 | 280 | self.x_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 281 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 282 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 283 | self.s_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 284 | 285 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 286 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 287 | 288 | # Evaluate predictions 289 | self.s_pred = self.net_s(self.t_u_tf) # s(t) is known analytically in this inverse problem 290 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 291 | self.u_x_pred = self.net_u_x(self.x_u_tf, self.t_u_tf) 292 | 293 | self.u_ic_pred = self.net_u(self.x_ic_tf, self.t_ic_tf)
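# Note: net_s returns s(t) in physical units, while net_u expects inputs
# normalized by (mu_X, sigma_X); s_pred is therefore rescaled below before u
# is evaluated on the free boundary.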
294 | self.u_Sbc_pred = self.net_u((self.s_pred - self.mu_x) / self.sigma_x, self.t_u_tf) 295 | self.u_Nc_pred = self.net_u_x(self.x_Nc_tf, self.t_Nc_tf) 296 | 297 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.t_r_tf) 298 | self.r_Nc_pred = self.net_r_Nc(self.t_Nc_tf) 299 | 300 | # Boundary loss and Neumann loss 301 | self.loss_ics = tf.reduce_mean(tf.square(self.u_ic_pred - self.u_ic_tf)) 302 | self.loss_Sbc = tf.reduce_mean(tf.square(self.u_Sbc_pred)) 303 | 304 | # Residual loss 305 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) 306 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred - self.s_Nc_tf)) 307 | 308 | # Total loss 309 | self.loss_bcs = self.loss_Sbc + self.loss_SNc 310 | self.loss = self.loss_bcs + self.loss_ics + self.loss_res 311 | 312 | # Define optimizer with learning rate schedule 313 | self.global_step = tf.Variable(0, trainable=False) 314 | starter_learning_rate = 1e-3 315 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 316 | 1000, 0.9, staircase=False) 317 | # Passing global_step to minimize() will increment it at each step. 318 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 319 | 320 | # loss loggers 321 | self.loss_bcs_log = [] 322 | self.loss_ics_log = [] 323 | self.loss_res_log = [] 324 | self.saver = tf.train.Saver() 325 | 326 | # Initialize Tensorflow variables 327 | init = tf.global_variables_initializer() 328 | self.sess.run(init) 329 | 330 | # Xavier initialization 331 | def xavier_init(self, size): 332 | in_dim = size[0] 333 | out_dim = size[1] 334 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 335 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 336 | dtype=tf.float32) 337 | 338 | # Initialize network weights and biases using Xavier initialization 339 | def initialize_NN(self, layers): 340 | weights = [] 341 | biases = [] 342 | num_layers = len(layers) 343 | for l in range(0, num_layers - 1): 344 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 345 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 346 | weights.append(W) 347 | biases.append(b) 348 | return weights, biases 349 | 350 | # Evaluates the forward pass 351 | def forward_pass_u(self, H): 352 | num_layers = len(self.layers_u) 353 | for l in range(0, num_layers - 2): 354 | W = self.weights_u[l] 355 | b = self.biases_u[l] 356 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 357 | W = self.weights_u[-1] 358 | b = self.biases_u[-1] 359 | H = tf.add(tf.matmul(H, W), b) 360 | return H 361 | 362 | # Forward pass for u 363 | def net_u(self, x, t): 364 | u = self.forward_pass_u(tf.concat([x, t], 1)) 365 | return u 366 | 367 | # Forward pass for s 368 | def net_s(self, t): 369 | # denormalize t 370 | t = t * self.sigma_t + self.mu_t 371 | s = 2 - tf.math.sqrt(3 - 2 * t) 372 | return s 373 | 374 | def net_u_x(self, x, t): 375 | u = self.net_u(x, t) 376 | u_x = tf.gradients(u, x)[0] / self.sigma_x 377 | return u_x 378 | 379 | # Forward pass for residual 380 | def net_r_u(self, x, t): 381 | u = self.net_u(x, t) 382 | u_t = tf.gradients(u, t)[0] / self.sigma_t 383 | u_x = tf.gradients(u, x)[0] / self.sigma_x 384 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 385 | residual = u_t - u_xx 386 | return residual 387 | 388 | def net_r_Nc(self, t): 389 | s = self.net_s(t) 390 | 391 | # Normalize s 392 | s = (s - self.mu_x) / self.sigma_x 393 | 394 | u_x = self.net_u_x(s, t) 395 | residual = u_x 396 | return residual 397 | 398 | def 
fetch_minibatch(self, sampler, N): 399 | X, Y = sampler.sample(N) 400 | X = (X - self.mu_X) / self.sigma_X 401 | return X, Y 402 | 403 | def train(self, nIter=10000, batch_size=128): 404 | start_time = timeit.default_timer() 405 | for it in range(nIter): 406 | # Fetch boundary and Neumann mini-batches 407 | X_ics_batch, u_ics_batch = self.fetch_minibatch(self.ics_sampler, batch_size) 408 | X_Ncs_batch, u_Ncs_batch = self.fetch_minibatch(self.Ncs_sampler, batch_size) 409 | 410 | # Fetch residual mini-batch 411 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 412 | 413 | # Define a dictionary for associating placeholders with data 414 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 415 | self.x_ic_tf: X_ics_batch[:, 0:1], self.t_ic_tf: X_ics_batch[:, 1:2], 416 | self.u_ic_tf: u_ics_batch, 417 | self.x_Nc_tf: X_Ncs_batch[:, 0:1], self.t_Nc_tf: X_Ncs_batch[:, 1:2], 418 | self.s_Nc_tf: u_Ncs_batch, 419 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2]} 420 | 421 | # Run the Tensorflow session to minimize the loss 422 | self.sess.run(self.train_op, tf_dict) 423 | 424 | # Print 425 | if it % 10 == 0: 426 | elapsed = timeit.default_timer() - start_time 427 | loss_value = self.sess.run(self.loss, tf_dict) 428 | loss_ics_value, loss_bcs_value, loss_res_value = self.sess.run( 429 | [self.loss_ics, self.loss_bcs, self.loss_res], tf_dict) 430 | 431 | self.loss_ics_log.append(loss_ics_value) 432 | self.loss_bcs_log.append(loss_bcs_value) 433 | self.loss_res_log.append(loss_res_value) 434 | 435 | print('It: %d, Loss: %.3e, Loss_ics: %.3e, Loss_bcs: %.3e, Loss_res: %.3e, Time: %.2f' % 436 | (it, loss_value, loss_ics_value, loss_bcs_value, loss_res_value, elapsed)) 437 | start_time = timeit.default_timer() 438 | 439 | 440 | # Predictions for u 441 | def predict_u(self, X_star): 442 | X_star = (X_star - self.mu_X) / self.sigma_X 443 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 444 | u_star = self.sess.run(self.u_pred, tf_dict) 445 | return u_star 446 | 447 | # Predictions for s 448 | def predict_s(self, X_star): 449 | X_star = (X_star - self.mu_X) / self.sigma_X 450 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 451 | s_star = self.sess.run(self.s_pred, tf_dict) 452 | return s_star 453 | 454 | # Predictions for u_x 455 | def predict_u_x(self, X_star): 456 | X_star = (X_star - self.mu_X) / self.sigma_X 457 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 458 | r_u_star = self.sess.run(self.u_x_pred, tf_dict) 459 | return r_u_star 460 | 461 | 462 | class Stefan1D_inverse_II: 463 | def __init__(self, layers_u, layers_s, bcs_sampler, Ncs_sampler, res_sampler, data_sampler): 464 | # Normalization constants 465 | X, _ = res_sampler.sample(np.int32(1e5)) 466 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 467 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 468 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 469 | 470 | # Samplers 471 | self.bcs_sampler = bcs_sampler 472 | self.Ncs_sampler = Ncs_sampler 473 | self.res_sampler = res_sampler 474 | self.data_sampler = data_sampler 475 | 476 | # Initialize network weights and biases 477 | self.layers_u = layers_u 478 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 479 | 480 | self.layers_s = layers_s 481 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 482 | 483 | # Define Tensorflow session 484 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 485 | 486 | # Define placeholders and 
computational graph 487 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 488 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 489 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # u(x,t) 490 | self.s_tf = tf.placeholder(tf.float32, shape=(None, 1)) 491 | 492 | self.x_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 493 | self.t_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 494 | self.u_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 495 | 496 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 497 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 498 | 499 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 500 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 501 | 502 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 503 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 504 | 505 | # Evaluate predictions 506 | self.s_pred = self.net_s(self.t_u_tf) 507 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 508 | 509 | self.u_data_pred = self.net_u(self.x_data_tf, self.t_data_tf) 510 | 511 | self.u_bc_pred = self.net_u((self.s_pred - self.mu_x)/self.sigma_x, self.t_u_tf) 512 | self.u_Nc_pred = self.net_u_x((self.net_s(self.t_Nc_tf) - self.mu_x)/self.sigma_x, self.t_Nc_tf) 513 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.t_r_tf) 514 | 515 | # Stefan Boundary loss 516 | self.loss_bc = tf.reduce_mean(tf.square(self.u_bc_pred)) 517 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.t_0_tf) - self.s_0_tf)) # match s(0) to the sampled initial value s_0 518 | self.loss_Nc = tf.reduce_mean(tf.square(self.u_Nc_pred - self.u_Nc_tf)) 519 | # Data loss 520 | self.loss_data = tf.reduce_mean(tf.square(self.u_data_pred - self.u_data_tf)) 521 | 522 | # Boundary loss 523 | self.loss_bcs = self.loss_bc + self.loss_s_0 + self.loss_Nc 524 | # Neumann condition is important! 525 | 526 | # Residual loss 527 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) 528 | 529 | # Total loss 530 | self.loss = self.loss_bcs + self.loss_res + self.loss_data 531 | 532 | # Define optimizer with learning rate schedule 533 | self.global_step = tf.Variable(0, trainable=False) 534 | starter_learning_rate = 1e-3 535 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 536 | 1000, 0.9, staircase=False) 537 | # Passing global_step to minimize() will increment it at each step. 538 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 539 | 540 | self.loss_bcs_log = [] 541 | self.loss_data_log = [] 542 | self.loss_res_log = [] 543 | self.saver = tf.train.Saver() 544 | 545 | # Initialize Tensorflow variables 546 | init = tf.global_variables_initializer() 547 | self.sess.run(init) 548 | 549 | # Xavier initialization 550 | def xavier_init(self, size): 551 | in_dim = size[0] 552 | out_dim = size[1] 553 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.)
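# (Equivalently, std = sqrt(2 / (fan_in + fan_out)), the Glorot-normal scale.)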
554 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 555 | dtype=tf.float32) 556 | 557 | # Initialize network weights and biases using Xavier initialization 558 | def initialize_NN(self, layers): 559 | weights = [] 560 | biases = [] 561 | num_layers = len(layers) 562 | for l in range(0, num_layers - 1): 563 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 564 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 565 | weights.append(W) 566 | biases.append(b) 567 | return weights, biases 568 | 569 | # Evaluates the forward pass u 570 | def forward_pass_u(self, H): 571 | num_layers = len(self.layers_u) 572 | for l in range(0, num_layers - 2): 573 | W = self.weights_u[l] 574 | b = self.biases_u[l] 575 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 576 | W = self.weights_u[-1] 577 | b = self.biases_u[-1] 578 | H = tf.add(tf.matmul(H, W), b) 579 | return H 580 | 581 | # Evaluates the forward pass s 582 | def forward_pass_s(self, H): 583 | num_layers = len(self.layers_s) 584 | for l in range(0, num_layers - 2): 585 | W = self.weights_s[l] 586 | b = self.biases_s[l] 587 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 588 | W = self.weights_s[-1] 589 | b = self.biases_s[-1] 590 | H = tf.add(tf.matmul(H, W), b) 591 | return H 592 | 593 | # Forward pass for u 594 | def net_u(self, x, t): 595 | u = self.forward_pass_u(tf.concat([x, t], 1)) 596 | return u 597 | 598 | # Forward pass for s 599 | def net_s(self, t): 600 | s = self.forward_pass_s(t) 601 | return s 602 | 603 | def net_s_t(self, t): 604 | s = self.net_s(t) 605 | s_t = tf.gradients(s, t)[0] / self.sigma_t 606 | return s_t 607 | 608 | def net_u_x(self, x, t): 609 | u = self.net_u(x, t) 610 | u_x = tf.gradients(u, x)[0] / self.sigma_x 611 | return u_x 612 | 613 | # Forward pass for residual 614 | def net_r_u(self, x, t): 615 | u = self.net_u(x, t) 616 | u_t = tf.gradients(u, t)[0] / self.sigma_t 617 | u_x = tf.gradients(u, x)[0] / self.sigma_x 618 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 619 | residual = u_t - u_xx 620 | return residual 621 | 622 | def fetch_minibatch(self, sampler, N): 623 | X, Y = sampler.sample(N) 624 | X = (X - self.mu_X) / self.sigma_X 625 | return X, Y 626 | 627 | def train(self, nIter=10000, batch_size=128): 628 | start_time = timeit.default_timer() 629 | for it in range(nIter): 630 | # Fetch boundary and data mini-batches 631 | X_0_batch, s_0_batch = self.fetch_minibatch(self.bcs_sampler, batch_size) 632 | X_Nc_batch, u_Nc_batch = self.fetch_minibatch(self.Ncs_sampler, batch_size) 633 | X_data_batch, u_data_batch = self.fetch_minibatch(self.data_sampler, batch_size) 634 | 635 | # Fetch residual mini-batch 636 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 637 | 638 | # Define a dictionary for associating placeholders with data 639 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 640 | self.t_Nc_tf: X_Nc_batch[:, 1:2], self.u_Nc_tf: u_Nc_batch, 641 | self.t_0_tf: X_0_batch[:, 1:2], self.s_0_tf: s_0_batch, 642 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2], 643 | self.x_data_tf: X_data_batch[:, 0:1], self.t_data_tf: X_data_batch[:, 1:2], 644 | self.u_data_tf: u_data_batch} 645 | 646 | self.sess.run(self.train_op, tf_dict) 647 | 648 | # Print 649 | if it % 10 == 0: 650 | elapsed = timeit.default_timer() - start_time 651 | loss_value = self.sess.run(self.loss, tf_dict) 652 | loss_bcs_value, loss_data_value, loss_res_value = self.sess.run( 653 | [self.loss_bcs, 
self.loss_data, self.loss_res], tf_dict) 654 | self.loss_bcs_log.append(loss_bcs_value) 655 | self.loss_data_log.append(loss_data_value) 656 | self.loss_res_log.append(loss_res_value) 657 | 658 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_Data: %.3e, Loss_res: %.3e, Time: %.2f' % 659 | (it, loss_value, loss_bcs_value, loss_data_value, loss_res_value, elapsed)) 660 | 661 | start_time = timeit.default_timer() 662 | 663 | # Predictions for u 664 | def predict_u(self, X_star): 665 | X_star = (X_star - self.mu_X) / self.sigma_X 666 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 667 | u_star = self.sess.run(self.u_pred, tf_dict) 668 | return u_star 669 | 670 | # Predictions for s 671 | def predict_s(self, X_star): 672 | X_star = (X_star - self.mu_X) / self.sigma_X 673 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 674 | s_star = self.sess.run(self.s_pred, tf_dict) 675 | return s_star 676 | 677 | # Predictions for u_x 678 | def predict_u_x(self, X_star): 679 | X_star = (X_star - self.mu_X) / self.sigma_X 680 | tf_dict = {self.t_Nc_tf: X_star[:, 1:2]} 681 | r_u_star = self.sess.run(self.u_Nc_pred, tf_dict) 682 | return r_u_star 683 | 684 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan 1D2P.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan1D_2P_direct.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Feb 5 20:01:15 2020 4 | @author: sifan 5 | """ 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from scipy.interpolate import griddata 11 | import seaborn as sns 12 | from Stefan1D_2p_models_tf import Sampler, DataSampler, Stefan1D_2P_direct 13 | import pandas as pd 14 | import os 15 | 16 | if __name__ == '__main__': 17 | 18 | # Exact u1 19 | def u1(x): 20 | # x = (x, t) 21 | t = x[:, 1:2] # compute t first! otherwise x is changed! 
22 | x = x[:, 0:1] 23 | 24 | u1 = 2 * (np.exp((t + 0.5 - x) / 2) - 1) 25 | return u1 26 | 27 | # Exact u2 28 | def u2(x): 29 | # x = (x, t) 30 | t = x[:, 1:2] 31 | x = x[:, 0:1] 32 | 33 | u2 = np.exp(t + 0.5 - x) - 1 34 | return u2 35 | 36 | # Exact s 37 | def s(x): 38 | t = x[:, 1:2] 39 | s = t + 0.5 40 | return s 41 | 42 | # Exact u 43 | def u(x): 44 | return np.where(x[:, 0:1] <= s(x), u1(x), u2(x)) 45 | 46 | # initial condition for u1 47 | def u1_0(x): 48 | x = x[:, 0:1] 49 | u1_0 = 2 * (np.exp((0.5 - x) / 2) - 1) 50 | return u1_0 51 | 52 | # initial condition for u2 53 | def u2_0(x): 54 | x = x[:, 0:1] 55 | u2 = np.exp(0.5 - x) - 1 56 | return u2 57 | 58 | 59 | def psi_1(x): 60 | t = x[:, 1:2] 61 | psi_1 = 2 * (np.exp((t + 0.5) / 2) - 1) 62 | return psi_1 63 | 64 | def psi_2(x): 65 | t = x[:, 1:2] 66 | psi_2 = np.exp(t + 0.5 - 2) - 1 67 | return psi_2 68 | 69 | # initial condition for s 70 | def s_0(x): 71 | z = 0.5 72 | N = x.shape[0] 73 | return z * np.ones((N, 1)) 74 | 75 | # Domain boundaries 76 | ics_coords = np.array([[0.0, 0.0], 77 | [2.0, 0.0]]) 78 | bc1_coords = np.array([[0.0, 0.0], 79 | [0.0, 1.0]]) 80 | bc2_coords = np.array([[2.0, 0.0], 81 | [2.0, 1.0]]) 82 | dom_coords = np.array([[0.0, 0.0], 83 | [2.0, 1.0]]) 84 | 85 | # Create boundary conditions samplers 86 | ic1_sampler = Sampler(2, ics_coords, lambda x: u1_0(x), name='Initial Condition') 87 | ic2_sampler = Sampler(2, ics_coords, lambda x: u2_0(x), name='Initial Condition') 88 | 89 | ics_sampler = [ic1_sampler, ic2_sampler] 90 | 91 | bc1_sampler = Sampler(2, bc1_coords, lambda x: psi_1(x), name='Boundary Condition') 92 | bc2_sampler = Sampler(2, bc2_coords, lambda x: psi_2(x), name='Boundary Condition') 93 | 94 | bcs_sampler = [bc1_sampler, bc2_sampler] 95 | 96 | # Create residual sampler 97 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 98 | 99 | # Define model 100 | layers_u = [2, 100, 100, 100, 2] 101 | layers_s = [1, 100, 100, 100, 1] # or we can map s to (t, s(t)) 102 | model = Stefan1D_2P_direct(layers_u, layers_s, bcs_sampler, ics_sampler, res_sampler) 103 | 104 | # Train the model 105 | model.train(nIter=40000, batch_size=128) 106 | 107 | # Test data 108 | nn = 100 109 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 110 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 111 | X, T = np.meshgrid(x, t) 112 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 113 | 114 | # Exact solutions 115 | u_star = u(X_star) 116 | s_star = s(X_star) 117 | 118 | # Predictions 119 | u_pred = model.predict_u(X_star) 120 | s_pred = model.predict_s(X_star) 121 | 122 | # Errors 123 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 124 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 125 | 126 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 127 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 128 | 129 | 130 | # Plot 131 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 132 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 133 | 134 | t = np.linspace(0,1, 100)[:, None] 135 | x = np.zeros_like(t) 136 | x_star = np.concatenate((x,t), axis=1) 137 | 138 | s_star = s(x_star) 139 | s_pred = model.predict_s(x_star) 140 | error_s = np.abs(s_star - s_pred) 141 | 142 | # Plot for solution u 143 | fig_1 = plt.figure(1, figsize=(18, 5)) 144 | plt.subplot(1, 3, 1) 145 | plt.plot(s_star, t, color='black', linewidth=2) 146 | plt.pcolor(X, T, U_star, cmap='jet') 147 | 
plt.colorbar() 148 | plt.xlabel(r'$x$') 149 | plt.ylabel(r'$t$') 150 | plt.title('Exact $u(x,t)$') 151 | 152 | plt.subplot(1, 3, 2) 153 | plt.pcolor(X, T, U_pred, cmap='jet') 154 | plt.plot(s_pred, t, color='black', linewidth=2) 155 | plt.colorbar() 156 | plt.xlabel(r'$x$') 157 | plt.ylabel(r'$t$') 158 | plt.title('Predicted $u(x,t)$') 159 | 160 | plt.subplot(1, 3, 3) 161 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 162 | plt.colorbar(format='%.0e') 163 | plt.xlabel(r'$x$') 164 | plt.ylabel(r'$t$') 165 | plt.title('Absolute Error') 166 | 167 | plt.tight_layout() 168 | plt.show() 169 | 170 | # Plot for solution s 171 | fig_2 = plt.figure(2, figsize=(12, 5)) 172 | plt.subplot(1, 2, 1) 173 | plt.plot(t, s_star, label='Exact') 174 | plt.plot(t, s_pred, '--', label='Predicted') 175 | plt.xlabel(r'$t$') 176 | plt.ylabel(r'$s(t)$') 177 | plt.title('Moving Boundary') 178 | plt.legend() 179 | 180 | plt.subplot(1, 2, 2) 181 | plt.plot(t, error_s) 182 | plt.xlabel(r'$t$') 183 | plt.ylabel(r'Point-wise Error') 184 | plt.title('Absolute Error') 185 | plt.yscale('log') 186 | plt.tight_layout() 187 | plt.show() 188 | 189 | 190 | 191 | 192 | 193 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan1D_2P_inverse_I.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from scipy.interpolate import griddata 5 | import seaborn as sns 6 | from Stefan1D_2p_models_tf import Sampler, DataSampler, Stefan1D_2P_inverse_I 7 | import pandas as pd 8 | import os 9 | 10 | if __name__ == '__main__': 11 | 12 | # Exact u1 13 | def u1(x): 14 | # x = (x, t) 15 | t = x[:, 1:2] 16 | x = x[:, 0:1] 17 | 18 | u1 = 2 * (np.exp((t + 0.5 - x) / 2) - 1) 19 | return u1 20 | 21 | # Exact u2 22 | def u2(x): 23 | # x = (x, t) 24 | t = x[:, 1:2] 25 | x = x[:, 0:1] 26 | 27 | u2 = np.exp(t + 0.5 - x) - 1 28 | return u2 29 | 30 | # Exact s 31 | def s(x): 32 | t = x[:, 1:2] 33 | s = t + 0.5 34 | return s 35 | 36 | # Exact u 37 | def u(x): 38 | return np.where(x[:, 0:1] <= s(x), u1(x), u2(x)) 39 | 40 | # condition at the final time 41 | def u1_T(x): 42 | x = x[:, 0:1] 43 | u1_T = 2 * (np.exp((1 + 0.5 - x) / 2) - 1) 44 | return u1_T 45 | 46 | def u2_T(x): 47 | # x = (x, t) 48 | 49 | x = x[:, 0:1] 50 | u2_T = np.exp(1 + 0.5 - x) - 1 51 | return u2_T 52 | 53 | # Initial condition for s 54 | def s_0(x): 55 | z = 0.5 56 | N = x.shape[0] 57 | return z * np.ones((N, 1)) 58 | 59 | # Initial condition for u1 60 | def u1_0(x): 61 | x = x[:, 0:1] 62 | u1_0 = 2 * (np.exp((0.5 - x) / 2) - 1) 63 | return u1_0 64 | 65 | # Initial condition for u2 66 | def u2_0(x): 67 | x = x[:, 0:1] 68 | u2 = np.exp(0.5 - x) - 1 69 | return u2 70 | 71 | 72 | # Domain boundaries 73 | ics_coords = np.array([[0.0, 0.0], 74 | [2.0, 0.0]]) 75 | ft_coords = np.array([[0.0, 1.0], 76 | [2.0, 1.0]]) 77 | dom_coords = np.array([[0.0, 0.0], 78 | [2.0, 1.0]]) 79 | 80 | ic1_sampler = Sampler(2, ics_coords, lambda x: u1_0(x), name='Initial Condition') 81 | ic2_sampler = Sampler(2, ics_coords, lambda x: u2_0(x), name='Initial Condition') 82 | 83 | ics_sampler = [ic1_sampler, ic2_sampler] 84 | 85 | ft1_sampler = Sampler(2, ft_coords, lambda x: u1_T(x), name='Final Time Condition') 86 | ft2_sampler = Sampler(2, ft_coords, lambda x: u2_T(x), name='Final Time Condition') 87 | 88 | ft_sampler = [ft1_sampler, ft2_sampler] 89 | 90 | # Create residual sampler 91 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), 
name='Forcing') 92 | 93 | # Define model 94 | layers_u = [2, 100, 100, 100, 2] 95 | layers_s = [1, 100, 100, 100, 1] 96 | model = Stefan1D_2P_inverse_I(layers_u, layers_s, ics_sampler, ft_sampler, res_sampler) 97 | 98 | # Train the model 99 | model.train(nIter=40000, batch_size=128) 100 | 101 | # Test data 102 | nn = 100 103 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 104 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 105 | X, T = np.meshgrid(x, t) 106 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 107 | 108 | u_star = u(X_star) 109 | s_star = s(X_star) 110 | 111 | # Predictions 112 | u_pred = model.predict_u(X_star) 113 | s_pred = model.predict_s(X_star) 114 | 115 | # Errors 116 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 117 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 118 | 119 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 120 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 121 | 122 | # Plot 123 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 124 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 125 | 126 | t = np.linspace(0, 1, 100)[:, None] 127 | x = np.zeros_like(t) 128 | x_star = np.concatenate((x, t), axis=1) 129 | 130 | s_star = s(x_star) 131 | s_pred = model.predict_s(x_star) 132 | error_s = np.abs(s_star - s_pred) 133 | 134 | # Plot for solution u 135 | fig_1 = plt.figure(1, figsize=(18, 5)) 136 | plt.subplot(1, 3, 1) 137 | plt.plot(s_star, t, color='black', linewidth=2) 138 | plt.pcolor(X, T, U_star, cmap='jet') 139 | plt.colorbar() 140 | plt.xlabel(r'$x$') 141 | plt.ylabel(r'$t$') 142 | plt.title('Exact $u(x,t)$') 143 | 144 | plt.subplot(1, 3, 2) 145 | plt.pcolor(X, T, U_pred, cmap='jet') 146 | plt.plot(s_pred, t, color='black', linewidth=2) 147 | plt.colorbar() 148 | plt.xlabel(r'$x$') 149 | plt.ylabel(r'$t$') 150 | plt.title('Predicted $u(x,t)$') 151 | 152 | plt.subplot(1, 3, 3) 153 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 154 | plt.colorbar(format='%.0e') 155 | plt.xlabel(r'$x$') 156 | plt.ylabel(r'$t$') 157 | plt.title('Absolute Error') 158 | 159 | plt.tight_layout() 160 | plt.show() 161 | 162 | # Plot for solution s 163 | fig_2 = plt.figure(2, figsize=(12, 5)) 164 | plt.subplot(1, 2, 1) 165 | plt.plot(t, s_star, label='Exact') 166 | plt.plot(t, s_pred, '--', label='Predicted') 167 | plt.xlabel(r'$t$') 168 | plt.ylabel(r'$s(t)$') 169 | plt.title('Moving Boundary') 170 | plt.legend() 171 | 172 | plt.subplot(1, 2, 2) 173 | plt.plot(t, error_s) 174 | plt.xlabel(r'$t$') 175 | plt.ylabel(r'Point-wise Error') 176 | plt.title('Absolute Error') 177 | plt.yscale('log') 178 | plt.tight_layout() 179 | plt.show() 180 | 181 | 182 | 183 | 184 | 185 | 186 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan1D_2P_inverse_II.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Feb 5 20:01:15 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from scipy.interpolate import griddata 12 | import seaborn as sns 13 | from Stefan1D_2p_models_tf import Sampler, DataSampler, Stefan1D_2P_inverse_II 14 | import pandas as pd 15 | import os 16 | 17 | if __name__ == '__main__': 18 | 19 | # Exact u1 20 | def u1(x): 21 | # x = (x, t) 22 | t = x[:,1:2] 23 | x = x[:,0:1] 24 | 25 | u1 = 2 * 
(np.exp((t + 0.5 - x) / 2) - 1) 26 | return u1 27 | 28 | # Exact u2 29 | def u2(x): 30 | # x = (x, t) 31 | t = x[:, 1:2] 32 | x = x[:, 0:1] 33 | 34 | u2 = np.exp(t + 0.5 - x) - 1 35 | return u2 36 | 37 | # Exact s 38 | def s(x): 39 | t = x[:, 1:2] 40 | s = t + 0.5 41 | return s 42 | 43 | # Exact u 44 | def u(x): 45 | return np.where(x[:, 0:1] <= s(x), u1(x), u2(x)) 46 | 47 | def I(x): 48 | z = 1.0 49 | N = x.shape[0] 50 | return z * np.ones((N, 1)) 51 | 52 | # Initial condition for s 53 | def s_0(x): 54 | z = 0.5 55 | N = x.shape[0] 56 | return z * np.ones((N, 1)) 57 | 58 | 59 | # Domain boundaries 60 | bc_coords = np.array([[0.0, 0.0], 61 | [0.0, 0.0]]) 62 | dom_coords = np.array([[0.0, 0.0], 63 | [2.0, 1.0]]) 64 | 65 | # Create boundary conditions samplers 66 | bcs_sampler = Sampler(2, bc_coords, lambda x: s_0(x), name='Boundary Condition') 67 | Sbc_sampler = Sampler(2, dom_coords, lambda x: I(x), name='Stefan Boundary Condition') 68 | SNc_sampler = Sampler(2, dom_coords, lambda x: I(x), name='Neumann Boundary Condition') 69 | 70 | # Create residual sampler 71 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 72 | data_X, data_u = res_sampler.sample(40) 73 | data_sampler = DataSampler(data_X, data_u) 74 | 75 | # Define model 76 | layers_u = [2, 100, 100, 100, 2] 77 | layers_s = [1, 100, 100, 100, 1] 78 | model = Stefan1D_2P_inverse_II(layers_u, layers_s, bcs_sampler, Sbc_sampler, SNc_sampler, res_sampler, data_sampler) 79 | 80 | # Train the model 81 | model.train(nIter=40000, batch_size=128) 82 | 83 | # Test data 84 | nn = 100 85 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 86 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 87 | X, T = np.meshgrid(x, t) 88 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 89 | 90 | u_star = u(X_star) 91 | s_star = s(X_star) 92 | 93 | plt.plot(data_X[:, 0:1], data_X[:, 1:2], 'o') # sanity check: measurement locations vs. the exact free boundary 94 | plt.plot(s_star, X_star[:, 1:2]) 95 | plt.show() 96 | 97 | # Predictions 98 | u_pred = model.predict_u(X_star) 99 | s_pred = model.predict_s(X_star) 100 | 101 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 102 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 103 | 104 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 105 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 106 | 107 | # Plot 108 | 109 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 110 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 111 | 112 | t = np.linspace(0,1, 100)[:, None] 113 | x = np.zeros_like(t) 114 | x_star = np.concatenate((x,t), axis=1) 115 | 116 | s_star = s(x_star) 117 | s_pred = model.predict_s(x_star) 118 | error_s = np.abs(s_star - s_pred) 119 | 120 | fig_1 = plt.figure(1, figsize=(18, 5)) 121 | plt.subplot(1, 3, 1) 122 | plt.plot(data_X[:,0:1], data_X[:,1:2], 'x', color='black') 123 | plt.plot(s_star, t, color='black', linewidth=2) 124 | plt.pcolor(X, T, U_star, cmap='jet') 125 | plt.colorbar() 126 | plt.xlabel(r'$x$') 127 | plt.ylabel(r'$t$') 128 | plt.title('Exact $u(x, t)$') 129 | 130 | plt.subplot(1, 3, 2) 131 | plt.pcolor(X, T, U_pred, cmap='jet') 132 | plt.plot(s_pred, t, color='black', linewidth=2) 133 | plt.colorbar() 134 | plt.xlabel(r'$x$') 135 | plt.ylabel(r'$t$') 136 | plt.title('Predicted $u(x, t)$') 137 | 138 | plt.subplot(1, 3, 3) 139 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 140 | plt.colorbar(format='%.0e') 141 | plt.xlabel(r'$x$') 142 | plt.ylabel(r'$t$') 
plt.title('Absolute Error') 144 | 145 | plt.tight_layout() 146 | plt.show() 147 | 148 | fig_2 = plt.figure(2, figsize=(12, 5)) 149 | plt.subplot(1, 2, 1) 150 | plt.plot(t, s_star, label='Exact') 151 | plt.plot(t, s_pred, '--', label='Predicted') 152 | plt.xlabel(r'$t$') 153 | plt.ylabel(r'$s(t)$') 154 | plt.title('Moving Boundary') 155 | plt.legend() 156 | 157 | plt.subplot(1, 2, 2) 158 | plt.plot(t, error_s) 159 | plt.xlabel(r'$t$') 160 | plt.ylabel(r'Point-wise Error') 161 | plt.title('Absolute Error') 162 | plt.yscale('log') 163 | plt.tight_layout() 164 | plt.show() 165 | 166 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan1D_2P_inverse_III.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Feb 5 20:01:15 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from scipy.interpolate import griddata 12 | import seaborn as sns 13 | from Stefan1D_2p_models_tf import Sampler, DataSampler, Stefan1D_2P_inverse_III 14 | import pandas as pd 15 | import os 16 | 17 | if __name__ == '__main__': 18 | 19 | # Exact u1 20 | def u1(x): 21 | # x = (x, t) 22 | t = x[:,1:2] # compute t first! otherwise x is changed! 23 | x = x[:,0:1] 24 | 25 | u1 = 2 * (np.exp((t + 0.5 - x) / 2) - 1) 26 | return u1 27 | 28 | # Exact u2 29 | def u2(x): 30 | # x = (x, t) 31 | t = x[:, 1:2] 32 | x = x[:, 0:1] 33 | 34 | u2 = np.exp(t + 0.5 - x) - 1 35 | return u2 36 | 37 | # Exact s 38 | def s(x): 39 | t = x[:, 1:2] 40 | s = t + 0.5 41 | return s 42 | 43 | # Exact u 44 | def u(x): 45 | return np.where(x[:, 0:1] <= s(x), u1(x), u2(x)) 46 | 47 | def I(x): 48 | z = 1.0 49 | N = x.shape[0] 50 | return z * np.ones((N, 1)) 51 | 52 | # Initial condition for s 53 | def s_0(x): 54 | z = 0.5 55 | N = x.shape[0] 56 | return z * np.ones((N, 1)) 57 | 58 | 59 | # Domain boundaries 60 | bc_coords = np.array([[0.0, 0.0], 61 | [0.0, 0.0]]) 62 | dom_coords = np.array([[0.0, 0.0], 63 | [2.0, 1.0]]) 64 | 65 | # Create boundary conditions samplers 66 | bcs_sampler = Sampler(2, bc_coords, lambda x: s_0(x), name='Boundary Condition') 67 | Sbc_sampler = Sampler(2, dom_coords, lambda x: I(x), name='Stefan Boundary Condition') 68 | SNc_sampler = Sampler(2, dom_coords, lambda x: I(x), name='Neumann Boundary Condition') 69 | 70 | # Create residual sampler 71 | res_sampler = Sampler(2, dom_coords, lambda x: u(x), name='Forcing') 72 | 73 | data_X, data_u = res_sampler.sample(200) 74 | 75 | data_sampler = DataSampler(data_X, data_u) 76 | 77 | # Define model 78 | layers_u = [2, 100, 100, 100, 2] 79 | layers_s = [1, 100, 100, 100, 1] # or we can map s to (t, s(t)) 80 | 81 | # Method 82 | # M1 : Standard PINNs 83 | # M2 : PINNs with adaptive learning rate method 84 | method = 'M1' 85 | model = Stefan1D_2P_inverse_III(layers_u, layers_s, bcs_sampler, Sbc_sampler, SNc_sampler, res_sampler, data_sampler,method) 86 | 87 | # Train the model 88 | model.train(nIter=40000, batch_size=128) 89 | 90 | # Test data 91 | nn = 100 92 | x = np.linspace(dom_coords[0, 0], dom_coords[1, 0], nn)[:, None] 93 | t = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 94 | X, T = np.meshgrid(x, t) 95 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 96 | 97 | u_star = u(X_star) 98 | s_star = s(X_star) 99 | 100 | plt.plot(data_X[:, 0:1], data_X[:, 1:2], 'o') 101 | plt.plot(s_star, X_star[:, 1:2], ) 102 | plt.show() 103 | 104 | # Predictions 
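# (Note: the metrics printed below are relative L2 errors over the uniform test grid,
#  error = ||exact - pred||_2 / ||exact||_2, computed for both u and s.)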
105 | u_pred = model.predict_u(X_star) 106 | s_pred = model.predict_s(X_star) 107 | 108 | error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) 109 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 110 | 111 | print('Relative L2 error_u: {:.2e}'.format(error_u)) 112 | print('Relative L2 error_s: {:.2e}'.format(error_s)) 113 | 114 | # Plot
115 | U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic') 116 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 117 | 118 | t = np.linspace(0, 1, 100)[:, None] 119 | x = np.zeros_like(t) 120 | x_star = np.concatenate((x, t), axis=1) 121 | 122 | s_star = s(x_star) 123 | s_pred = model.predict_s(x_star) 124 | error_s = np.abs(s_star - s_pred) 125 | 126 | fig_1 = plt.figure(1, figsize=(18, 5)) 127 | plt.subplot(1, 3, 1) 128 | plt.plot(data_X[:,0:1], data_X[:,1:2], 'x', color='black') 129 | plt.plot(s_star, t, color='black', linewidth=2) 130 | plt.pcolor(X, T, U_star, cmap='jet') 131 | plt.colorbar() 132 | plt.xlabel(r'$x$') 133 | plt.ylabel(r'$t$') 134 | plt.title('Exact $u(x, t)$') 135 | 136 | plt.subplot(1, 3, 2) 137 | plt.pcolor(X, T, U_pred, cmap='jet') 138 | plt.plot(s_pred, t, color='black', linewidth=2) 139 | plt.colorbar() 140 | plt.xlabel(r'$x$') 141 | plt.ylabel(r'$t$') 142 | plt.title('Predicted $u(x, t)$') 143 | 144 | plt.subplot(1, 3, 3) 145 | plt.pcolor(X, T, np.abs(U_star - U_pred), cmap='jet') 146 | plt.colorbar(format='%.0e') 147 | plt.xlabel(r'$x$') 148 | plt.ylabel(r'$t$') 149 | plt.title('Absolute Error') 150 | 151 | plt.tight_layout() 152 | plt.show() 153 | 154 | fig_2 = plt.figure(2, figsize=(12, 5)) 155 | plt.subplot(1, 2, 1) 156 | plt.plot(t, s_star, label='Exact') 157 | plt.plot(t, s_pred, '--', label='Predicted') 158 | plt.xlabel(r'$t$') 159 | plt.ylabel(r'$s(t)$') 160 | plt.title('Moving Boundary') 161 | plt.legend() 162 | 163 | plt.subplot(1, 2, 2) 164 | plt.plot(t, error_s) 165 | plt.xlabel(r'$t$') 166 | plt.ylabel(r'Point-wise Error') 167 | plt.title('Absolute Error') 168 | plt.yscale('log') 169 | plt.tight_layout() 170 | plt.show() 171 | 172 | alpha_1_list = model.alpha_1_log 173 | alpha_2_list = model.alpha_2_log 174 | 175 | N = len(alpha_1_list) 176 | iters = 10 * np.arange(N) # the alpha values are logged once every 10 iterations 177 | fig_3 = plt.figure(3, figsize=(7, 5)) 178 | ax = fig_3.add_subplot(111) 179 | ax.plot(iters, alpha_1_list, label='$k_1$ pred') 180 | ax.plot(iters, alpha_2_list, label='$k_2$ pred') 181 | ax.plot(iters, 2 * np.ones(N), '--', label='$k_1$ exact', color='C0') 182 | ax.plot(iters, np.ones(N), '--', label='$k_2$ exact', color='C1') 183 | ax.set_xlabel('iterations') 184 | ax.set_ylabel('thermal diffusivity') 185 | 186 | plt.legend() 187 | plt.tight_layout() 188 | plt.show() 189 | 190 | 191 | 192 | 193 | 194 | -------------------------------------------------------------------------------- /Stefan 1D2P/Stefan1D_2p_models_tf.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import timeit 4 | 5 | class Sampler: 6 | # Initialize the class 7 | def __init__(self, dim, coords, func, name=None): 8 | self.dim = dim 9 | self.coords = coords 10 | self.func = func 11 | self.name = name 12 | 13 | def sample(self, N): 14 | x = self.coords[0:1, :] + (self.coords[1:2, :] - self.coords[0:1, :]) * np.random.uniform(0, 1, size=(N, self.dim)) 15 | y = self.func(x) 16 | return x, y 17 | 18 | class DataSampler: 19 | # Initialize the class 20 | def __init__(self, X, Y, name=None): 21 | self.X = X 22 |
self.Y = Y 23 | self.N = self.X.shape[0] 24 | 25 | def sample(self, batch_size): 26 | idx = np.random.choice(self.N, batch_size, replace=True) 27 | X_batch = self.X[idx, :] 28 | Y_batch = self.Y[idx, :] 29 | return X_batch, Y_batch 30 | 31 | class Stefan1D_2P_direct: 32 | def __init__(self, layers_u, layers_s, bcs_sampler, ics_sampler, res_sampler): 33 | # Normalization constants 34 | X, _ = res_sampler.sample(np.int32(1e5)) 35 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 36 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 37 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 38 | 39 | # Samplers 40 | self.bcs_sampler = bcs_sampler 41 | self.ics_sampler = ics_sampler 42 | self.res_sampler = res_sampler 43 | 44 | # Initialize network weights and biases 45 | self.layers_u = layers_u 46 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 47 | 48 | self.layers_s = layers_s 49 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 50 | 51 | # Define Tensorflow session 52 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 53 | 54 | # Define placeholders and computational graph 55 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 56 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 57 | 58 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 59 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 60 | 61 | self.x_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 62 | self.t_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 63 | self.u_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 64 | 65 | self.x_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 66 | self.t_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 67 | self.u_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 68 | 69 | self.x_ic1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 70 | self.t_ic1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 71 | self.u_ic1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 72 | 73 | self.x_ic2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 74 | self.t_ic2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 75 | self.u_ic2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 76 | 77 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 78 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 79 | 80 | # Evaluate predictions 81 | self.s_pred = self.net_s(self.t_u_tf) 82 | self.u1_pred, self.u2_pred = self.net_u1u2(self.x_r_tf, self.t_r_tf) 83 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 84 | 85 | self.u1_0_pred,_ = self.net_u1u2(self.x_ic1_tf, self.t_ic1_tf) 86 | _, self.u2_0_pred = self.net_u1u2(self.x_ic2_tf, self.t_ic2_tf) 87 | 88 | self.u1_bc_pred, _ = self.net_u1u2(self.x_bc1_tf, self.t_bc1_tf) 89 | _, self.u2_bc_pred = self.net_u1u2(self.x_bc2_tf, self.t_bc2_tf) 90 | 91 | self.s_bc1_pred, self.s_bc2_pred = self.net_u1u2((self.net_s(self.t_r_tf) - self.mu_x) / self.sigma_x, self.t_r_tf) 92 | 93 | self.r_u1_pred, self.r_u2_pred = self.net_r_u1u2(self.x_r_tf, self.t_r_tf) 94 | 95 | self.r_Nc_pred = self.net_r_Nc(self.t_r_tf) 96 | 97 | # Boundary loss 98 | self.loss_u1_bc = tf.reduce_mean(tf.square(self.u1_bc_pred - self.u_bc1_tf)) 99 | self.loss_u2_bc = tf.reduce_mean(tf.square(self.u2_bc_pred - self.u_bc2_tf)) 100 | self.loss_u_bcs = self.loss_u1_bc + self.loss_u2_bc 101 | 102 | # Initial Loss 103 | self.loss_u1_ic = tf.reduce_mean(tf.square(self.u1_0_pred - self.u_ic1_tf)) 104 | self.loss_u2_ic = tf.reduce_mean(tf.square(self.u2_0_pred - 
self.u_ic2_tf)) 105 | self.loss_u_ics = self.loss_u1_ic + self.loss_u2_ic 106 | 107 | # Stefan loss 108 | self.loss_Sbc1 = tf.reduce_mean(tf.square(self.s_bc1_pred)) # u1(s(t),t) = u2(s(t), t) = 0 109 | self.loss_Sbc2 = tf.reduce_mean(tf.square(self.s_bc2_pred)) # u1(s(t),t) = u2(s(t), t) = 0 110 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.t_ic1_tf) - 0.5)) # s(0) = 0.5 111 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred)) # Neumann Condition 112 | 113 | self.loss_Scs = self.loss_Sbc1 + self.loss_Sbc2 + self.loss_s_0 + self.loss_SNc 114 | 115 | # Residual loss 116 | self.loss_res_u1 = tf.reduce_mean(tf.square(self.r_u1_pred)) 117 | self.loss_res_u2 = tf.reduce_mean(tf.square(self.r_u2_pred)) 118 | self.loss_res = self.loss_res_u1 + self.loss_res_u2 119 | 120 | # Total loss 121 | self.loss = self.loss_res + self.loss_u_ics + self.loss_u_bcs + self.loss_Scs 122 | 123 | # Define optimizer with learning rate schedule 124 | self.global_step = tf.Variable(0, trainable=False) 125 | starter_learning_rate = 1e-3 126 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 127 | 1000, 0.9, staircase=False) 128 | # Passing global_step to minimize() will increment it at each step. 129 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 130 | 131 | self.loss_bcs_log = [] 132 | self.loss_ics_log = [] 133 | self.loss_Scs_log = [] 134 | self.loss_res_log = [] 135 | self.saver = tf.train.Saver() 136 | 137 | # Initialize Tensorflow variables 138 | init = tf.global_variables_initializer() 139 | self.sess.run(init) 140 | 141 | # Xavier initialization 142 | def xavier_init(self, size): 143 | in_dim = size[0] 144 | out_dim = size[1] 145 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
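# Note: this equals the Glorot (Xavier) normal scale sqrt(2 / (in_dim + out_dim));
# e.g. for a 100 x 100 hidden layer the stddev is sqrt(2 / 200) = 0.1, and the
# unit-variance tf.random_normal draw below is rescaled by it.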
146 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 147 | dtype=tf.float32) 148 | 149 | # Initialize network weights and biases using Xavier initialization 150 | def initialize_NN(self, layers): 151 | weights = [] 152 | biases = [] 153 | num_layers = len(layers) 154 | for l in range(0, num_layers - 1): 155 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 156 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 157 | weights.append(W) 158 | biases.append(b) 159 | return weights, biases 160 | 161 | # Evaluates the forward pass 162 | def forward_pass(self, H, weights, biases): 163 | num_layers = len(weights) 164 | for l in range(0, num_layers - 2): 165 | W = weights[l] 166 | b = biases[l] 167 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 168 | W = weights[-1] 169 | b = biases[-1] 170 | H = tf.add(tf.matmul(H, W), b) 171 | return H 172 | 173 | # Forward pass for u1, u2 174 | def net_u1u2(self, x, t): 175 | u = self.forward_pass(tf.concat([x, t], 1), self.weights_u, self.biases_u) 176 | u1 = u[:,0:1] 177 | u2 = u[:,1:2] 178 | return u1, u2 179 | 180 | def net_u1u2_x(self, x, t): 181 | u1, u2 = self.net_u1u2(x, t) 182 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 183 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 184 | 185 | return u1_x, u2_x 186 | 187 | # Forward pass for s 188 | def net_s(self, t): 189 | s = self.forward_pass(t, self.weights_s, self.biases_s) 190 | return s 191 | 192 | # Forward pass for u 193 | def net_u(self, x, t): 194 | u1, u2 = self.net_u1u2(x, t) 195 | s = self.net_s(t) # s is in physical (unnormalized) coordinates 196 | 197 | # denormalizing x coordinates 198 | x_hat = x * self.sigma_x + self.mu_x 199 | # u = u1 where x_hat < s(t) and u = u2 where x_hat > s(t), via 0.5 * (sign + 1) indicators 200 | u = tf.multiply(u1, 0.5 * (tf.sign(s - x_hat) + 1)) + tf.multiply(u2, 0.5 * (tf.sign(x_hat - s) + 1)) 201 | return u 202 | 203 | # Forward pass for residual 204 | def net_r_u1u2(self, x, t): 205 | u1, u2 = self.net_u1u2(x, t) 206 | 207 | u1_t = tf.gradients(u1, t)[0] / self.sigma_t 208 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 209 | u1_xx = tf.gradients(u1_x, x)[0] / self.sigma_x 210 | r_u1 = u1_t - 2 * u1_xx # heat equation in phase 1 (diffusivity 2) 211 | 212 | u2_t = tf.gradients(u2, t)[0] / self.sigma_t 213 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 214 | u2_xx = tf.gradients(u2_x, x)[0] / self.sigma_x 215 | r_u2 = u2_t - u2_xx # heat equation in phase 2 (diffusivity 1) 216 | 217 | return r_u1, r_u2 218 | 219 | def net_r_Nc(self, t): 220 | s = self.net_s(t) 221 | s_t = tf.gradients(s, t)[0] / self.sigma_t 222 | 223 | # Normalizing s 224 | s = (s - self.mu_x) / self.sigma_x 225 | 226 | u1_x, u2_x = self.net_u1u2_x(s, t) 227 | 228 | residual = s_t - u2_x + 2 * u1_x # Stefan condition: s'(t) = u2_x - 2 * u1_x at x = s(t) 229 | return residual 230 | 231 | def fetch_minibatch(self, sampler, N): 232 | X, Y = sampler.sample(N) 233 | X = (X - self.mu_X) / self.sigma_X 234 | return X, Y 235 | 236 | def train(self, nIter=10000, batch_size=128): 237 | start_time = timeit.default_timer() 238 | 239 | for it in range(nIter): 240 | # Fetch boundary and data mini-batches 241 | X_bc1_batch, u1_bc_batch = self.fetch_minibatch(self.bcs_sampler[0], batch_size) 242 | X_bc2_batch, u2_bc_batch = self.fetch_minibatch(self.bcs_sampler[1], batch_size) 243 | X_ic1_batch, u1_ic_batch = self.fetch_minibatch(self.ics_sampler[0], batch_size) 244 | X_ic2_batch, u2_ic_batch = self.fetch_minibatch(self.ics_sampler[1], batch_size) 245 | 246 | # Fetch residual mini-batch 247 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 248 | 249 | # Define a dictionary for associating placeholders with data 250 | tf_dict = {self.x_u_tf:
X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 251 | self.x_bc1_tf: X_bc1_batch[:, 0:1], self.t_bc1_tf: X_bc1_batch[:, 1:2], 252 | self.u_bc1_tf: u1_bc_batch, 253 | self.x_bc2_tf: X_bc2_batch[:, 0:1], self.t_bc2_tf: X_bc2_batch[:, 1:2], 254 | self.u_bc2_tf: u2_bc_batch, 255 | self.x_ic1_tf: X_ic1_batch[:, 0:1], self.t_ic1_tf: X_ic1_batch[:, 1:2], 256 | self.u_ic1_tf: u1_ic_batch, 257 | self.x_ic2_tf: X_ic2_batch[:, 0:1], self.t_ic2_tf: X_ic2_batch[:, 1:2], 258 | self.u_ic2_tf: u2_ic_batch, 259 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2]} 260 | 261 | self.sess.run(self.train_op, tf_dict) 262 | 263 | # Print 264 | if it % 10 == 0: 265 | elapsed = timeit.default_timer() - start_time 266 | loss_value = self.sess.run(self.loss, tf_dict) 267 | loss_bcs_value, loss_ics_value, loss_res_value = self.sess.run( 268 | [self.loss_u_bcs, self.loss_u_ics, self.loss_res], tf_dict) 269 | self.loss_bcs_log.append(loss_bcs_value) 270 | self.loss_ics_log.append(loss_ics_value) 271 | self.loss_res_log.append(loss_res_value) 272 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_ics: %.3e, Loss_res: %.3e, Time: %.2f' % 273 | (it, loss_value, loss_bcs_value, loss_ics_value, loss_res_value, elapsed)) 274 | start_time = timeit.default_timer() 275 | 276 | # Predictions for u 277 | def predict_u(self, X_star): 278 | X_star = (X_star - self.mu_X) / self.sigma_X 279 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 280 | u_star = self.sess.run(self.u_pred, tf_dict) 281 | return u_star 282 | 283 | # Predictions for s 284 | def predict_s(self, X_star): 285 | X_star = (X_star - self.mu_X) / self.sigma_X 286 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 287 | s_star = self.sess.run(self.s_pred, tf_dict) 288 | return s_star 289 | 290 | 291 | class Stefan1D_2P_inverse_I: 292 | def __init__(self, layers_u, layers_s, ics_sampler, ft_sampler, res_sampler): 293 | # Normalization constants 294 | X, _ = res_sampler.sample(np.int32(1e5)) 295 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 296 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 297 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 298 | 299 | # Samplers 300 | self.ics_sampler = ics_sampler 301 | self.ft_sampler = ft_sampler 302 | self.res_sampler = res_sampler 303 | 304 | # Initialize network weights and biases 305 | self.layers_u = layers_u 306 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 307 | 308 | self.layers_s = layers_s 309 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 310 | 311 | # Define Tensorflow session 312 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 313 | 314 | # Define placeholders and computational graph 315 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 316 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 317 | 318 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 319 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 320 | 321 | self.x1_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 322 | self.t1_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 323 | self.u1_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 324 | 325 | self.x2_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 326 | self.t2_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 327 | self.u2_ic_tf = tf.placeholder(tf.float32, shape=(None, 1)) 328 | 329 | self.x1_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 330 | self.t1_T_tf = tf.placeholder(tf.float32, shape=(None, 
1)) 331 | self.u1_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 332 | 333 | self.x2_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 334 | self.t2_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 335 | self.u2_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 336 | 337 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 338 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 339 | 340 | # Evaluate predictions 341 | self.s_pred = self.net_s(self.t_u_tf) 342 | self.u1_pred, self.u2_pred = self.net_u1u2(self.x_r_tf, self.t_r_tf) 343 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 344 | 345 | self.u1_0_pred, _ = self.net_u1u2(self.x1_ic_tf, self.t1_ic_tf) 346 | _, self.u2_0_pred = self.net_u1u2(self.x2_ic_tf, self.t2_ic_tf) 347 | 348 | self.u1_T_pred, _ = self.net_u1u2(self.x1_T_tf, self.t1_T_tf) 349 | _, self.u2_T_pred = self.net_u1u2(self.x2_T_tf, self.t2_T_tf) 350 | 351 | self.s_bc1_pred, self.s_bc2_pred = self.net_u1u2((self.net_s(self.t_r_tf) - self.mu_x) / self.sigma_x, self.t_r_tf) 352 | 353 | self.r_u1_pred, self.r_u2_pred = self.net_r_u1u2(self.x_r_tf, self.t_r_tf) 354 | 355 | self.r_Nc_pred = self.net_r_Nc(self.t_r_tf) 356 | 357 | # Boundary loss 358 | self.loss_u1_T = tf.reduce_mean(tf.square(self.u1_T_pred - self.u1_T_tf)) 359 | self.loss_u2_T = tf.reduce_mean(tf.square(self.u2_T_pred - self.u2_T_tf)) 360 | self.loss_u_T = self.loss_u1_T + self.loss_u2_T 361 | 362 | # Initial Loss 363 | self.loss_u1_ic = tf.reduce_mean(tf.square(self.u1_0_pred - self.u1_ic_tf)) 364 | self.loss_u2_ic = tf.reduce_mean(tf.square(self.u2_0_pred - self.u2_ic_tf)) 365 | self.loss_u_ics = self.loss_u1_ic + self.loss_u2_ic 366 | 367 | # Stefan loss 368 | self.loss_Sbc1 = tf.reduce_mean(tf.square(self.s_bc1_pred)) # u1(s(t),t) = u2(s(t), t) = 0 369 | self.loss_Sbc2 = tf.reduce_mean(tf.square(self.s_bc2_pred)) # u1(s(t),t) = u2(s(t), t) = 0 370 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.t1_ic_tf) - 0.5)) # s(0) = 0.5 371 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred)) # Neumann Condition 372 | 373 | self.loss_Scs = self.loss_Sbc1 + self.loss_Sbc2 + self.loss_s_0 + self.loss_SNc 374 | 375 | # Residual loss 376 | self.loss_res_u1 = tf.reduce_mean(tf.square(self.r_u1_pred)) 377 | self.loss_res_u2 = tf.reduce_mean(tf.square(self.r_u2_pred)) 378 | self.loss_res = self.loss_res_u1 + self.loss_res_u2 379 | 380 | # Total loss 381 | self.loss_u = self.loss_u_ics + self.loss_u_T 382 | self.loss = self.loss_res + self.loss_u + self.loss_Scs 383 | 384 | # Define optimizer with learning rate schedule 385 | self.global_step = tf.Variable(0, trainable=False) 386 | starter_learning_rate = 1e-3 387 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 388 | 1000, 0.9, staircase=False) 389 | # Passing global_step to minimize() will increment it at each step. 390 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 391 | 392 | self.loss_u_log = [] 393 | self.loss_Scs_log = [] 394 | self.loss_res_log = [] 395 | self.saver = tf.train.Saver() 396 | 397 | # Initialize Tensorflow variables 398 | init = tf.global_variables_initializer() 399 | self.sess.run(init) 400 | 401 | # Xavier initialization 402 | def xavier_init(self, size): 403 | in_dim = size[0] 404 | out_dim = size[1] 405 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
406 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 407 | dtype=tf.float32) 408 | 409 | # Initialize network weights and biases using Xavier initialization 410 | def initialize_NN(self, layers): 411 | weights = [] 412 | biases = [] 413 | num_layers = len(layers) 414 | for l in range(0, num_layers - 1): 415 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 416 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 417 | weights.append(W) 418 | biases.append(b) 419 | return weights, biases 420 | 421 | # Evaluates the forward pass 422 | def forward_pass(self, H, weights, biases): 423 | num_layers = len(weights) 424 | for l in range(0, num_layers - 2): 425 | W = weights[l] 426 | b = biases[l] 427 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 428 | W = weights[-1] 429 | b = biases[-1] 430 | H = tf.add(tf.matmul(H, W), b) 431 | return H 432 | 433 | # Forward pass for u1, u2 434 | def net_u1u2(self, x, t): 435 | u = self.forward_pass(tf.concat([x, t], 1), self.weights_u, self.biases_u) 436 | u1 = u[:,0:1] 437 | u2 = u[:,1:2] 438 | return u1, u2 439 | 440 | def net_u1u2_x(self, x, t): 441 | u1, u2 = self.net_u1u2(x, t) 442 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 443 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 444 | 445 | return u1_x, u2_x 446 | 447 | # Forward pass for s 448 | def net_s(self, t): 449 | s = self.forward_pass(t, self.weights_s, self.biases_s) 450 | return s 451 | 452 | # Forward pass for u 453 | def net_u(self, x, t): 454 | u1, u2 = self.net_u1u2(x, t) 455 | s = self.net_s(t) # consider normalization 456 | 457 | # demoralizing x coordinates 458 | x_hat = x * self.sigma_x + self.mu_x 459 | # consider s = x_hat 460 | u = tf.multiply(u1, 0.5 * (tf.sign(s - x_hat) + 1)) + tf.multiply(u2, 0.5 * (tf.sign(x_hat - s) + 1)) 461 | return u 462 | 463 | # Forward pass for residual 464 | def net_r_u1u2(self, x, t): 465 | u1, u2 = self.net_u1u2(x, t) 466 | 467 | u1_t = tf.gradients(u1, t)[0] / self.sigma_t 468 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 469 | u1_xx = tf.gradients(u1_x, x)[0] / self.sigma_x 470 | r_u1 = u1_t - 2 * u1_xx 471 | 472 | u2_t = tf.gradients(u2, t)[0] / self.sigma_t 473 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 474 | u2_xx = tf.gradients(u2_x, x)[0] / self.sigma_x 475 | r_u2 = u2_t - u2_xx 476 | 477 | return r_u1, r_u2 478 | 479 | def net_r_Nc(self, t): 480 | s = self.net_s(t) 481 | s_t = tf.gradients(s, t)[0] / self.sigma_t 482 | 483 | # Normalizing s 484 | s = (s - self.mu_x) / self.sigma_x 485 | 486 | u1_x, u2_x = self.net_u1u2_x(s, t) 487 | residual = s_t - u2_x + 2 * u1_x 488 | return residual 489 | 490 | def fetch_minibatch(self, sampler, N): 491 | X, Y = sampler.sample(N) 492 | X = (X - self.mu_X) / self.sigma_X 493 | return X, Y 494 | 495 | def train(self, nIter=10000, batch_size=128): 496 | start_time = timeit.default_timer() 497 | 498 | for it in range(nIter): 499 | # Fetch boundary and data mini-batches 500 | X1_ic_batch, u1_ic_batch = self.fetch_minibatch(self.ics_sampler[0], batch_size) 501 | X2_ic_batch, u2_ic_batch = self.fetch_minibatch(self.ics_sampler[1], batch_size) 502 | X1_T_batch, u1_T_batch = self.fetch_minibatch(self.ft_sampler[0], batch_size) 503 | X2_T_batch, u2_T_batch = self.fetch_minibatch(self.ft_sampler[1], batch_size) 504 | # Fetch residual mini-batch 505 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 506 | 507 | # Define a dictionary for associating placeholders with data 508 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], 
self.t_u_tf: X_res_batch[:, 1:2], 509 | self.x1_ic_tf: X1_ic_batch[:, 0:1], self.t1_ic_tf: X1_ic_batch[:, 1:2], 510 | self.u1_ic_tf: u1_ic_batch, 511 | self.x2_ic_tf: X2_ic_batch[:, 0:1], self.t2_ic_tf: X2_ic_batch[:, 1:2], 512 | self.u2_ic_tf: u2_ic_batch, 513 | self.x1_T_tf: X1_T_batch[:, 0:1], self.t1_T_tf: X1_T_batch[:, 1:2], 514 | self.u1_T_tf: u1_T_batch, 515 | self.x2_T_tf: X2_T_batch[:, 0:1], self.t2_T_tf: X2_T_batch[:, 1:2], 516 | self.u2_T_tf: u2_T_batch, 517 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2]} 518 | 519 | self.sess.run(self.train_op, tf_dict) 520 | 521 | # Print 522 | if it % 10 == 0: 523 | elapsed = timeit.default_timer() - start_time 524 | loss_value = self.sess.run(self.loss, tf_dict) 525 | loss_u_value, loss_Scs_value, loss_res_value = self.sess.run( 526 | [self.loss_u, self.loss_Scs, self.loss_res], tf_dict) 527 | self.loss_u_log.append(loss_u_value) 528 | self.loss_Scs_log.append(loss_Scs_value) 529 | self.loss_res_log.append(loss_res_value) 530 | print('It: %d, Loss: %.3e, Loss_u: %.3e, Loss_Scs: %.3e, Loss_res: %.3e, Time: %.2f' % 531 | (it, loss_value, loss_u_value, loss_Scs_value, loss_res_value, elapsed)) 532 | start_time = timeit.default_timer() 533 | 534 | # Predictions for u 535 | def predict_u(self, X_star): 536 | X_star = (X_star - self.mu_X) / self.sigma_X 537 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 538 | u_star = self.sess.run(self.u_pred, tf_dict) 539 | return u_star 540 | 541 | # Predictions for s 542 | def predict_s(self, X_star): 543 | X_star = (X_star - self.mu_X) / self.sigma_X 544 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 545 | s_star = self.sess.run(self.s_pred, tf_dict) 546 | return s_star 547 | 548 | class Stefan1D_2P_inverse_II: 549 | def __init__(self, layers_u, layers_s, bcs_sampler, Sbc_sampler, SNc_sampler, res_sampler, data_sampler): 550 | # Normalization constants 551 | X, _ = res_sampler.sample(np.int32(1e5)) 552 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 553 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 554 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 555 | 556 | # Samplers 557 | self.bcs_sampler = bcs_sampler 558 | self.Sbc_sampler = Sbc_sampler 559 | self.SNc_sampler = SNc_sampler 560 | self.res_sampler = res_sampler 561 | self.data_sampler = data_sampler 562 | 563 | # Initialize network weights and biases 564 | self.layers_u = layers_u 565 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 566 | 567 | self.layers_s = layers_s 568 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 569 | 570 | # Define Tensorflow session 571 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 572 | 573 | # Define placeholders and computational graph 574 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 575 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 576 | 577 | self.x_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 578 | self.t_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 579 | self.u_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 580 | 581 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 582 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 583 | 584 | self.t_bc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 585 | self.s_bc_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 586 | 587 | self.x_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 588 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 589 
| self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 590 | 591 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 592 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 593 | 594 | self.x_r_below_tf = tf.placeholder(tf.float32, shape=(None, 1)) 595 | self.t_r_below_tf = tf.placeholder(tf.float32, shape=(None, 1)) 596 | 597 | self.x_r_above_tf = tf.placeholder(tf.float32, shape=(None, 1)) 598 | self.t_r_above_tf = tf.placeholder(tf.float32, shape=(None, 1)) 599 | 600 | # Evaluate predictions 601 | self.s_pred = self.net_s(self.t_u_tf) 602 | self.u1_pred, self.u2_pred = self.net_u1u2(self.x_r_tf, self.t_r_tf) 603 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 604 | 605 | self.s_bc1_pred, self.s_bc2_pred = self.net_u1u2((self.net_s(self.t_bc_tf) - self.mu_x) / self.sigma_x, self.t_bc_tf) 606 | 607 | self.r_u1_pred, self.r_u2_pred = self.net_r_u1u2(self.x_r_tf, self.t_r_tf) 608 | 609 | self.r_Nc_pred = self.net_r_Nc(self.t_Nc_tf) 610 | 611 | self.u_data_pred = self.net_u(self.x_data_tf, self.t_data_tf) 612 | 613 | # Stefan Boundary loss 614 | self.loss_bc1 = tf.reduce_mean(tf.square(self.s_bc1_pred)) # u1(s(t),t) = u2(s(t), t) = 0 615 | self.loss_bc2 = tf.reduce_mean(tf.square(self.s_bc2_pred)) # u1(s(t),t) = u2(s(t), t) = 0 616 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.t_0_tf) - self.s_0_tf)) # s(0) = 0.5 617 | self.loss_Nc = tf.reduce_mean(tf.square(self.r_Nc_pred)) # Neumann Condition 618 | 619 | # Data loss 620 | self.loss_data = tf.reduce_mean(tf.square(self.u_data_pred - self.u_data_tf)) 621 | 622 | # Boundary loss 623 | self.loss_bcs = self.loss_bc1 + self.loss_bc2 + self.loss_s_0 + self.loss_Nc 624 | 625 | # Residual loss 626 | self.loss_res_u1 = tf.reduce_mean(tf.square(self.r_u1_pred)) 627 | self.loss_res_u2 = tf.reduce_mean(tf.square(self.r_u2_pred)) 628 | self.loss_res = self.loss_res_u1 + self.loss_res_u2 629 | 630 | # Total loss 631 | self.loss = self.loss_res + self.loss_data + self.loss_bcs 632 | 633 | # Define optimizer with learning rate schedule 634 | self.global_step = tf.Variable(0, trainable=False) 635 | starter_learning_rate = 1e-3 636 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 637 | 1000, 0.9, staircase=False) 638 | # Passing global_step to minimize() will increment it at each step. 639 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 640 | 641 | self.loss_bcs_log = [] 642 | self.loss_data_log = [] 643 | self.loss_res_log = [] 644 | self.saver = tf.train.Saver() 645 | 646 | # Initialize Tensorflow variables 647 | init = tf.global_variables_initializer() 648 | self.sess.run(init) 649 | 650 | # Xavier initialization 651 | def xavier_init(self, size): 652 | in_dim = size[0] 653 | out_dim = size[1] 654 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
655 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 656 | dtype=tf.float32) 657 | 658 | # Initialize network weights and biases using Xavier initialization 659 | def initialize_NN(self, layers): 660 | weights = [] 661 | biases = [] 662 | num_layers = len(layers) 663 | for l in range(0, num_layers - 1): 664 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 665 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 666 | weights.append(W) 667 | biases.append(b) 668 | return weights, biases 669 | 670 | # Evaluates the forward pass 671 | def forward_pass(self, H, weights, biases): 672 | num_layers = len(weights) 673 | for l in range(0, num_layers - 2): 674 | W = weights[l] 675 | b = biases[l] 676 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 677 | W = weights[-1] 678 | b = biases[-1] 679 | H = tf.add(tf.matmul(H, W), b) 680 | return H 681 | 682 | # Forward pass for u1, u2 683 | def net_u1u2(self, x, t): 684 | u = self.forward_pass(tf.concat([x, t], 1), self.weights_u, self.biases_u) 685 | u1 = u[:,0:1] 686 | u2 = u[:,1:2] 687 | return u1, u2 688 | 689 | def net_u1u2_x(self, x, t): 690 | u1, u2 = self.net_u1u2(x, t) 691 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 692 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 693 | 694 | return u1_x, u2_x 695 | 696 | # Forward pass for s 697 | def net_s(self, t): 698 | s = self.forward_pass(t, self.weights_s, self.biases_s) 699 | return s 700 | 701 | # Forward pass for u 702 | def net_u(self, x, t): 703 | u1, u2 = self.net_u1u2(x, t) 704 | s = self.net_s(t) # consider normalization 705 | 706 | # demoralizing x coordinates 707 | x_hat = x * self.sigma_x + self.mu_x 708 | # consider s = x_hat 709 | u = tf.multiply(u1, 0.5 * (tf.sign(s - x_hat) + 1)) + tf.multiply(u2, 0.5 * (tf.sign(x_hat - s) + 1)) 710 | return u 711 | 712 | # Forward pass for residual 713 | def net_r_u1u2(self, x, t): 714 | u1, u2 = self.net_u1u2(x, t) 715 | 716 | u1_t = tf.gradients(u1, t)[0] / self.sigma_t 717 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 718 | u1_xx = tf.gradients(u1_x, x)[0] / self.sigma_x 719 | r_u1 = u1_t - 2 * u1_xx 720 | 721 | u2_t = tf.gradients(u2, t)[0] / self.sigma_t 722 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 723 | u2_xx = tf.gradients(u2_x, x)[0] / self.sigma_x 724 | r_u2 = u2_t - u2_xx 725 | 726 | return r_u1, r_u2 727 | 728 | def net_r_Nc(self, t): 729 | s = self.net_s(t) 730 | s_t = tf.gradients(s, t)[0] / self.sigma_t 731 | 732 | # Normalizing s 733 | s = (s - self.mu_x) / self.sigma_x 734 | 735 | u1_x, u2_x = self.net_u1u2_x(s, t) 736 | 737 | residual = s_t - u2_x + 2 * u1_x 738 | return residual 739 | 740 | def fetch_minibatch(self, sampler, N): 741 | X, Y = sampler.sample(N) 742 | X = (X - self.mu_X) / self.sigma_X 743 | return X, Y 744 | 745 | def split_minibatch(self, X_batch, s): 746 | 747 | # denormalizing minibatches 748 | X_batch_original = X_batch * self.sigma_X + self.mu_X 749 | 750 | mask_above = (X_batch_original[:, 0:1] >= s) 751 | mask_below = (X_batch_original[:, 0:1] < s) 752 | 753 | X_above_batch = X_batch_original[mask_above[:, 0]] 754 | X_below_batch = X_batch_original[mask_below[:, 0]] 755 | 756 | # Normalizing minibatches back 757 | X_above_batch = (X_above_batch - self.mu_X) / self.sigma_X 758 | X_below_batch = (X_below_batch - self.mu_X) / self.sigma_X 759 | 760 | return X_above_batch, X_below_batch 761 | 762 | def train(self, nIter=10000, batch_size=128): 763 | start_time = timeit.default_timer() 764 | 765 | for it in range(nIter): 766 | # Fetch 
boundary and data mini-batches 767 | X_0_batch, s_0_batch = self.fetch_minibatch(self.bcs_sampler, batch_size) 768 | X_bc_batch, u_bc_batch = self.fetch_minibatch(self.Sbc_sampler, batch_size) 769 | X_SNc_batch, _ = self.fetch_minibatch(self.SNc_sampler, batch_size) 770 | X_data_batch, u_data_batch = self.fetch_minibatch(self.data_sampler, batch_size) 771 | 772 | # Fetch residual mini-batch 773 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 774 | 775 | # Define a dictionary for associating placeholders with data 776 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 777 | self.t_0_tf: X_0_batch[:, 1:2], self.s_0_tf: s_0_batch, 778 | self.t_bc_tf: X_bc_batch[:, 1:2], self.s_bc_tf: u_bc_batch, 779 | self.t_Nc_tf: X_SNc_batch[:, 1:2], 780 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2], 781 | self.x_data_tf: X_data_batch[:, 0:1], self.t_data_tf: X_data_batch[:, 1:2], 782 | self.u_data_tf: u_data_batch} 783 | 784 | self.sess.run(self.train_op, tf_dict) 785 | 786 | # Print 787 | if it % 10 == 0: 788 | elapsed = timeit.default_timer() - start_time 789 | loss_value = self.sess.run(self.loss, tf_dict) 790 | loss_bcs_value, loss_data_value, loss_res_value = self.sess.run( 791 | [self.loss_bcs, self.loss_data, self.loss_res], tf_dict) 792 | self.loss_bcs_log.append(loss_bcs_value) 793 | self.loss_data_log.append(loss_data_value) 794 | self.loss_res_log.append(loss_res_value) 795 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_Data: %.3e, Loss_res: %.3e, Time: %.2f' % 796 | (it, loss_value, loss_bcs_value, loss_data_value, loss_res_value, elapsed)) 797 | start_time = timeit.default_timer() 798 | 799 | # Predictions for u 800 | def predict_u(self, X_star): 801 | X_star = (X_star - self.mu_X) / self.sigma_X 802 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 803 | u_star = self.sess.run(self.u_pred, tf_dict) 804 | return u_star 805 | 806 | # Predictions for s 807 | def predict_s(self, X_star): 808 | X_star = (X_star - self.mu_X) / self.sigma_X 809 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 810 | s_star = self.sess.run(self.s_pred, tf_dict) 811 | return s_star 812 | 813 | class Stefan1D_2P_inverse_III: 814 | def __init__(self, layers_u, layers_s, bcs_sampler, Sbc_sampler, SNc_sampler, res_sampler, data_sampler, method): 815 | # Normalization constants 816 | X, _ = res_sampler.sample(np.int32(1e5)) 817 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 818 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 819 | self.mu_t, self.sigma_t = self.mu_X[1], self.sigma_X[1] 820 | 821 | # Samplers 822 | self.bcs_sampler = bcs_sampler 823 | self.Sbc_sampler = Sbc_sampler 824 | self.SNc_sampler = SNc_sampler 825 | self.res_sampler = res_sampler 826 | self.data_sampler = data_sampler 827 | 828 | # Method ('M1' or 'M2') 829 | self.method = method 830 | 831 | # Initialize network weights and biases 832 | self.layers_u = layers_u 833 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 834 | 835 | self.layers_s = layers_s 836 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 837 | 838 | # Adaptive constant 839 | self.beta = 0.9 840 | self.adaptive_constant_val = np.array(1.0) 841 | 842 | # Unknown thermal diffusivities (trainable) 843 | self.alpha_1 = tf.Variable(tf.ones([1], dtype=tf.float32) * 0.1, dtype=tf.float32) 844 | self.alpha_2 = tf.Variable(tf.ones([1], dtype=tf.float32) * 0.1, dtype=tf.float32) 845 | 846 | # Define Tensorflow session 847 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 848 | 849
| # Define placeholders and computational graph 850 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 851 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 852 | 853 | self.x_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 854 | self.t_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 855 | self.u_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 856 | 857 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 858 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 859 | 860 | self.t_bc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 861 | self.s_bc_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(0) 862 | 863 | self.x_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 864 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 865 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 866 | 867 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 868 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 869 | 870 | self.x_r_below_tf = tf.placeholder(tf.float32, shape=(None, 1)) 871 | self.t_r_below_tf = tf.placeholder(tf.float32, shape=(None, 1)) 872 | 873 | self.x_r_above_tf = tf.placeholder(tf.float32, shape=(None, 1)) 874 | self.t_r_above_tf = tf.placeholder(tf.float32, shape=(None, 1)) 875 | 876 | self.adaptive_constant_tf = tf.placeholder(tf.float32, shape=self.adaptive_constant_val.shape) 877 | 878 | # Evaluate predictions 879 | self.s_pred = self.net_s(self.t_u_tf) 880 | self.u1_pred, self.u2_pred = self.net_u1u2(self.x_r_tf, self.t_r_tf) 881 | self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf) 882 | 883 | self.s_bc1_pred, self.s_bc2_pred = self.net_u1u2((self.net_s(self.t_bc_tf) - self.mu_x) / self.sigma_x, self.t_bc_tf) 884 | 885 | self.r_u1_pred, _ = self.net_r_u1u2(self.x_r_below_tf, self.t_r_below_tf) 886 | _, self.r_u2_pred = self.net_r_u1u2(self.x_r_above_tf, self.t_r_above_tf) 887 | 888 | self.r_Nc_pred = self.net_r_Nc(self.t_Nc_tf) 889 | 890 | self.u_data_pred = self.net_u(self.x_data_tf, self.t_data_tf) 891 | 892 | # Stefan Boundary loss 893 | self.loss_bc1 = tf.reduce_mean(tf.square(self.s_bc1_pred)) # u1(s(t),t) = u2(s(t), t) = 0 894 | self.loss_bc2 = tf.reduce_mean(tf.square(self.s_bc2_pred)) # u1(s(t),t) = u2(s(t), t) = 0 895 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.t_0_tf) - self.s_0_tf)) # s(0) = 0.5 896 | self.loss_Nc = tf.reduce_mean(tf.square(self.r_Nc_pred)) # Neumann Condition 897 | 898 | # Data loss 899 | self.loss_data = self.adaptive_constant_tf * tf.reduce_mean(tf.square(self.u_data_pred - self.u_data_tf)) 900 | 901 | # Boundary loss 902 | self.loss_bcs = self.loss_bc1 + self.loss_bc2 + self.loss_s_0 + self.loss_Nc 903 | 904 | # Residual loss 905 | self.loss_res_u1 = tf.reduce_mean(tf.square(self.r_u1_pred)) 906 | self.loss_res_u2 = tf.reduce_mean(tf.square(self.r_u2_pred)) 907 | self.loss_res = self.loss_res_u1 + self.loss_res_u2 908 | 909 | # Total loss 910 | self.loss = self.loss_res + self.loss_data + self.loss_bcs 911 | 912 | # Define optimizer with learning rate schedule 913 | self.global_step = tf.Variable(0, trainable=False) 914 | starter_learning_rate = 1e-3 915 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 916 | 1000, 0.9, staircase=False) 917 | # Passing global_step to minimize() will increment it at each step. 
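# (With staircase=False the schedule above decays continuously:
#   lr(step) = 1e-3 * 0.9 ** (step / 1000),
#  e.g. roughly 3.5e-4 after 10,000 steps and 1.5e-5 after 40,000 steps.)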
918 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 919 | 920 | self.loss_bcs_log = [] 921 | self.loss_data_log = [] 922 | self.loss_res_log = [] 923 | 924 | self.alpha_1_log = [] 925 | self.alpha_2_log = [] 926 | 927 | self.saver = tf.train.Saver() 928 | 929 | # Generate dicts for gradients storage 930 | self.dict_gradients_res_layers = self.generate_grad_dict(self.layers_u) 931 | self.dict_gradients_data_layers = self.generate_grad_dict(self.layers_u) 932 | 933 | # Gradients Storage 934 | self.grad_res = [] 935 | self.grad_data = [] 936 | for i in range(len(self.layers_u) - 1): 937 | self.grad_res.append(tf.gradients(self.loss_res, self.weights_u[i], unconnected_gradients='zero')[0]) 938 | self.grad_data.append(tf.gradients(self.loss_data, self.weights_u[i], unconnected_gradients='zero')[0]) 939 | 940 | # Compute and store the adaptive constant 941 | self.adpative_constant_log = [] 942 | self.adaptive_constant_list = [] 943 | 944 | self.max_grad_res_list = [] 945 | self.mean_grad_data_list = [] 946 | 947 | for i in range(len(self.layers_u) - 1): 948 | self.max_grad_res_list.append(tf.reduce_max(tf.abs(self.grad_res[i]))) 949 | self.mean_grad_data_list.append(tf.reduce_mean(tf.abs(self.grad_data[i]))) 950 | 951 | self.max_grad_res = tf.reduce_max(tf.stack(self.max_grad_res_list)) 952 | self.mean_grad_data = tf.reduce_mean(tf.stack(self.mean_grad_data_list)) 953 | self.adaptive_constant = self.max_grad_res / self.mean_grad_data 954 | 955 | # Initialize Tensorflow variables 956 | init = tf.global_variables_initializer() 957 | self.sess.run(init) 958 | 959 | # Create dictionary to store gradients 960 | def generate_grad_dict(self, layers): 961 | num = len(layers) - 1 962 | grad_dict = {} 963 | for i in range(num): 964 | grad_dict['layer_{}'.format(i + 1)] = [] 965 | return grad_dict 966 | 967 | # Save gradients 968 | def save_gradients(self, tf_dict): 969 | num_layers = len(self.layers_u) 970 | for i in range(num_layers - 1): 971 | grad_res_value, grad_data_value = self.sess.run( 972 | [self.grad_res[i], self.grad_data[i]], feed_dict=tf_dict) 973 | 974 | # save gradients of loss_res and loss_bcs 975 | self.dict_gradients_res_layers['layer_' + str(i + 1)].append(grad_res_value.flatten()) 976 | self.dict_gradients_data_layers['layer_' + str(i + 1)].append(grad_data_value.flatten()) 977 | return None 978 | 979 | # Xavier initialization 980 | def xavier_init(self, size): 981 | in_dim = size[0] 982 | out_dim = size[1] 983 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 
984 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 985 | dtype=tf.float32) 986 | 987 | # Initialize network weights and biases using Xavier initialization 988 | def initialize_NN(self, layers): 989 | weights = [] 990 | biases = [] 991 | num_layers = len(layers) 992 | for l in range(0, num_layers - 1): 993 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 994 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 995 | weights.append(W) 996 | biases.append(b) 997 | return weights, biases 998 | 999 | # Evaluates the forward pass 1000 | def forward_pass(self, H, weights, biases): 1001 | num_layers = len(weights) 1002 | for l in range(0, num_layers - 2): 1003 | W = weights[l] 1004 | b = biases[l] 1005 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 1006 | W = weights[-1] 1007 | b = biases[-1] 1008 | H = tf.add(tf.matmul(H, W), b) 1009 | return H 1010 | 1011 | # Forward pass for u1, u2 1012 | def net_u1u2(self, x, t): 1013 | u1u2 = self.forward_pass(tf.concat([x, t], 1), self.weights_u, self.biases_u) 1014 | u1 = u1u2[:,0:1] 1015 | u2 = u1u2[:,1:2] 1016 | return u1, u2 1017 | 1018 | def net_u1u2_x(self, x, t): 1019 | u1, u2 = self.net_u1u2(x, t) 1020 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 1021 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 1022 | return u1_x, u2_x 1023 | 1024 | # Forward pass for s 1025 | def net_s(self, t): 1026 | s = self.forward_pass(t, self.weights_s, self.biases_s) 1027 | return s 1028 | 1029 | # Forward pass for u 1030 | def net_u(self, x, t): 1031 | u1, u2 = self.net_u1u2(x, t) 1032 | s = self.net_s(t) # s is in physical (unnormalized) coordinates 1033 | 1034 | # denormalizing x coordinates 1035 | x_hat = x * self.sigma_x + self.mu_x 1036 | # u = u1 where x_hat < s(t) and u = u2 where x_hat > s(t) 1037 | u = tf.multiply(u1, 0.5 * (tf.sign(s - x_hat) + 1)) + tf.multiply(u2, 0.5 * (tf.sign(x_hat - s) + 1)) 1038 | return u 1039 | 1040 | # Forward pass for residual 1041 | def net_r_u1u2(self, x, t): 1042 | u1, u2 = self.net_u1u2(x, t) 1043 | u1_t = tf.gradients(u1, t)[0] / self.sigma_t 1044 | u2_t = tf.gradients(u2, t)[0] / self.sigma_t 1045 | 1046 | u1_x = tf.gradients(u1, x)[0] / self.sigma_x 1047 | u2_x = tf.gradients(u2, x)[0] / self.sigma_x 1048 | 1049 | u1_xx = tf.gradients(u1_x, x)[0] / self.sigma_x 1050 | u2_xx = tf.gradients(u2_x, x)[0] / self.sigma_x 1051 | 1052 | r_u1 = u1_t - self.alpha_1 * u1_xx # alpha_1, alpha_2 are the unknown (trainable) diffusivities 1053 | r_u2 = u2_t - self.alpha_2 * u2_xx 1054 | return r_u1, r_u2 1055 | 1056 | 1057 | def net_r_Nc(self, t): 1058 | s = self.net_s(t) 1059 | s_t = tf.gradients(s, t)[0] / self.sigma_t 1060 | 1061 | # Normalizing s 1062 | s = (s - self.mu_x) / self.sigma_x 1063 | 1064 | u1_x, u2_x = self.net_u1u2_x(s, t) 1065 | 1066 | residual = s_t - u2_x + 2 * u1_x 1067 | return residual 1068 | 1069 | def fetch_minibatch(self, sampler, N): 1070 | X, Y = sampler.sample(N) 1071 | X = (X - self.mu_X) / self.sigma_X 1072 | return X, Y 1073 | 1074 | def split_minibatch(self, X_batch): 1075 | 1076 | # denormalizing minibatches 1077 | X_batch_original = X_batch * self.sigma_X + self.mu_X 1078 | 1079 | s = self.predict_s(X_batch_original) # predict_s expects unnormalized coordinates 1080 | 1081 | mask_above = (X_batch_original[:, 0:1] >= s) 1082 | mask_below = (X_batch_original[:, 0:1] < s) 1083 | 1084 | X_above_batch = X_batch_original[mask_above[:, 0]] 1085 | X_below_batch = X_batch_original[mask_below[:, 0]] 1086 | 1087 | # Normalizing minibatches back 1088 | X_above_batch = (X_above_batch - self.mu_X) / self.sigma_X 1089 | X_below_batch = (X_below_batch - self.mu_X) / self.sigma_X 1090 | 1091 | return X_below_batch,
X_above_batch 1092 | 1093 | def train(self, nIter=10000, batch_size=128): 1094 | start_time = timeit.default_timer() 1095 | 1096 | for it in range(nIter): 1097 | # Fetch boundary and data mini-batches 1098 | X_0_batch, s_0_batch = self.fetch_minibatch(self.bcs_sampler, batch_size) 1099 | X_bc_batch, u_bc_batch = self.fetch_minibatch(self.Sbc_sampler, batch_size) 1100 | X_SNc_batch, _ = self.fetch_minibatch(self.SNc_sampler, batch_size) 1101 | X_data_batch, u_data_batch = self.fetch_minibatch(self.data_sampler, batch_size) 1102 | 1103 | # Fetch residual mini-batch 1104 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 1105 | 1106 | X_below_batch, X_above_batch = self.split_minibatch(X_res_batch) 1107 | 1108 | # Define a dictionary for associating placeholders with data 1109 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.t_u_tf: X_res_batch[:, 1:2], 1110 | self.t_0_tf: X_0_batch[:, 1:2], self.s_0_tf: s_0_batch, 1111 | self.t_bc_tf: X_bc_batch[:, 1:2], self.s_bc_tf: u_bc_batch, 1112 | self.t_Nc_tf: X_SNc_batch[:, 1:2], 1113 | self.x_r_tf: X_res_batch[:, 0:1], self.t_r_tf: X_res_batch[:, 1:2], 1114 | self.x_r_below_tf: X_below_batch[:,0:1], self.t_r_below_tf: X_below_batch[:,1:2], 1115 | self.x_r_above_tf: X_above_batch[:,0:1], self.t_r_above_tf: X_above_batch[:,1:2], 1116 | self.x_data_tf: X_data_batch[:, 0:1], self.t_data_tf: X_data_batch[:, 1:2], 1117 | self.u_data_tf: u_data_batch, 1118 | self.adaptive_constant_tf: self.adaptive_constant_val} 1119 | 1120 | self.sess.run(self.train_op, tf_dict) 1121 | 1122 | # Print 1123 | if it % 10 == 0: 1124 | elapsed = timeit.default_timer() - start_time 1125 | loss_value = self.sess.run(self.loss, tf_dict) 1126 | loss_bcs_value, loss_data_value, loss_res_value = self.sess.run( 1127 | [self.loss_bcs, self.loss_data, self.loss_res], tf_dict) 1128 | self.loss_bcs_log.append(loss_bcs_value) 1129 | self.loss_data_log.append(loss_data_value) 1130 | self.loss_res_log.append(loss_res_value) 1131 | 1132 | alpha_1_value, alpha_2_value = self.sess.run([self.alpha_1, self.alpha_2]) 1133 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_Data: %.3e, Loss_res: %.3e, Time: %.2f' % 1134 | (it, loss_value, loss_bcs_value, loss_data_value, loss_res_value, elapsed)) 1135 | 1136 | print('Alpha_1: {}, Alpha_2: {}'.format(alpha_1_value, alpha_2_value)) 1137 | start_time = timeit.default_timer() 1138 | print('Adaptive Constant: {:.3f}'.format(self.adaptive_constant_val)) 1139 | 1140 | self.alpha_1_log.append(alpha_1_value) 1141 | self.alpha_2_log.append(alpha_2_value) 1142 | 1143 | if self.method in ['M2']: 1144 | adaptive_constant_value = self.sess.run(self.adaptive_constant, tf_dict) 1145 | self.adaptive_constant_val = adaptive_constant_value * (1.0 - self.beta) \ 1146 | + self.beta * self.adaptive_constant_val 1147 | self.adpative_constant_log.append(self.adaptive_constant_val) 1148 | 1149 | # Predictions for u 1150 | def predict_u(self, X_star): 1151 | X_star = (X_star - self.mu_X) / self.sigma_X 1152 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 1153 | u_star = self.sess.run(self.u_pred, tf_dict) 1154 | return u_star 1155 | 1156 | # Predictions for s 1157 | def predict_s(self, X_star): 1158 | X_star = (X_star - self.mu_X) / self.sigma_X 1159 | tf_dict = {self.t_u_tf: X_star[:, 1:2]} 1160 | s_star = self.sess.run(self.s_pred, tf_dict) 1161 | return s_star 1162 | 1163 | 1164 | 1165 | 1166 | 1167 | 1168 | -------------------------------------------------------------------------------- /Stefan 2D1P/Stefan 2D1P.md: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Stefan 2D1P/Stefan2D_direct.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from mpl_toolkits import mplot3d 4 | import matplotlib.pyplot as plt 5 | from scipy.interpolate import griddata 6 | import seaborn as sns 7 | from Stefan2D_models_tf import Sampler, DataSampler, Stefan2D_direct 8 | import pandas as pd 9 | import os 10 | 11 | if __name__ == '__main__': 12 | 13 | # Exact u 14 | def u(z): 15 | # z = (x, y, t) 16 | x = z[:, 0: 1] 17 | y = z[:, 1: 2] 18 | t = z[:, 2: 3] 19 | 20 | u = np.exp(1.25 * t - x + 0.5 * y + 0.5) - 1 21 | return u 22 | 23 | # Exact s 24 | def s(z): 25 | y = z[:, 0: 1] 26 | t = z[:, 1: 2] 27 | s = 0.5 * y + 1.25 * t + 0.5 28 | return s 29 | 30 | # Initial condition u(x, y, 0) = exp(-x + y/2 + 1/2) - 1 31 | def f(z): 32 | x = z[:, 0: 1] 33 | y = z[:, 1: 2] 34 | f = np.exp(- x + 0.5 * y + 0.5) - 1 35 | return f 36 | 37 | def z(x): 38 | N = x.shape[0] 39 | return np.zeros((N, 1)) 40 | 41 | # Domain boundaries 42 | ic_coords = np.array([[0.0, 0.0, 0.0], 43 | [2.25, 1.0, 0.0]]) 44 | 45 | bc1_coords = np.array([[0.0, 0.0, 0.0], 46 | [2.25, 0.0, 1.0]]) 47 | 48 | bc2_coords = np.array([[0.0, 0.0, 0.0], 49 | [0.0, 1.0, 1.0]]) 50 | 51 | bc3_coords = np.array([[0.0, 1.0, 0.0], 52 | [2.25, 1.0, 1.0]]) 53 | 54 | dom_coords = np.array([[0.0, 0.0, 0.0], 55 | [2.25, 1.0, 1.0]]) 56 | 57 | # Create Initial conditions samplers 58 | ics_sampler = Sampler(3, ic_coords, lambda x: f(x), name='Initial Condition') 59 | 60 | # Create boundary conditions samplers 61 | bc1_sampler = Sampler(3, bc1_coords, lambda x: u(x), name='Boundary Condition I') 62 | bc2_sampler = Sampler(3, bc2_coords, lambda x: u(x), name='Boundary Condition II') 63 | bc3_sampler = Sampler(3, bc3_coords, lambda x: u(x), name='Boundary Condition III') 64 | bcs_sampler = [bc1_sampler, bc2_sampler, bc3_sampler] 65 | 66 | # Create residual sampler 67 | res_sampler = Sampler(3, dom_coords, lambda x: u(x), name='Forcing') 68 | 69 | # Define model 70 | layers_u = [3, 100, 100, 100, 1] 71 | layers_s = [2, 100, 100, 100, 1] 72 | model = Stefan2D_direct(layers_u, layers_s, ics_sampler, bcs_sampler, res_sampler) 73 | 74 | # Train the model 75 | model.train(nIter=200, batch_size=128) 76 | 77 | # Test data 78 | nn = 200 79 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 80 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 81 | y, t = np.meshgrid(y, t) 82 | 83 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 84 | 85 | s_star = s(X_star) 86 | 87 | # Predictions 88 | s_pred = model.predict_s(X_star) 89 | 90 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 91 | print('The relative error is: {:.4e}'.format(error_s)) 92 | 93 | 94 | # Plot for s 95 | nn = 200 96 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 97 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 98 | y, t = np.meshgrid(y, t) 99 | 100 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 101 | 102 | s_star = s(X_star) 103 | s_pred = model.predict_s(X_star) 104 | 105 | S_pred = griddata(X_star, s_pred.flatten(), (y, t), method='cubic') 106 | S_star = griddata(X_star, s_star.flatten(), (y, t), method='cubic') 107 | 108 | fig_1 = plt.figure(5, figsize=(18, 5)) 109 | ax = fig_1.add_subplot(1, 3,
1, projection='3d') 110 | ax.plot_surface(y, t, S_star) 111 | ax.set_xlabel('y') 112 | ax.set_ylabel('t') 113 | ax.set_zlabel('s(y,t)') 114 | ax.set_title('Exact') 115 | 116 | ax = fig_1.add_subplot(1, 3, 2, projection='3d') 117 | ax.plot_surface(y, t, S_pred) 118 | ax.set_xlabel('y') 119 | ax.set_ylabel('t') 120 | ax.set_zlabel('s(y,t)') 121 | ax.set_title('Predicted') 122 | 123 | ax = fig_1.add_subplot(1, 3, 3, projection='3d') 124 | ax.plot_surface(y, t, np.abs(S_star - S_pred)) 125 | ax.set_xlabel('y') 126 | ax.set_ylabel('t') 127 | ax.set_zlabel('s(y,t)') 128 | ax.set_title('Absolute Error') 129 | 130 | plt.tight_layout() 131 | plt.show() 132 | 133 | # Plot for u 134 | T_list = [0.2, 0.4, 0.6, 0.8] 135 | nn = 200 136 | x = np.linspace(0, 2.25, nn)[:, None] 137 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 138 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 139 | 140 | for T in T_list: 141 | X, Y = np.meshgrid(x, y) 142 | T_star = T * np.ones_like(X) 143 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None], T_star.flatten()[:, None])) 144 | 145 | u_star = u(X_star) 146 | u_pred = model.predict_u(X_star) 147 | 148 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None])) 149 | 150 | U_star = griddata(X_star, u_star.flatten(), (X, Y), method='cubic') 151 | U_pred = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') 152 | 153 | for i in range(nn): 154 | for j in range(nn): 155 | X_ij = np.array([X[i,j], Y[i,j], T]).reshape(1,3) 156 | u_ij = u(X_ij) 157 | s_ij = s(np.array([Y[i,j], T]).reshape(1,2)) 158 | if X[i,j] > s_ij: 159 | U_star[i,j] = np.nan 160 | U_pred[i,j] = np.nan 161 | 162 | np.savetxt('pred_{}'.format(str(T)), U_pred, delimiter=',') 163 | np.savetxt('exact_{}'.format(str(T)), U_star, delimiter=',') 164 | 165 | 166 | for T in T_list: 167 | U_pred = np.loadtxt('pred_{}'.format(str(T)), delimiter=',') 168 | U_star = np.loadtxt('exact_{}'.format(str(T)), delimiter=',') 169 | 170 | fig = plt.figure(figsize=(18, 5)) 171 | plt.subplot(1, 3, 1) 172 | plt.pcolor(X, Y, U_star, cmap='jet') 173 | plt.colorbar() 174 | plt.xlabel(r'$x$') 175 | plt.ylabel(r'$y$') 176 | plt.title('Exact $u(x, y, {})$'.format(T)) 177 | 178 | plt.subplot(1, 3, 2) 179 | plt.pcolor(X, Y, U_pred, cmap='jet') 180 | plt.colorbar() 181 | plt.xlabel(r'$x$') 182 | plt.ylabel(r'$y$') 183 | plt.title('Predicted $u(x, y, {})$'.format(T)) 184 | 185 | plt.subplot(1, 3, 3) 186 | plt.pcolor(X, Y, np.abs(U_star - U_pred), cmap='jet') 187 | plt.colorbar(format='%.0e') 188 | plt.xlabel(r'$x$') 189 | plt.ylabel(r'$y$') 190 | plt.title('Absolute Error') 191 | 192 | plt.tight_layout() 193 | plt.show() 194 | 195 | 196 | 197 | 198 | 199 | 200 | -------------------------------------------------------------------------------- /Stefan 2D1P/Stefan2D_inverse_I.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Mar 5 10:51:49 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | import tensorflow as tf 16 | import numpy as np 17 | from mpl_toolkits import mplot3d 18 | import matplotlib.pyplot as plt 19 | from scipy.interpolate import griddata 20 | import seaborn as sns 21 | from Stefan2D_models_tf import Sampler, DataSampler, Stefan2D_inverse_I 22 | import pandas as pd 23 | import os 24 | 25 | 26 | if __name__ == '__main__': 27 | def u(z): 28 | # z = (x, y, t) 29 | x = z[:, 0:
1] 30 | y = z[:, 1: 2] 31 | t = z[:, 2: 3] 32 | 33 | u = np.exp(1.25 * t - x + 0.5 * y + 0.5) - 1 34 | return u 35 | 36 | def s(z): 37 | y = z[:, 0: 1] 38 | t = z[:, 1: 2] 39 | s = 0.5 * y + 1.25 * t + 0.5 40 | return s 41 | 42 | def f(z): 43 | # Initial condition u(x, y, 0) = exp(-x + y/2 + 1/2) - 1 44 | x = z[:, 0: 1] 45 | y = z[:, 1: 2] 46 | 47 | f = np.exp(- x + 0.5 * y + 0.5) - 1 48 | return f 49 | 50 | def h(z): 51 | # Final time condition u(x, y, T) = exp(1.25 - x + y/2 + 1/2) - 1 52 | x = z[:, 0: 1] 53 | y = z[:, 1: 2] 54 | h = np.exp(1.25 - x + 0.5 * y + 0.5) - 1 55 | return h 56 | 57 | def g1(z): 58 | # Boundary condition u(x, 0, t) = g1(x, t) 59 | x = z[:, 0: 1] 60 | t = z[:, 2: 3] 61 | 62 | g1 = np.exp(1.25 * t - x + 0.5) - 1 63 | return g1 64 | 65 | def g2(z): 66 | # Boundary condition u(0, y, t) = g2(y, t) 67 | y = z[:, 1: 2] 68 | t = z[:, 2: 3] 69 | 70 | g2 = np.exp(1.25 * t + 0.5 * y + 0.5) - 1 71 | return g2 72 | 73 | def z(x): 74 | N = x.shape[0] 75 | return np.zeros((N, 1)) 76 | 77 | # Domain boundaries 78 | ic_coords = np.array([[0.0, 0.0, 0.0], 79 | [2.25, 1.0, 0.0]]) 80 | ft_coords = np.array([[0.0, 0.0, 1.0], 81 | [2.25, 1.0, 1.0]]) 82 | 83 | dom_coords = np.array([[0.0, 0.0, 0.0], 84 | [2.25, 1.0, 1.0]]) 85 | 86 | # Create Initial conditions samplers 87 | ics_sampler = Sampler(3, ic_coords, lambda x: u(x), name='Initial Condition') 88 | 89 | ft_sampler = Sampler(3, ft_coords, lambda x: u(x)) 90 | # Create residual sampler 91 | res_sampler = Sampler(3, dom_coords, lambda x: u(x), name='Forcing') 92 | 93 | 94 | # Define model 95 | layers_u = [3, 100, 100, 100, 1] 96 | layers_s = [2, 100, 100, 100, 1] # or we can map s to (t, s(t)) 97 | model = Stefan2D_inverse_I(layers_u, layers_s, ics_sampler, ft_sampler, res_sampler) 98 | 99 | # Train the model 100 | model.train(nIter=40000, batch_size=128) 101 | 102 | # Test data 103 | nn = 200 104 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 105 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 106 | y, t = np.meshgrid(y, t) 107 | 108 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 109 | 110 | s_star = s(X_star) 111 | 112 | # Predictions 113 | s_pred = model.predict_s(X_star) 114 | 115 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 116 | print('The relative error is: {:.4e}'.format(error_s)) 117 | 118 | 119 | # Plot for s 120 | nn = 200 121 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 122 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 123 | y, t = np.meshgrid(y, t) 124 | 125 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 126 | 127 | s_star = s(X_star) 128 | s_pred = model.predict_s(X_star) 129 | 130 | S_pred = griddata(X_star, s_pred.flatten(), (y, t), method='cubic') 131 | S_star = griddata(X_star, s_star.flatten(), (y, t), method='cubic') 132 | 133 | fig_1 = plt.figure(5, figsize=(18, 5)) 134 | ax = fig_1.add_subplot(1, 3, 1, projection='3d') 135 | ax.plot_surface(y, t, S_star) 136 | ax.set_xlabel('y') 137 | ax.set_ylabel('t') 138 | ax.set_zlabel('s(y,t)') 139 | ax.set_title('Exact') 140 | 141 | ax = fig_1.add_subplot(1, 3, 2, projection='3d') 142 | ax.plot_surface(y, t, S_pred) 143 | ax.set_xlabel('y') 144 | ax.set_ylabel('t') 145 | ax.set_zlabel('s(y,t)') 146 | ax.set_title('Predicted') 147 | 148 | ax = fig_1.add_subplot(1, 3, 3, projection='3d') 149 | ax.plot_surface(y, t, np.abs(S_star - S_pred)) 150 | ax.set_xlabel('y') 151 | ax.set_ylabel('t') 152 | ax.set_zlabel('s(y,t)') 153 |
ax.set_title('Absolute Error') 154 | 155 | plt.tight_layout() 156 | plt.show() 157 | 158 | 159 | # Plot for u 160 | T_list = [0.2, 0.4, 0.6, 0.8] 161 | nn = 200 162 | x = np.linspace(0, 2.25, nn)[:, None] 163 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 164 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 165 | 166 | for T in T_list: 167 | X, Y = np.meshgrid(x, y) 168 | T_star = T * np.ones_like(X) 169 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None], T_star.flatten()[:, None])) 170 | 171 | u_star = u(X_star) 172 | u_pred = model.predict_u(X_star) 173 | 174 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None])) 175 | 176 | U_star = griddata(X_star, u_star.flatten(), (X, Y), method='cubic') 177 | U_pred = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') 178 | 179 | for i in range(nn): 180 | for j in range(nn): 181 | X_ij = np.array([X[i,j], Y[i,j], T]).reshape(1,3) 182 | u_ij = u(X_ij) 183 | s_ij = s(np.array([Y[i,j], T]).reshape(1,2)) 184 | if X[i,j] > s_ij: 185 | U_star[i,j] = np.nan 186 | U_pred[i,j] = np.nan 187 | 188 | np.savetxt('pred_{}'.format(str(T)), U_pred, delimiter=',') 189 | np.savetxt('exact_{}'.format(str(T)), U_star, delimiter=',') 190 | 191 | 192 | for T in T_list: 193 | U_pred = np.loadtxt('pred_{}'.format(str(T)), delimiter=',') 194 | U_star = np.loadtxt('exact_{}'.format(str(T)), delimiter=',') 195 | 196 | fig = plt.figure(figsize=(18, 5)) 197 | plt.subplot(1, 3, 1) 198 | plt.pcolor(X, Y, U_star, cmap='jet') 199 | plt.colorbar() 200 | plt.xlabel(r'$x$') 201 | plt.ylabel(r'$y$') 202 | plt.title('Exact $u(x, y, {})$'.format(T)) 203 | 204 | plt.subplot(1, 3, 2) 205 | plt.pcolor(X, Y, U_pred, cmap='jet') 206 | plt.colorbar() 207 | plt.xlabel(r'$x$') 208 | plt.ylabel(r'$y$') 209 | plt.title('Predicted $u(x, y, {})$'.format(T)) 210 | 211 | plt.subplot(1, 3, 3) 212 | plt.pcolor(X, Y, np.abs(U_star - U_pred), cmap='jet') 213 | plt.colorbar(format='%.0e') 214 | plt.xlabel(r'$x$') 215 | plt.ylabel(r'$y$') 216 | plt.title('Absolute Error') 217 | 218 | plt.tight_layout() 219 | plt.show() 220 | 221 | 222 | 223 | 224 | 225 | 226 | -------------------------------------------------------------------------------- /Stefan 2D1P/Stefan2D_inverse_II.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 23 16:26:40 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | from mpl_toolkits import mplot3d 11 | import matplotlib.pyplot as plt 12 | from scipy.interpolate import griddata 13 | import seaborn as sns 14 | from Stefan2D_models_tf import Sampler, DataSampler, Stefan2D_inverse_II 15 | import pandas as pd 16 | import os 17 | 18 | if __name__ == '__main__': 19 | 20 | # Exact u 21 | def u(z): 22 | # x = (x, y, t) 23 | x = z[:, 0: 1] 24 | y = z[:, 1: 2] 25 | t = z[:, 2: 3] 26 | 27 | u = np.exp(1.25 * t - x + 0.5 * y + 0.5) - 1 28 | return u 29 | 30 | # Exact s 31 | def s(z): 32 | y = z[:, 0: 1] 33 | t = z[:, 1: 2] 34 | s = 0.5 * y + 1.25 * t + 0.5 35 | return s 36 | 37 | def z(x): 38 | N = x.shape[0] 39 | return np.zeros((N, 1)) 40 | 41 | 42 | # Domain boundaries 43 | ic_coords = np.array([[0.0, 0.0, 0.0], 44 | [1.0, 1.0, 0.0]]) 45 | dom_coords = np.array([[0.0, 0.0, 0.0], 46 | [1.0, 1.0, 1.0]]) 47 | 48 | # Create boundary conditions samplers 49 | ics_sampler = Sampler(3, ic_coords, lambda x: z(x), name='Initial Condition') 50 | 51 | # Create residual sampler 52 | res_sampler = 
Sampler(3, dom_coords, lambda x: u(x), name='Forcing') 53 | 54 | data_X, data_u = res_sampler.sample(10**4) 55 | 56 | mask = data_X[:,0:1] < s(data_X[:,1:3]) 57 | data_X = data_X[mask[:,0]] 58 | data_u = data_u[mask[:,0]] 59 | 60 | num = 100 61 | data_X, data_u = DataSampler(data_X, data_u).sample(num) 62 | data_sampler = DataSampler(data_X, data_u) 63 | 64 | # Define model 65 | layers_u = [3, 100, 100, 100, 1] 66 | layers_s = [2, 100, 100, 100, 1] # or we can map s to (t, s(t)) 67 | model = Stefan2D_inverse_II(layers_u, layers_s, ics_sampler, res_sampler, data_sampler) 68 | 69 | model.train(nIter=200, batch_size=256) 70 | 71 | # Test data 72 | nn = 200 73 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 74 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 75 | y, t = np.meshgrid(y, t) 76 | 77 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 78 | 79 | s_star = s(X_star) 80 | 81 | # Predictions 82 | s_pred = model.predict_s(X_star) 83 | 84 | error_s = np.linalg.norm(s_star - s_pred, 2) / np.linalg.norm(s_star, 2) 85 | print('The relative error is: {:.4e}'.format(error_s)) 86 | 87 | 88 | # Plot for s 89 | nn = 200 90 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 91 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 92 | y, t = np.meshgrid(y, t) 93 | 94 | X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 95 | 96 | s_star = s(X_star) 97 | s_pred = model.predict_s(X_star) 98 | 99 | S_pred = griddata(X_star, s_pred.flatten(), (y, t), method='cubic') 100 | S_star = griddata(X_star, s_star.flatten(), (y, t), method='cubic') 101 | 102 | fig_1 = plt.figure(5, figsize=(18, 5)) 103 | ax = fig_1.add_subplot(1, 3, 1, projection='3d') 104 | ax.plot_surface(y, t, S_star) 105 | ax.set_xlabel('y') 106 | ax.set_ylabel('t') 107 | ax.set_zlabel('s(y,t)') 108 | ax.set_title('Exact') 109 | 110 | ax = fig_1.add_subplot(1, 3, 2, projection='3d') 111 | ax.plot_surface(y, t, S_pred) 112 | ax.set_xlabel('y') 113 | ax.set_ylabel('t') 114 | ax.set_zlabel('s(y,t)') 115 | ax.set_title('Predicted') 116 | 117 | ax = fig_1.add_subplot(1, 3, 3, projection='3d') 118 | ax.plot_surface(y, t, np.abs(S_star - S_pred)) 119 | ax.set_xlabel('y') 120 | ax.set_ylabel('t') 121 | ax.set_zlabel('s(y,t)') 122 | ax.set_title('Absolute Error') 123 | 124 | plt.tight_layout() 125 | plt.show() 126 | 127 | # Plot for u 128 | T_list = [0.2, 0.4, 0.6, 0.8] 129 | nn = 200 130 | x = np.linspace(0, 2.25, nn)[:, None] 131 | y = np.linspace(dom_coords[0, 1], dom_coords[1, 1], nn)[:, None] 132 | t = np.linspace(dom_coords[0, 2], dom_coords[1, 2], nn)[:, None] 133 | 134 | for T in T_list: 135 | X, Y = np.meshgrid(x, y) 136 | T_star = T * np.ones_like(X) 137 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None], T_star.flatten()[:, None])) 138 | 139 | u_star = u(X_star) 140 | u_pred = model.predict_u(X_star) 141 | 142 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None])) 143 | 144 | U_star = griddata(X_star, u_star.flatten(), (X, Y), method='cubic') 145 | U_pred = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') 146 | 147 | for i in range(nn): 148 | for j in range(nn): 149 | X_ij = np.array([X[i,j], Y[i,j], T]).reshape(1,3) 150 | u_ij = u(X_ij) 151 | s_ij = s(np.array([Y[i,j], T]).reshape(1,2)) 152 | if X[i,j] > s_ij: 153 | U_star[i,j] = np.nan 154 | U_pred[i,j] = np.nan 155 | 156 | np.savetxt('pred_{}'.format(str(T)), U_pred, delimiter=',') 157 | np.savetxt('exact_{}'.format(str(T)), U_star, delimiter=',') 158 | 159
| 160 | for T in T_list: 161 | U_pred = np.loadtxt('pred_{}'.format(str(T)), delimiter=',') 162 | U_star = np.loadtxt('exact_{}'.format(str(T)), delimiter=',') 163 | 164 | fig = plt.figure(figsize=(18, 5)) 165 | plt.subplot(1, 3, 1) 166 | plt.pcolor(X, Y, U_star, cmap='jet') 167 | plt.colorbar() 168 | plt.xlabel(r'$x$') 169 | plt.ylabel(r'$y$') 170 | plt.title('Exact $u(x, y, {})$'.format(T)) 171 | 172 | plt.subplot(1, 3, 2) 173 | plt.pcolor(X, Y, U_pred, cmap='jet') 174 | plt.colorbar() 175 | plt.xlabel(r'$x$') 176 | plt.ylabel(r'$y$') 177 | plt.title('Predicted $u(x, y, {})$'.format(T)) 178 | 179 | plt.subplot(1, 3, 3) 180 | plt.pcolor(X, Y, np.abs(U_star - U_pred), cmap='jet') 181 | plt.colorbar(format='%.0e') 182 | plt.xlabel(r'$x$') 183 | plt.ylabel(r'$y$') 184 | plt.title('Absolute Error') 185 | 186 | plt.tight_layout() 187 | plt.show() 188 | 189 | 190 | 191 | 192 | 193 | 194 | -------------------------------------------------------------------------------- /Stefan 2D1P/Stefan2D_models_tf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 23 16:28:51 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | import timeit 11 | 12 | 13 | class Sampler: 14 | # Initialize the class 15 | def __init__(self, dim, coords, func, name=None): 16 | self.dim = dim 17 | self.coords = coords 18 | self.func = func 19 | self.name = name 20 | 21 | def sample(self, N): 22 | x = self.coords[0:1, :] + (self.coords[1:2, :] - self.coords[0:1, :]) * np.random.uniform(0, 1, 23 | size=(N, self.dim)) 24 | y = self.func(x) 25 | return x, y 26 | 27 | 28 | class DataSampler: 29 | # Initialize the class 30 | def __init__(self, X, Y, name=None): 31 | self.X = X 32 | self.Y = Y 33 | self.N = self.X.shape[0] 34 | 35 | def sample(self, batch_size): 36 | idx = np.random.choice(self.N, batch_size, replace=True) 37 | X_batch = self.X[idx, :] 38 | Y_batch = self.Y[idx, :] 39 | return X_batch, Y_batch 40 | 41 | 42 | class Stefan2D_direct: 43 | def __init__(self, layers_u, layers_s, ics_sampler, bcs_sampler, res_sampler): 44 | # Normalization constants 45 | X, _ = res_sampler.sample(np.int32(1e5)) 46 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 47 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 48 | self.mu_y, self.sigma_y = self.mu_X[1], self.sigma_X[1] 49 | self.mu_t, self.sigma_t = self.mu_X[2], self.sigma_X[2] 50 | 51 | # Samplers 52 | self.ics_sampler = ics_sampler 53 | self.bcs_sampler = bcs_sampler 54 | self.res_sampler = res_sampler 55 | 56 | # Initialize network weights and biases 57 | self.layers_u = layers_u 58 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 59 | 60 | # Initialize encoder weights and biases 61 | self.layers_s = layers_s 62 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 63 | 64 | # Define Tensorflow session 65 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 66 | 67 | # Define placeholders and computational graph 68 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 69 | self.y_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 70 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 71 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 72 | self.s_tf = tf.placeholder(tf.float32, shape=(None, 1)) 73 | 74 | self.x_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 75 | self.y_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 76 | self.t_bc1_tf = tf.placeholder(tf.float32, 
shape=(None, 1)) 77 | self.u_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 78 | 79 | self.x_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 80 | self.y_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 81 | self.t_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 82 | self.u_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 83 | 84 | self.x_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 85 | self.y_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 86 | self.t_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 87 | self.u_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 88 | 89 | self.x_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 90 | self.y_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 91 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 92 | self.u_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 93 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 94 | 95 | self.y_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 96 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 97 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 98 | 99 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 100 | self.y_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 101 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 102 | 103 | # Evaluate predictions 104 | self.s_pred = self.net_s(self.y_u_tf, self.t_u_tf) 105 | self.s_0_pred = self.net_s(self.y_0_tf, self.t_0_tf) 106 | 107 | self.u_pred = self.net_u(self.x_u_tf, self.y_u_tf, self.t_u_tf) 108 | self.u_0_pred = self.net_u(self.x_0_tf, self.y_0_tf, self.t_0_tf) 109 | 110 | self.u_bc1_pred = self.net_u(self.x_bc1_tf, self.y_bc1_tf, self.t_bc1_tf) 111 | self.u_bc2_pred = self.net_u(self.x_bc2_tf, self.y_bc2_tf, self.t_bc2_tf) 112 | self.u_bc3_pred = self.net_u(self.x_bc3_tf, self.y_bc3_tf, self.t_bc3_tf) 113 | 114 | self.S_bc_pred = self.net_u((self.s_pred - self.mu_x) / self.sigma_x, 115 | self.y_u_tf, 116 | self.t_u_tf) 117 | self.r_Nc_pred = self.net_r_Nc(self.y_Nc_tf, self.t_Nc_tf) 118 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.y_r_tf, self.t_r_tf) 119 | 120 | # Stefan Boundary loss 121 | self.loss_Sbc = tf.reduce_mean(tf.square(self.S_bc_pred)) 122 | self.loss_s_0 = tf.reduce_mean(tf.square(self.s_0_pred - (0.5 * (self.y_0_tf * self.sigma_y + self.mu_y) + 0.5))) # s(y, 0) = y/2 + 1/2 123 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred)) 124 | 125 | # Boundary and Initial loss 126 | self.loss_u_bc1 = tf.reduce_mean(tf.square(self.u_bc1_pred - self.u_bc1_tf)) 127 | self.loss_u_bc2 = tf.reduce_mean(tf.square(self.u_bc2_pred - self.u_bc2_tf)) 128 | self.loss_u_bc3 = tf.reduce_mean(tf.square(self.u_bc3_pred - self.u_bc3_tf)) 129 | self.loss_u_ic = tf.reduce_mean(tf.square(self.u_0_pred - self.u_0_tf)) 130 | 131 | self.loss_u_bcs = self.loss_u_bc1 + self.loss_u_bc2 + self.loss_u_bc3 132 | # Stefan loss 133 | self.loss_Scs = self.loss_Sbc + self.loss_s_0 + self.loss_SNc 134 | 135 | # Residual loss 136 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) # u_t - u_xx - u_yy = 0 137 | 138 | # Total loss 139 | self.loss = self.loss_u_bcs + self.loss_u_ic + self.loss_Scs + self.loss_res 140 | 141 | # Define optimizer with learning rate schedule 142 | self.global_step = tf.Variable(0, trainable=False) 143 | starter_learning_rate = 1e-3 144 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 145 | 1000, 0.9, staircase=False) 146 | # Passing global_step to minimize() will increment it at each step.
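# Note on the schedule above: with decay_steps=1000, decay_rate=0.9 and staircase=False,
# tf.train.exponential_decay evaluates to lr(step) = 1e-3 * 0.9**(step / 1000), i.e. a smooth
# exponential decay of the Adam learning rate (lr(0) = 1e-3, lr(1000) = 9e-4, lr(10000) ~ 3.5e-4).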
147 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 148 | 149 | self.loss_u_bcs_log = [] 150 | self.loss_u_ic_log = [] 151 | self.loss_res_log = [] 152 | self.saver = tf.train.Saver() 153 | 154 | # Estimate the accuracy in the training 155 | y = np.linspace(0, 1, 100)[:, None] 156 | t = np.linspace(0, 1, 100)[:, None] 157 | y, t = np.meshgrid(y, t) 158 | 159 | self.X_star = np.hstack((y.flatten()[:, None], t.flatten()[:, None])) 160 | self.s_star = self.exact_s(self.X_star) 161 | 162 | # Initialize Tensorflow variables 163 | init = tf.global_variables_initializer() 164 | self.sess.run(init) 165 | 166 | # Xavier initialization 167 | def xavier_init(self, size): 168 | in_dim = size[0] 169 | out_dim = size[1] 170 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 171 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 172 | dtype=tf.float32) 173 | 174 | # Initialize network weights and biases using Xavier initialization 175 | def initialize_NN(self, layers): 176 | weights = [] 177 | biases = [] 178 | num_layers = len(layers) 179 | for l in range(0, num_layers - 1): 180 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 181 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 182 | weights.append(W) 183 | biases.append(b) 184 | return weights, biases 185 | 186 | # Evaluates the forward pass 187 | def forward_pass_u(self, H): 188 | num_layers = len(self.layers_u) 189 | for l in range(0, num_layers - 2): 190 | W = self.weights_u[l] 191 | b = self.biases_u[l] 192 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 193 | W = self.weights_u[-1] 194 | b = self.biases_u[-1] 195 | H = tf.add(tf.matmul(H, W), b) 196 | return H 197 | 198 | def forward_pass_s(self, H): 199 | num_layers = len(self.layers_s) 200 | for l in range(0, num_layers - 2): 201 | W = self.weights_s[l] 202 | b = self.biases_s[l] 203 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 204 | W = self.weights_s[-1] 205 | b = self.biases_s[-1] 206 | H = tf.add(tf.matmul(H, W), b) 207 | return H 208 | 209 | # Forward pass for u 210 | def net_u(self, x, y, t): 211 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 212 | return u 213 | 214 | # Forward pass for s 215 | def net_s(self, y, t): 216 | s = self.forward_pass_s(tf.concat([y, t], 1)) 217 | return s 218 | 219 | def exact_s(self, z): 220 | y = z[:, 0: 1] 221 | t = z[:, 1: 2] 222 | s = 0.5 * y + 1.25 * t + 0.5 223 | return s 224 | 225 | def net_u_x(self, x, y, t): 226 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 227 | u_x = tf.gradients(u, x)[0] / self.sigma_x 228 | u_y = tf.gradients(u, y)[0] / self.sigma_y 229 | return u_x, u_y 230 | 231 | # Forward pass for residual 232 | def net_r_u(self, x, y, t): 233 | u = self.net_u(x, y, t) 234 | u_t = tf.gradients(u, t)[0] / self.sigma_t 235 | 236 | u_x = tf.gradients(u, x)[0] / self.sigma_x 237 | u_y = tf.gradients(u, y)[0] / self.sigma_y 238 | 239 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 240 | u_yy = tf.gradients(u_y, y)[0] / self.sigma_y 241 | residual = u_t - u_xx - u_yy 242 | return residual 243 | 244 | def net_r_Nc(self, y, t): 245 | s = self.net_s(y, t) 246 | s_y = tf.gradients(s, y)[0] / self.sigma_y 247 | s_t = tf.gradients(s, t)[0] / self.sigma_t 248 | 249 | # Normalizing s 250 | s = (s - self.mu_x) / self.sigma_x 251 | u_x, u_y = self.net_u_x(s, y, t) 252 | 253 | residual = u_x - u_y * s_y + s_t 254 | return residual 255 | 256 | def fetch_minibatch(self, sampler, N): 257 | X, Y = sampler.sample(N) 
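# The raw samples are standardized below using mu_X/sigma_X estimated from res_sampler in
# __init__ (zero mean, unit variance per coordinate); because the networks only ever see
# normalized inputs, every derivative in net_r_u and net_r_Nc is divided by the matching sigma
# (chain rule for the change of variables x_norm = (x - mu_x) / sigma_x).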
258 | X = (X - self.mu_X) / self.sigma_X 259 | return X, Y 260 | 261 | def train(self, nIter=10000, batch_size=128): 262 | start_time = timeit.default_timer() 263 | for it in range(nIter): 264 | # Fetch boundary and data mini-batches 265 | X_0_batch, u_0_batch = self.fetch_minibatch(self.ics_sampler, batch_size) 266 | X_bc1_batch, u_bc1_batch = self.fetch_minibatch(self.bcs_sampler[0], batch_size) 267 | X_bc2_batch, u_bc2_batch = self.fetch_minibatch(self.bcs_sampler[1], batch_size) 268 | X_bc3_batch, u_bc3_batch = self.fetch_minibatch(self.bcs_sampler[2], batch_size) 269 | # Fetch residual mini-batch 270 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 271 | 272 | # Define a dictionary for associating placeholders with data 273 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.y_u_tf: X_res_batch[:, 1:2], 274 | self.t_u_tf: X_res_batch[:, 2:3], 275 | self.x_0_tf: X_0_batch[:, 0:1], self.y_0_tf: X_0_batch[:, 1:2], 276 | self.t_0_tf: X_0_batch[:, 2:3], self.u_0_tf: u_0_batch, 277 | self.x_bc1_tf: X_bc1_batch[:, 0:1], self.y_bc1_tf: X_bc1_batch[:, 1:2], 278 | self.t_bc1_tf: X_bc1_batch[:, 2:3], self.u_bc1_tf: u_bc1_batch, 279 | self.x_bc2_tf: X_bc2_batch[:, 0:1], self.y_bc2_tf: X_bc2_batch[:, 1:2], 280 | self.t_bc2_tf: X_bc2_batch[:, 2:3], self.u_bc2_tf: u_bc2_batch, 281 | self.x_bc3_tf: X_bc3_batch[:, 0:1], self.y_bc3_tf: X_bc3_batch[:, 1:2], 282 | self.t_bc3_tf: X_bc3_batch[:, 2:3], self.u_bc3_tf: u_bc3_batch, 283 | self.y_Nc_tf: X_res_batch[:, 1:2], self.t_Nc_tf: X_res_batch[:, 2:3], 284 | self.x_r_tf: X_res_batch[:, 0:1], self.y_r_tf: X_res_batch[:, 1:2], 285 | self.t_r_tf: X_res_batch[:, 2:3]} 286 | 287 | self.sess.run(self.train_op, tf_dict) 288 | 289 | # Print 290 | if it % 10 == 0: 291 | elapsed = timeit.default_timer() - start_time 292 | loss_value = self.sess.run(self.loss, tf_dict) 293 | loss_u_bcs_value, loss_u_ic_value, loss_res_value = self.sess.run( 294 | [self.loss_u_bcs, self.loss_u_ic, self.loss_res], tf_dict) 295 | self.loss_u_bcs_log.append(loss_u_bcs_value) 296 | self.loss_u_ic_log.append(loss_u_ic_value) 297 | self.loss_res_log.append(loss_res_value) 298 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_ics: %.3e, Loss_res: %.3e, Time: %.2f' % 299 | (it, loss_value, loss_u_bcs_value, loss_u_ic_value, loss_res_value, elapsed)) 300 | start_time = timeit.default_timer() 301 | 302 | if it % 100 ==0: 303 | s_pred = self.predict_s(self.X_star) 304 | error_s = np.linalg.norm(self.s_star - s_pred, 2) / np.linalg.norm(self.s_star, 2) 305 | print("Free boundary L2 error: {:.3e}".format(error_s)) 306 | 307 | # Predictions for u 308 | def predict_u(self, X_star): 309 | X_star = (X_star - self.mu_X) / self.sigma_X 310 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.y_u_tf: X_star[:, 1:2], self.t_u_tf: X_star[:, 2:3]} 311 | u_star = self.sess.run(self.u_pred, tf_dict) 312 | return u_star 313 | 314 | # Predictions for s 315 | def predict_s(self, X_star): 316 | X_star = (X_star - self.mu_X[1:3]) / self.sigma_X[1:3] 317 | tf_dict = {self.y_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 318 | s_star = self.sess.run(self.s_pred, tf_dict) 319 | return s_star 320 | 321 | 322 | class Stefan2D_inverse_I: 323 | def __init__(self, layers_u, layers_s, ics_sampler, ft_sampler, res_sampler): 324 | # Normalization constants 325 | X, _ = res_sampler.sample(np.int32(1e5)) 326 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 327 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 328 | self.mu_y, self.sigma_y = self.mu_X[1], self.sigma_X[1] 329 | self.mu_t, self.sigma_t 
= self.mu_X[2], self.sigma_X[2] 330 | 331 | # Samplers 332 | self.ics_sampler = ics_sampler 333 | self.ft_sampler = ft_sampler 334 | self.res_sampler = res_sampler 335 | 336 | # Initialize network weights and biases 337 | self.layers_u = layers_u 338 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 339 | 340 | self.layers_s = layers_s 341 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 342 | 343 | # Define Tensorflow session 344 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 345 | 346 | # Define placeholders and computational graph 347 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 348 | self.y_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 349 | self.t_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 350 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # u(x,y,t) 351 | 352 | self.x_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 353 | self.y_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 354 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 355 | self.u_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # u(x,y,0) 356 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(y,0) 357 | 358 | self.x_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 359 | self.y_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) 360 | self.t_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = T 361 | self.u_T_tf = tf.placeholder(tf.float32, shape=(None, 1)) # u(x,y,T) 362 | 363 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 364 | self.y_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 365 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 366 | 367 | # Evaluate predictions 368 | self.s_pred = self.net_s(self.y_u_tf, self.t_u_tf) 369 | self.s_0_pred = self.net_s(self.y_0_tf, self.t_0_tf) 370 | 371 | self.u_pred = self.net_u(self.x_u_tf, self.y_u_tf, self.t_u_tf) 372 | self.u_0_pred = self.net_u(self.x_0_tf, self.y_0_tf, self.t_0_tf) 373 | self.u_T_pred = self.net_u(self.x_T_tf, self.y_T_tf, self.t_T_tf) 374 | 375 | self.S_bc_pred = self.net_u((self.s_pred - self.mu_x) / self.sigma_x, 376 | self.y_u_tf, 377 | self.t_u_tf) 378 | self.r_Nc_pred = self.net_r_Nc(self.y_r_tf, self.t_r_tf) 379 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.y_r_tf, self.t_r_tf) 380 | 381 | # Stefan Boundary loss 382 | self.loss_s_0 = tf.reduce_mean(tf.square(self.s_0_pred - (0.5 * (self.y_0_tf * self.sigma_y + self.mu_y) + 0.5))) # s(y, 0) = y/2 + 1/2 383 | self.loss_Sbc = tf.reduce_mean(tf.square(self.S_bc_pred)) 384 | self.loss_SNc = tf.reduce_mean(tf.square(self.r_Nc_pred)) 385 | 386 | # Initial loss 387 | self.loss_u_0 = tf.reduce_mean(tf.square(self.u_0_pred - self.u_0_tf)) 388 | 389 | # Final Time loss 390 | self.loss_u_T = tf.reduce_mean(tf.square(self.u_T_pred - self.u_T_tf)) 391 | 392 | self.loss_u = self.loss_u_0 + self.loss_u_T 393 | 394 | # Stefan loss 395 | self.loss_Scs = self.loss_s_0 + self.loss_Sbc + self.loss_SNc 396 | # Neumann condition is important!
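# In unnormalized variables, the Stefan losses above enforce:
#   s(y, 0) = y/2 + 1/2 (loss_s_0: initial position of the free boundary),
#   u(s(y,t), y, t) = 0 (loss_Sbc: zero temperature on the moving front),
#   s_t = -u_x + u_y * s_y on x = s(y,t) (loss_SNc: the net_r_Nc residual u_x - u_y * s_y + s_t).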
397 | 398 | # Residual loss 399 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) # u_t - u_xx - u_yy = 0 400 | 401 | # Total loss 402 | self.loss = self.loss_u + self.loss_Scs + self.loss_res 403 | 404 | # Define optimizer with learning rate schedule 405 | self.global_step = tf.Variable(0, trainable=False) 406 | starter_learning_rate = 1e-3 407 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 408 | 1000, 0.9, staircase=False) 409 | # Passing global_step to minimize() will increment it at each step. 410 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 411 | 412 | self.loss_u_log = [] 413 | self.loss_res_log = [] 414 | self.saver = tf.train.Saver() 415 | 416 | # Initialize Tensorflow variables 417 | init = tf.global_variables_initializer() 418 | self.sess.run(init) 419 | 420 | 421 | # Xavier initialization 422 | def xavier_init(self, size): 423 | in_dim = size[0] 424 | out_dim = size[1] 425 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 426 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 427 | dtype=tf.float32) 428 | 429 | # Initialize network weights and biases using Xavier initialization 430 | def initialize_NN(self, layers): 431 | weights = [] 432 | biases = [] 433 | num_layers = len(layers) 434 | for l in range(0, num_layers - 1): 435 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 436 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 437 | weights.append(W) 438 | biases.append(b) 439 | return weights, biases 440 | 441 | # Evaluates the forward pass 442 | def forward_pass_u(self, H): 443 | num_layers = len(self.layers_u) 444 | for l in range(0, num_layers - 2): 445 | W = self.weights_u[l] 446 | b = self.biases_u[l] 447 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 448 | W = self.weights_u[-1] 449 | b = self.biases_u[-1] 450 | H = tf.add(tf.matmul(H, W), b) 451 | return H 452 | 453 | def forward_pass_s(self, H): 454 | num_layers = len(self.layers_s) 455 | for l in range(0, num_layers - 2): 456 | W = self.weights_s[l] 457 | b = self.biases_s[l] 458 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 459 | W = self.weights_s[-1] 460 | b = self.biases_s[-1] 461 | H = tf.add(tf.matmul(H, W), b) 462 | return H 463 | 464 | # Forward pass for u 465 | def net_u(self, x, y, t): 466 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 467 | return u 468 | 469 | # Forward pass for s 470 | def net_s(self, y, t): 471 | s = self.forward_pass_s(tf.concat([y, t], 1)) 472 | return s 473 | 474 | def net_u_x(self, x, y, t): 475 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 476 | u_x = tf.gradients(u, x)[0] / self.sigma_x 477 | u_y = tf.gradients(u, y)[0] / self.sigma_y 478 | return u_x, u_y 479 | 480 | # Forward pass for residual 481 | def net_r_u(self, x, y, t): 482 | u = self.net_u(x, y, t) 483 | u_t = tf.gradients(u, t)[0] / self.sigma_t 484 | 485 | u_x = tf.gradients(u, x)[0] / self.sigma_x 486 | u_y = tf.gradients(u, y)[0] / self.sigma_y 487 | 488 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 489 | u_yy = tf.gradients(u_y, y)[0] / self.sigma_y 490 | residual = u_t - u_xx - u_yy 491 | return residual 492 | 493 | def net_r_Nc(self, y, t): 494 | s = self.net_s(y, t) 495 | s_y = tf.gradients(s, y)[0] / self.sigma_y 496 | s_t = tf.gradients(s, t)[0] / self.sigma_t 497 | 498 | # Normalizing s 499 | s = (s - self.mu_x) / self.sigma_x 500 | 501 | u_x, u_y = self.net_u_x(s, y, t) 502 | 503 | residual = u_x -
tf.multiply(u_y, s_y) + s_t 504 | return residual 505 | 506 | 507 | def fetch_minibatch(self, sampler, N): 508 | X, Y = sampler.sample(N) 509 | X = (X - self.mu_X) / self.sigma_X 510 | return X, Y 511 | 512 | def train(self, nIter=10000, batch_size=128): 513 | start_time = timeit.default_timer() 514 | for it in range(nIter): 515 | # Fetch boundary and data mini-batches 516 | X_0_batch, u_0_batch = self.fetch_minibatch(self.ics_sampler, batch_size) 517 | X_T_batch, u_T_batch = self.fetch_minibatch(self.ft_sampler, batch_size) 518 | # Fetch residual mini-batch 519 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 520 | 521 | # Define a dictionary for associating placeholders with data 522 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.y_u_tf: X_res_batch[:, 1:2], 523 | self.t_u_tf: X_res_batch[:, 2:3], 524 | self.x_0_tf: X_0_batch[:, 0:1], self.y_0_tf: X_0_batch[:, 1:2], 525 | self.t_0_tf: X_0_batch[:, 2:3], self.u_0_tf: u_0_batch, 526 | self.x_T_tf: X_T_batch[:, 0:1], self.y_T_tf: X_T_batch[:, 1:2], 527 | self.t_T_tf: X_T_batch[:, 2:3], self.u_T_tf: u_T_batch, 528 | self.x_r_tf: X_res_batch[:, 0:1], self.y_r_tf: X_res_batch[:, 1:2], 529 | self.t_r_tf: X_res_batch[:, 2:3]} 530 | 531 | self.sess.run(self.train_op, tf_dict) 532 | 533 | # Print 534 | if it % 10 == 0: 535 | elapsed = timeit.default_timer() - start_time 536 | loss_value = self.sess.run(self.loss, tf_dict) 537 | loss_u_value, loss_res_value = self.sess.run( 538 | [self.loss_u, self.loss_res], tf_dict) 539 | self.loss_u_log.append(loss_u_value) 540 | self.loss_res_log.append(loss_res_value) 541 | print('It: %d, Loss: %.3e, Loss_U: %.3e, Loss_res: %.3e, Time: %.2f' % 542 | (it, loss_value, loss_u_value, loss_res_value, elapsed)) 543 | start_time = timeit.default_timer() 544 | 545 | 546 | # Predictions for u 547 | def predict_u(self, X_star): 548 | X_star = (X_star - self.mu_X) / self.sigma_X 549 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.y_u_tf: X_star[:, 1:2], self.t_u_tf: X_star[:, 2:3]} 550 | u_star = self.sess.run(self.u_pred, tf_dict) 551 | return u_star 552 | 553 | # Predictions for s 554 | def predict_s(self, X_star): 555 | X_star = (X_star - self.mu_X[1:3]) / self.sigma_X[1:3] 556 | tf_dict = {self.y_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 557 | s_star = self.sess.run(self.s_pred, tf_dict) 558 | return s_star 559 | 560 | 561 | class Stefan2D_inverse_II: 562 | def __init__(self, layers_u, layers_s, ics_sampler, res_sampler, data_sampler): 563 | # Normalization constants 564 | X, _ = res_sampler.sample(np.int32(1e5)) 565 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 566 | self.mu_x, self.sigma_x = self.mu_X[0], self.sigma_X[0] 567 | self.mu_y, self.sigma_y = self.mu_X[1], self.sigma_X[1] 568 | self.mu_t, self.sigma_t = self.mu_X[2], self.sigma_X[2] 569 | 570 | # Samplers 571 | self.ics_sampler = ics_sampler 572 | self.res_sampler = res_sampler 573 | self.data_sampler = data_sampler 574 | 575 | # Initialize network weights and biases 576 | self.layers_u = layers_u 577 | self.weights_u, self.biases_u = self.initialize_NN(layers_u) 578 | 579 | self.layers_s = layers_s 580 | self.weights_s, self.biases_s = self.initialize_NN(layers_s) 581 | 582 | # Define Tensorflow session 583 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 584 | 585 | # Define placeholders and computational graph 586 | self.x_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 587 | self.y_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 588 | self.t_u_tf = 
tf.placeholder(tf.float32, shape=(None, 1)) # [0, 1] 589 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) # u(x,y,t) 590 | self.s_tf = tf.placeholder(tf.float32, shape=(None, 1)) 591 | 592 | self.x_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 593 | self.y_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 594 | self.t_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 595 | self.u_data_tf = tf.placeholder(tf.float32, shape=(None, 1)) 596 | 597 | self.y_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) 598 | self.t_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # t = 0 599 | self.s_0_tf = tf.placeholder(tf.float32, shape=(None, 1)) # s(y,0) 600 | 601 | self.y_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 602 | self.t_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 603 | self.u_Nc_tf = tf.placeholder(tf.float32, shape=(None, 1)) 604 | 605 | self.x_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 606 | self.y_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 607 | self.t_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 608 | 609 | # Evaluate predictions 610 | self.s_pred = self.net_s(self.y_u_tf, self.t_u_tf) 611 | self.u_pred = self.net_u(self.x_u_tf, self.y_u_tf, self.t_u_tf) 612 | 613 | self.u_data_pred = self.net_u(self.x_data_tf, self.y_data_tf, self.t_data_tf) 614 | self.u_bc_pred = self.net_u((self.s_pred - self.mu_x) / self.sigma_x, 615 | self.y_u_tf, 616 | self.t_u_tf) 617 | self.r_Nc_pred = self.net_r_Nc(self.y_Nc_tf, self.t_Nc_tf) 618 | self.r_u_pred = self.net_r_u(self.x_r_tf, self.y_r_tf, self.t_r_tf) 619 | 620 | # Stefan Boundary loss 621 | self.loss_bc = tf.reduce_mean(tf.square(self.u_bc_pred)) 622 | self.loss_s_0 = tf.reduce_mean(tf.square(self.net_s(self.y_0_tf, self.t_0_tf) - 623 | (0.5 * (self.y_0_tf * self.sigma_y + self.mu_y) + 0.5))) # s(y, 0) = y/2 + 1/2 624 | self.loss_Nc = tf.reduce_mean(tf.square(self.r_Nc_pred)) 625 | 626 | # Data loss 627 | self.loss_data = tf.reduce_mean(tf.square(self.u_data_pred - self.u_data_tf)) 628 | 629 | # Boundary loss 630 | self.loss_bcs = self.loss_bc + self.loss_s_0 + self.loss_Nc 631 | # Neumann condition is important! 632 | 633 | # Residual loss 634 | self.loss_res = tf.reduce_mean(tf.square(self.r_u_pred)) # u_t - u_xx - u_yy = 0 635 | 636 | # Total loss 637 | self.loss = self.loss_bcs + self.loss_res + self.loss_data 638 | 639 | # Define optimizer with learning rate schedule 640 | self.global_step = tf.Variable(0, trainable=False) 641 | starter_learning_rate = 1e-3 642 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 643 | 1000, 0.9, staircase=False) 644 | # Passing global_step to minimize() will increment it at each step. 645 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 646 | 647 | self.loss_bcs_log = [] 648 | self.loss_data_log = [] 649 | self.loss_res_log = [] 650 | self.saver = tf.train.Saver() 651 | 652 | # Initialize Tensorflow variables 653 | init = tf.global_variables_initializer() 654 | self.sess.run(init) 655 | 656 | 657 | # Xavier initialization 658 | def xavier_init(self, size): 659 | in_dim = size[0] 660 | out_dim = size[1] 661 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.)
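# Note: 1. / np.sqrt((in_dim + out_dim) / 2.) equals sqrt(2 / (in_dim + out_dim)), the
# Glorot/Xavier scale for the normal initializer returned below; it keeps activation
# variances roughly constant across the tanh layers.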
662 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 663 | dtype=tf.float32) 664 | 665 | # Initialize network weights and biases using Xavier initialization 666 | def initialize_NN(self, layers): 667 | weights = [] 668 | biases = [] 669 | num_layers = len(layers) 670 | for l in range(0, num_layers - 1): 671 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 672 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 673 | weights.append(W) 674 | biases.append(b) 675 | return weights, biases 676 | 677 | # Evaluates the forward pass 678 | def forward_pass_u(self, H): 679 | num_layers = len(self.layers_u) 680 | for l in range(0, num_layers - 2): 681 | W = self.weights_u[l] 682 | b = self.biases_u[l] 683 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 684 | W = self.weights_u[-1] 685 | b = self.biases_u[-1] 686 | H = tf.add(tf.matmul(H, W), b) 687 | return H 688 | 689 | def forward_pass_s(self, H): 690 | num_layers = len(self.layers_s) 691 | for l in range(0, num_layers - 2): 692 | W = self.weights_s[l] 693 | b = self.biases_s[l] 694 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 695 | W = self.weights_s[-1] 696 | b = self.biases_s[-1] 697 | H = tf.add(tf.matmul(H, W), b) 698 | return H 699 | 700 | # Forward pass for u 701 | def net_u(self, x, y, t): 702 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 703 | return u 704 | 705 | def net_s(self, y, t): 706 | s = self.forward_pass_s(tf.concat([y, t], 1)) 707 | return s 708 | 709 | def net_u_x(self, x, y, t): 710 | u = self.forward_pass_u(tf.concat([x, y, t], 1)) 711 | u_x = tf.gradients(u, x)[0] / self.sigma_x 712 | u_y = tf.gradients(u, y)[0] / self.sigma_y 713 | return u_x, u_y 714 | 715 | # Forward pass for residual 716 | def net_r_u(self, x, y, t): 717 | u = self.net_u(x, y, t) 718 | u_t = tf.gradients(u, t)[0] / self.sigma_t 719 | 720 | u_x = tf.gradients(u, x)[0] / self.sigma_x 721 | u_y = tf.gradients(u, y)[0] / self.sigma_y 722 | 723 | u_xx = tf.gradients(u_x, x)[0] / self.sigma_x 724 | u_yy = tf.gradients(u_y, y)[0] / self.sigma_y 725 | residual = u_t - u_xx - u_yy 726 | return residual 727 | 728 | def net_r_Nc(self, y, t): 729 | s = self.net_s(y, t) 730 | s_y = tf.gradients(s, y)[0] / self.sigma_y 731 | s_t = tf.gradients(s, t)[0] / self.sigma_t 732 | 733 | # Normalizing s 734 | s = (s - self.mu_x) / self.sigma_x 735 | u_x, u_y = self.net_u_x(s, y, t) 736 | 737 | residual = u_x - tf.multiply(u_y, s_y) + s_t 738 | return residual 739 | 740 | def fetch_minibatch(self, sampler, N): 741 | X, Y = sampler.sample(N) 742 | X = (X - self.mu_X) / self.sigma_X 743 | return X, Y 744 | 745 | def train(self, nIter=10000, batch_size=128): 746 | start_time = timeit.default_timer() 747 | for it in range(nIter): 748 | # Fetch boundary and data mini-batches 749 | X_0_batch, _ = self.fetch_minibatch(self.ics_sampler, batch_size) 750 | X_data_batch, u_data_batch = self.fetch_minibatch(self.data_sampler, batch_size) 751 | 752 | # Fetch residual mini-batch 753 | X_res_batch, _ = self.fetch_minibatch(self.res_sampler, batch_size) 754 | 755 | # Define a dictionary for associating placeholders with data 756 | tf_dict = {self.x_u_tf: X_res_batch[:, 0:1], self.y_u_tf: X_res_batch[:, 1:2], 757 | self.t_u_tf: X_res_batch[:, 2:3], 758 | self.y_0_tf: X_0_batch[:, 1:2], self.t_0_tf: X_0_batch[:, 2:3], 759 | self.y_Nc_tf: X_res_batch[:, 1:2], self.t_Nc_tf: X_res_batch[:, 2:3], 760 | self.x_r_tf: X_res_batch[:, 0:1], self.y_r_tf: X_res_batch[:, 1:2], 761 | self.t_r_tf: X_res_batch[:, 2:3], 762 | 
self.x_data_tf: X_data_batch[:, 0:1], self.y_data_tf: X_data_batch[:, 1:2], 763 | self.t_data_tf: X_data_batch[:, 2:3], self.u_data_tf: u_data_batch} 764 | 765 | self.sess.run(self.train_op, tf_dict) 766 | 767 | # Print 768 | if it % 10 == 0: 769 | elapsed = timeit.default_timer() - start_time 770 | loss_value = self.sess.run(self.loss, tf_dict) 771 | loss_bcs_value, loss_data_value, loss_res_value = self.sess.run( 772 | [self.loss_bcs, self.loss_data, self.loss_res], tf_dict) 773 | self.loss_bcs_log.append(loss_bcs_value) 774 | self.loss_data_log.append(loss_data_value) 775 | self.loss_res_log.append(loss_res_value) 776 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_Data: %.3e, Loss_res: %.3e, Time: %.2f' % 777 | (it, loss_value, loss_bcs_value, loss_data_value, loss_res_value, elapsed)) 778 | start_time = timeit.default_timer() 779 | 780 | # Predictions for u 781 | def predict_u(self, X_star): 782 | X_star = (X_star - self.mu_X) / self.sigma_X 783 | tf_dict = {self.x_u_tf: X_star[:, 0:1], self.y_u_tf: X_star[:, 1:2], self.t_u_tf: X_star[:, 2:3]} 784 | u_star = self.sess.run(self.u_pred, tf_dict) 785 | return u_star 786 | 787 | # Predictions for s 788 | def predict_s(self, X_star): 789 | X_star = (X_star - self.mu_X[1:3]) / self.sigma_X[1:3] 790 | tf_dict = {self.y_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]} 791 | s_star = self.sess.run(self.s_pred, tf_dict) 792 | return s_star 793 | 794 | 795 | 796 | 797 | 798 | 799 | 800 | 801 | 802 | --------------------------------------------------------------------------------