├── lab report by ZRH.pdf
└── burgers_allen.py

/lab report by ZRH.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hannah-Zhou/Mathematics-Experiment/HEAD/lab report by ZRH.pdf

--------------------------------------------------------------------------------
/burgers_allen.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# coding: utf-8

# In[10]:


import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import os
from pyDOE import lhs
import shutil

import argparse


# ## 1. Basic network architectures

# Two fully-connected feedforward architectures are provided: a plain network and a
# variant with residual connections. Either one can be used in the experiments below.

# ### DNN

# In[11]:


def activation(name):
    if name in ['tanh', 'TANH']:
        return nn.Tanh()
    elif name in ['relu', 'RELU']:
        return nn.ReLU(inplace=True)
    elif name in ['leaky_relu', 'LeakyReLU']:
        return nn.LeakyReLU(inplace=True)
    elif name in ['sigmoid', 'SIGMOID']:
        return nn.Sigmoid()
    elif name in ['softplus', 'SOFTPLUS']:
        return nn.Softplus()
    else:
        raise ValueError(f'unknown activation function: {name}')


# In[12]:


class DNN(nn.Module):
    """Deep Neural Network"""

    def __init__(self, dim_in, dim_out, dim_hidden, hidden_layers,
                 act_name='tanh', init_name=None):
        super().__init__()
        model = nn.Sequential()

        model.add_module('fc0', nn.Linear(dim_in, dim_hidden, bias=True))
        model.add_module('act0', activation(act_name))

        for i in range(1, hidden_layers):
            model.add_module(f'fc{i}', nn.Linear(dim_hidden, dim_hidden, bias=True))
            model.add_module(f'act{i}', activation(act_name))

        model.add_module(f'fc{hidden_layers}', nn.Linear(dim_hidden, dim_out, bias=True))

        self.model = model

        if init_name is not None:
            self.init_weight(init_name)

    def init_weight(self, name):
        if name == 'xavier_normal':
            nn_init = nn.init.xavier_normal_
        elif name == 'xavier_uniform':
            nn_init = nn.init.xavier_uniform_
        elif name == 'kaiming_normal':
            nn_init = nn.init.kaiming_normal_
        elif name == 'kaiming_uniform':
            nn_init = nn.init.kaiming_uniform_
        else:
            raise ValueError(f'unknown initialization function: {name}')

        # initialize weight matrices only; biases keep their default init
        for param in self.parameters():
            if len(param.shape) > 1:
                nn_init(param)

    def forward(self, x):
        return self.model(x)

    def model_size(self):
        """Total number of trainable parameters."""
        n_params = 0
        for param in self.parameters():
            n_params += param.numel()
        return n_params
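# A quick smoke test (added for illustration; not part of the original notebook):
# instantiate a small DNN and push a dummy batch through it to confirm the output
# shape and the parameter count.

# In[ ]:


dnn_demo = DNN(dim_in=2, dim_out=1, dim_hidden=10, hidden_layers=4,
               act_name='tanh', init_name='xavier_normal')
print(dnn_demo.model_size())
print(dnn_demo(torch.rand(5, 2)).shape)   # expected: torch.Size([5, 1])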
# ### ResDNN

# In[13]:


class ResBlock(nn.Module):
    """Residual Block"""

    def __init__(self, dim_in, dim_out, dim_hidden, act_name='tanh'):
        super().__init__()

        # the skip connection requires matching input/output dimensions
        assert dim_in == dim_out

        block = nn.Sequential()
        block.add_module('act0', activation(act_name))
        block.add_module('fc0', nn.Linear(dim_in, dim_hidden, bias=True))
        block.add_module('act1', activation(act_name))
        block.add_module('fc1', nn.Linear(dim_hidden, dim_out, bias=True))
        self.block = block

    def forward(self, x):
        identity = x
        out = self.block(x)
        return identity + out


# In[ ]:


class ResDNN(nn.Module):
    """Residual Deep Neural Network"""

    def __init__(self, dim_in, dim_out, dim_hidden, res_blocks,
                 act_name='tanh', init_name='kaiming_normal'):
        super().__init__()

        model = nn.Sequential()
        model.add_module('fc_first', nn.Linear(dim_in, dim_hidden, bias=True))

        for i in range(res_blocks):
            res_block = ResBlock(dim_hidden, dim_hidden, dim_hidden, act_name=act_name)
            model.add_module(f'res_block{i+1}', res_block)

        model.add_module('act_last', activation(act_name))
        model.add_module('fc_last', nn.Linear(dim_hidden, dim_out, bias=True))

        self.model = model

        if init_name is not None:
            self.init_weight(init_name)

    def init_weight(self, name):
        if name == 'xavier_normal':
            nn_init = nn.init.xavier_normal_
        elif name == 'xavier_uniform':
            nn_init = nn.init.xavier_uniform_
        elif name == 'kaiming_normal':
            nn_init = nn.init.kaiming_normal_
        elif name == 'kaiming_uniform':
            nn_init = nn.init.kaiming_uniform_
        else:
            raise ValueError(f'unknown initialization function: {name}')

        for param in self.parameters():
            if len(param.shape) > 1:
                nn_init(param)

    def forward(self, x):
        return self.model(x)

    def model_size(self):
        n_params = 0
        for param in self.parameters():
            n_params += param.numel()
        return n_params
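# Illustrative comparison (added; not in the original notebook): each residual block
# contains two linear layers, so at the same width a ResDNN with k blocks has roughly
# the parameter count of a plain DNN with 2k hidden layers.

# In[ ]:


resdnn_demo = ResDNN(dim_in=2, dim_out=1, dim_hidden=10, res_blocks=4)
print(resdnn_demo.model_size(), DNN(2, 1, 10, 8).model_size())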
# ## 2. The Burgers equation

# Consider the one-dimensional Burgers equation:
# $$
# \left\{
# \begin{array}{rl}
# u_t + u u_x - \frac{0.01}{\pi} u_{xx} = 0, & x \in [-1, 1], ~~ t \in [0, 1], \\
# u(0, x) = -\sin(\pi x), & \\
# u(t, -1) = u(t, 1) = 0.
# \end{array}
# \right.
# $$

# ### 2.1 Problem description

# In[6]:


class Problem_Burgers(object):
    """Description of Burgers Equation"""
    def __init__(self, domain=(0, 1, -1, 1)):
        # domain = (t_min, t_max, x_min, x_max)
        self.domain = domain

    def __repr__(self):
        return f'{self.__doc__}'

    def iv(self, x):
        """Initial value u(0, x) = -sin(pi x); column 1 holds x."""
        iv = -np.sin(np.pi * x[:, [1]])
        return iv

    def bv(self, x):
        """Homogeneous Dirichlet boundary value."""
        bv = np.zeros_like(x[:, [0]])
        return bv

    def epsilon(self):
        """Viscosity coefficient 0.01 / pi."""
        return 0.01 / np.pi


# In[7]:


problem = Problem_Burgers()


# ### 2.2 Dataset generation

# In[8]:


class Trainset_Burgers(object):
    def __init__(self, problem, *args, **kwargs):
        self.problem = problem
        self.domain = problem.domain
        self.args = args
        self.method = kwargs['method']

    def __call__(self, plot=False, verbose=None):
        if self.method == 'uniform':
            n_t, n_x = self.args[0], self.args[1]
            p, p_bc, p_ic = self._uniform_sample(n_t, n_x)
        elif self.method == 'lhs':
            n, n_bc, n_ic = self.args[0], self.args[1], self.args[2]
            p, p_bc, p_ic = self._lhs_sample(n, n_bc, n_ic)

        bv = self.problem.bv(p_bc)
        iv = self.problem.iv(p_ic)

        if plot:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(p[:, 0], p[:, 1], facecolor='r', s=10)
            ax.scatter(p_bc[:, 0], p_bc[:, 1], facecolor='b', s=10)
            ax.scatter(p_ic[:, 0], p_ic[:, 1], facecolor='g', s=10)
            ax.set_xlim(-0.01, 1.01)
            ax.set_ylim(-1.01, 1.01)
            ax.set_aspect('equal')
            plt.show()

        if verbose == 'tensor':
            p = torch.from_numpy(p).float()
            p_bc = torch.from_numpy(p_bc).float()
            p_ic = torch.from_numpy(p_ic).float()
            bv = torch.from_numpy(bv).float()
            iv = torch.from_numpy(iv).float()
        return p, p_bc, p_ic, bv, iv

    def _uniform_sample(self, n_t, n_x):
        t_min, t_max, x_min, x_max = self.domain
        t = np.linspace(t_min, t_max, n_t)
        x = np.linspace(x_min, x_max, n_x)
        t, x = np.meshgrid(t, x)
        tx = np.hstack((t.reshape(t.size, -1), x.reshape(x.size, -1)))

        mask_ic = (tx[:, 0] - t_min) == 0
        mask_bc = (tx[:, 1] - x_min) * (x_max - tx[:, 1]) == 0
        p_ic = tx[mask_ic]
        p_bc = tx[mask_bc]
        # interior points: everything that is neither initial nor boundary.
        # np.logical_not(mask_ic, mask_bc) was a bug: the second argument is
        # interpreted as the output array, so boundary points were kept.
        p = tx[~(mask_ic | mask_bc)]
        return p, p_bc, p_ic

    def _lhs_sample(self, n, n_bc, n_ic):
        t_min, t_max, x_min, x_max = self.domain

        # interior points
        lb = np.array([t_min, x_min])
        ub = np.array([t_max, x_max])
        p = lb + (ub - lb) * lhs(2, n)

        # boundary points, split evenly between x = x_min and x = x_max
        lb = np.array([t_min, x_min])
        ub = np.array([t_max, x_min])
        p_bc = lb + (ub - lb) * lhs(2, n_bc // 2)

        lb = np.array([t_min, x_max])
        ub = np.array([t_max, x_max])
        temp = lb + (ub - lb) * lhs(2, n_bc // 2)
        p_bc = np.vstack((p_bc, temp))

        # initial points on the line t = t_min
        lb = np.array([t_min, x_min])
        ub = np.array([t_min, x_max])
        p_ic = lb + (ub - lb) * lhs(2, n_ic)

        return p, p_bc, p_ic
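# Shape check (added for illustration): draw a small LHS training set and confirm
# the interior/boundary/initial point counts; n_bc is split evenly between the two
# boundaries x = -1 and x = 1.

# In[ ]:


trainset_lhs = Trainset_Burgers(problem, 1000, 100, 100, method='lhs')
p_, p_bc_, p_ic_, bv_, iv_ = trainset_lhs()
print(p_.shape, p_bc_.shape, p_ic_.shape)   # (1000, 2) (100, 2) (100, 2)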
# In[9]:


class Testset_Burgers(object):
    """Dataset on a square domain"""
    def __init__(self, problem, *args):
        self.problem = problem
        self.domain = problem.domain
        self.args = args

    def __repr__(self):
        return f'{self.__doc__}'

    def __call__(self, verbose=None):
        n_t, n_x = self.args[0], self.args[1]
        p, t, x = self._uniform_sample(n_t, n_x)
        if verbose == 'tensor':
            p = torch.from_numpy(p).float()
            t = torch.from_numpy(t).float()
            x = torch.from_numpy(x).float()
        return p, t, x

    def _uniform_sample(self, n_t, n_x):
        t_min, t_max, x_min, x_max = self.domain
        t = np.linspace(t_min, t_max, n_t)
        x = np.linspace(x_min, x_max, n_x)
        t, x = np.meshgrid(t, x)
        p = np.hstack((t.reshape(t.size, -1), x.reshape(x.size, -1)))
        return p, t, x


# In[10]:


trainset = Trainset_Burgers(problem, 40, 40, method='uniform')
p, p_bc, p_ic, bv, iv = trainset(plot=True)
print(p.shape, p_bc.shape, p_ic.shape)


# ### 2.3 Network architecture

# In[11]:


def grad(outputs, inputs):
    """First-order derivatives of outputs w.r.t. inputs, kept in the graph."""
    return torch.autograd.grad(outputs, inputs,
                               grad_outputs=torch.ones_like(outputs),
                               create_graph=True)


class PINN_Burgers(DNN):
    def __init__(self, dim_in, dim_out, dim_hidden, hidden_layers,
                 act_name='sigmoid', init_name='xavier_normal'):
        super().__init__(dim_in, dim_out, dim_hidden, hidden_layers,
                         act_name=act_name, init_name=init_name)

    def forward(self, problem, p, p_bc=None, p_ic=None):
        p.requires_grad_(True)

        u = super().forward(p)
        if p_bc is None:            # inference mode: just evaluate u
            return u
        grad_u = grad(u, p)[0]
        u_t = grad_u[:, [0]]
        u_x = grad_u[:, [1]]
        u_xx = grad(u_x, p)[0][:, [1]]

        p.detach_()

        # PDE residual: u_t + u u_x - epsilon u_xx
        epsilon = problem.epsilon()
        f = u_t + u * u_x - epsilon * u_xx

        bv_bar = super().forward(p_bc)
        iv_bar = super().forward(p_ic)
        return f, bv_bar, iv_bar


# In[12]:


class ResPINN_Burgers(ResDNN):
    def __init__(self, dim_in, dim_out, dim_hidden, res_blocks,
                 act_name='sigmoid', init_name='xavier_normal'):
        # the constructor previously took `hidden_layers` but forwarded an
        # undefined `res_blocks`; the parameter is now named res_blocks
        super().__init__(dim_in, dim_out, dim_hidden, res_blocks,
                         act_name=act_name, init_name=init_name)

    def forward(self, problem, p, p_bc=None, p_ic=None):
        p.requires_grad_(True)

        u = super().forward(p)
        if p_bc is None:
            return u
        grad_u = grad(u, p)[0]
        u_t = grad_u[:, [0]]
        u_x = grad_u[:, [1]]
        u_xx = grad(u_x, p)[0][:, [1]]

        p.detach_()
        epsilon = problem.epsilon()
        f = u_t + u * u_x - epsilon * u_xx

        bv_bar = super().forward(p_bc)
        iv_bar = super().forward(p_ic)
        return f, bv_bar, iv_bar


# In[13]:


model = PINN_Burgers(2, 1, 10, 8)
print(model.model_size())
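# Sanity check of the autograd helper (added; illustrative only): for
# u(t, x) = t * x^2 the helper should recover u_t = x^2 and u_x = 2 t x.

# In[ ]:


pts = torch.tensor([[0.5, 0.3], [1.0, -0.2]], requires_grad=True)
u_demo = pts[:, [0]] * pts[:, [1]] ** 2
du = grad(u_demo, pts)[0]
print(du[:, [0]] - pts[:, [1]] ** 2)               # ~0
print(du[:, [1]] - 2 * pts[:, [0]] * pts[:, [1]])  # ~0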
# ### 2.4 Options

# In[14]:


class Options_Burgers(object):
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--no_cuda', action='store_true', default=False, help='disable CUDA or not')
        parser.add_argument('--dim_hidden', type=int, default=10, help='neurons in hidden layers')
        parser.add_argument('--hidden_layers', type=int, default=4, help='number of hidden layers')
        parser.add_argument('--res_blocks', type=int, default=4, help='number of residual blocks')
        parser.add_argument('--lam', type=float, default=1, help='weight in loss function')
        parser.add_argument('--lr', type=float, default=1e-3, help='initial learning rate')
        parser.add_argument('--epochs_Adam', type=int, default=1000, help='epochs for Adam optimizer')
        parser.add_argument('--epochs_LBFGS', type=int, default=200, help='epochs for LBFGS optimizer')
        parser.add_argument('--step_size', type=int, default=2000, help='step size in lr_scheduler for Adam optimizer')
        parser.add_argument('--gamma', type=float, default=0.7, help='gamma in lr_scheduler for Adam optimizer')
        parser.add_argument('--resume', type=bool, default=False, help='resume or not')
        parser.add_argument('--sample_method', type=str, default='lhs', help='sample method')
        parser.add_argument('--n_t', type=int, default=100, help='sample points in t-direction for uniform sample')
        parser.add_argument('--n_x', type=int, default=100, help='sample points in x-direction for uniform sample')
        parser.add_argument('--n', type=int, default=10000, help='sample points in domain for lhs sample')
        parser.add_argument('--n_bc', type=int, default=400, help='sample points on the boundary for lhs sample')
        parser.add_argument('--n_ic', type=int, default=400, help='sample points on the initial line for lhs sample')

        self.parser = parser

    def parse(self):
        arg = self.parser.parse_args(args=[])
        arg.cuda = not arg.no_cuda and torch.cuda.is_available()
        # respect --no_cuda instead of only checking availability
        arg.device = torch.device('cuda' if arg.cuda else 'cpu')
        return arg


# In[15]:


args = Options_Burgers().parse()
print(args)
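# Note (added): parse() passes args=[] so that the defaults are used inside a
# notebook; an explicit argv list can override them without touching sys.argv.

# In[ ]:


args_demo = Options_Burgers().parser.parse_args(['--lr', '5e-4', '--epochs_Adam', '2000'])
print(args_demo.lr, args_demo.epochs_Adam)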
# ### 2.5 Training

# In[16]:


def save_model(state, is_best=None, save_dir=None):
    last_model = os.path.join(save_dir, 'last_model.pth.tar')
    torch.save(state, last_model)
    if is_best:
        best_model = os.path.join(save_dir, 'best_model.pth.tar')
        shutil.copyfile(last_model, best_model)


class Trainer_Burgers(object):
    def __init__(self, args):
        self.device = args.device
        self.problem = args.problem

        self.lam = args.lam
        self.criterion = nn.MSELoss()

        self.model = args.model
        self.model_name = self.model.__class__.__name__
        self.model_path = self._model_path()

        self.epochs_Adam = args.epochs_Adam
        self.epochs_LBFGS = args.epochs_LBFGS
        self.optimizer_Adam = optim.Adam(self.model.parameters(), lr=args.lr)
        self.optimizer_LBFGS = optim.LBFGS(self.model.parameters(),
                                           max_iter=20,
                                           tolerance_grad=1.e-8,
                                           tolerance_change=1.e-12)
        self.lr_scheduler = StepLR(self.optimizer_Adam,
                                   step_size=args.step_size,
                                   gamma=args.gamma)

        self.model.to(self.device)
        self.model.zero_grad()

        # move the training data to the same device as the model
        self.p, self.p_bc, self.p_ic, self.bv, self.iv = [
            tensor.to(self.device) for tensor in args.trainset(verbose='tensor')]
        # zero target for the PDE residual, shaped (N, 1) to match f_pred
        self.f = torch.zeros_like(self.p[:, [0]])
        self.g = torch.cat((self.bv, self.iv))

    def _model_path(self):
        """Path to save the model"""
        if not os.path.exists('checkpoints'):
            os.mkdir('checkpoints')

        path = os.path.join('checkpoints', self.model_name)
        if not os.path.exists(path):
            os.mkdir(path)

        return path

    def train(self):
        best_loss = 1.e10
        for epoch in range(self.epochs_Adam):
            loss, loss1, loss2 = self.train_Adam()
            if (epoch + 1) % 100 == 0:
                self.infos_Adam(epoch + 1, loss, loss1, loss2)

                valid_loss = self.validate(epoch)
                is_best = valid_loss < best_loss
                best_loss = valid_loss if is_best else best_loss
                state = {
                    'epoch': epoch,
                    'state_dict': self.model.state_dict(),
                    'best_loss': best_loss
                }
                save_model(state, is_best, save_dir=self.model_path)

        for epoch in range(self.epochs_Adam, self.epochs_Adam + self.epochs_LBFGS):
            loss, loss1, loss2 = self.train_LBFGS()
            if (epoch + 1) % 20 == 0:
                self.infos_LBFGS(epoch + 1, loss, loss1, loss2)

                valid_loss = self.validate(epoch)
                is_best = valid_loss < best_loss
                best_loss = valid_loss if is_best else best_loss
                state = {
                    'epoch': epoch,
                    'state_dict': self.model.state_dict(),
                    'best_loss': best_loss
                }
                save_model(state, is_best, save_dir=self.model_path)

    def train_Adam(self):
        self.optimizer_Adam.zero_grad()

        f_pred, bv_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        g_pred = torch.cat((bv_pred, iv_pred))
        loss1 = self.criterion(f_pred, self.f)
        loss2 = self.criterion(g_pred, self.g)
        loss = loss1 + self.lam * loss2

        loss.backward()
        self.optimizer_Adam.step()
        self.lr_scheduler.step()

        return loss.item(), loss1.item(), loss2.item()

    def infos_Adam(self, epoch, loss, loss1, loss2):
        infos = 'Adam ' + \
                f'Epoch #{epoch:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.4e} = {loss1:.4e} + {self.lam} * {loss2:.4e} ' + \
                f'lr: {self.lr_scheduler.get_last_lr()[0]:.2e} '
        print(infos)

    def train_LBFGS(self):

        def closure():
            if torch.is_grad_enabled():
                self.optimizer_LBFGS.zero_grad()
            f_pred, bv_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
            g_pred = torch.cat((bv_pred, iv_pred))
            loss1 = self.criterion(f_pred, self.f)
            loss2 = self.criterion(g_pred, self.g)
            loss = loss1 + self.lam * loss2

            if loss.requires_grad:
                loss.backward()
            return loss

        self.optimizer_LBFGS.step(closure)

        # recompute the terms once after the step so the logged components
        # are consistent with the logged total
        f_pred, bv_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        g_pred = torch.cat((bv_pred, iv_pred))
        loss1 = self.criterion(f_pred, self.f)
        loss2 = self.criterion(g_pred, self.g)
        loss = loss1 + self.lam * loss2

        return loss.item(), loss1.item(), loss2.item()

    def infos_LBFGS(self, epoch, loss, loss1, loss2):
        # note: {self.lam:d} raised a ValueError since lam is a float
        infos = 'LBFGS ' + \
                f'Epoch #{epoch:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.2e} = {loss1:.2e} + {self.lam} * {loss2:.2e} '
        print(infos)

    def validate(self, epoch):
        self.model.eval()
        f_pred, bv_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        g_pred = torch.cat((bv_pred, iv_pred))
        loss1 = self.criterion(f_pred, self.f)
        loss2 = self.criterion(g_pred, self.g)
        loss = loss1 + self.lam * loss2
        infos = 'Valid ' + \
                f'Epoch #{epoch+1:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.4e} '
        print(infos)
        self.model.train()
        return loss.item()


# In[17]:


args = Options_Burgers().parse()
args.problem = Problem_Burgers()
args.model = PINN_Burgers(dim_in=2,
                          dim_out=1,
                          dim_hidden=args.dim_hidden,
                          hidden_layers=args.hidden_layers,
                          act_name='sigmoid')
if args.sample_method == 'uniform':
    args.trainset = Trainset_Burgers(args.problem, args.n_t, args.n_x, method='uniform')
elif args.sample_method == 'lhs':
    args.trainset = Trainset_Burgers(args.problem, args.n, args.n_bc, args.n_ic, method='lhs')

trainer_Burgers = Trainer_Burgers(args)
trainer_Burgers.train()
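# Checkpoint inspection (added; assumes the training run above has completed):
# save_model() stores the epoch, the weights and the best loss so far.

# In[ ]:


ckpt = torch.load(os.path.join('checkpoints', 'PINN_Burgers', 'last_model.pth.tar'),
                  map_location='cpu')
print(ckpt['epoch'], ckpt['best_loss'])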
# ### 2.6 Testing

# In[40]:


class Tester_Burgers(object):
    def __init__(self, args):
        self.device = args.device
        self.problem = args.problem
        self.criterion = nn.MSELoss()
        self.model = args.model
        model_name = self.model.__class__.__name__
        model_path = os.path.join('checkpoints',
                                  model_name,
                                  'best_model.pth.tar')
        best_model = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(best_model['state_dict'])
        self.model.to(self.device)

        self.p, self.t, self.x = args.testset(verbose='tensor')
        self.p = self.p.to(self.device)

    def predict(self):
        self.model.eval()
        u_pred = self.model(self.problem, self.p)
        u_pred = u_pred.detach().cpu().numpy()
        u_pred = u_pred.reshape(self.t.shape)

        plt.figure(figsize=(10, 3), frameon=False)
        plt.contourf(self.t, self.x, u_pred, levels=1000, cmap='rainbow')
        plt.show()


# In[41]:


args = Options_Burgers().parse()
args.problem = Problem_Burgers()

args.model = PINN_Burgers(dim_in=2,
                          dim_out=1,
                          dim_hidden=args.dim_hidden,
                          hidden_layers=args.hidden_layers,
                          act_name='sigmoid')
args.testset = Testset_Burgers(args.problem, 100, 100)
tester = Tester_Burgers(args)
tester.predict()


# ## 3. The Allen-Cahn equation

# Consider the Allen-Cahn equation with periodic boundary conditions:
# $$
# \left\{
# \begin{array}{rl}
# u_t - 0.0001 u_{xx} + 5u^3 - 5u = 0, & x \in [-1, 1], ~~ t \in [0, 1], \\
# u(0, x) = x^2 \cos(\pi x), & \\
# u(t, -1) = u(t, 1), & \\
# u_x(t, -1) = u_x(t, 1).
# \end{array}
# \right.
# $$

# ### 3.1 Problem description

# In[14]:


class Problem_AC(object):
    """Description of Allen-Cahn Equation"""
    def __init__(self, domain=(0, 1, -1, 1)):
        self.domain = domain

    def __repr__(self):
        return f'{self.__doc__}'

    def iv(self, p):
        """Initial value u(0, x) = x^2 cos(pi x); column 1 holds x."""
        x = p[:, [1]]
        return x**2 * np.cos(np.pi * x)

    def bv(self, p):
        """Zero target for the periodic boundary mismatch terms."""
        bv = np.zeros_like(p[:, [0]])
        return bv
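# Visual check of the Allen-Cahn initial condition u(0, x) = x^2 cos(pi x)
# (added for illustration).

# In[ ]:


x_demo = np.linspace(-1, 1, 201).reshape(-1, 1)
p_demo = np.hstack((np.zeros_like(x_demo), x_demo))
plt.plot(x_demo, Problem_AC().iv(p_demo))
plt.xlabel('x')
plt.ylabel('u(0, x)')
plt.show()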
# ### 3.2 Dataset generation

# In[15]:


class Trainset_AC(object):
    def __init__(self, problem, *args, **kwargs):
        self.problem = problem
        self.domain = problem.domain
        self.args = args
        self.method = kwargs['method']

    def __call__(self, plot=False, verbose=None):
        if self.method == 'uniform':
            n_t, n_x = self.args[0], self.args[1]
            p, p_bc, p_ic = self._uniform_sample(n_t, n_x)
        elif self.method == 'lhs':
            n, n_bc, n_ic = self.args[0], self.args[1], self.args[2]
            p, p_bc, p_ic = self._lhs_sample(n, n_bc, n_ic)

        bv = self.problem.bv(p_bc)
        iv = self.problem.iv(p_ic)

        if plot:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.scatter(p[:, 0], p[:, 1], facecolor='r', s=10)
            ax.scatter(p_bc[:, 0], p_bc[:, 1], facecolor='b', s=10)
            ax.scatter(p_ic[:, 0], p_ic[:, 1], facecolor='g', s=10)
            ax.set_xlim(-0.01, 1.01)
            ax.set_ylim(-1.01, 1.01)
            ax.set_aspect('equal')
            plt.show()

        if verbose == 'tensor':
            p = torch.from_numpy(p).float()
            p_bc = torch.from_numpy(p_bc).float()
            p_ic = torch.from_numpy(p_ic).float()
            bv = torch.from_numpy(bv).float()
            iv = torch.from_numpy(iv).float()
        return p, p_bc, p_ic, bv, iv

    def _uniform_sample(self, n_t, n_x):
        t_min, t_max, x_min, x_max = self.domain
        t = np.linspace(t_min, t_max, n_t)
        x = np.linspace(x_min, x_max, n_x)
        t, x = np.meshgrid(t, x)
        tx = np.hstack((t.reshape(t.size, -1), x.reshape(x.size, -1)))

        mask_ic = (tx[:, 0] - t_min) == 0
        mask_bc = (tx[:, 1] - x_min) * (x_max - tx[:, 1]) == 0
        p_ic = tx[mask_ic]
        p_bc = tx[mask_bc]
        # same fix as in Trainset_Burgers: exclude both masks
        p = tx[~(mask_ic | mask_bc)]
        return p, p_bc, p_ic

    def _lhs_sample(self, n, n_bc, n_ic):
        t_min, t_max, x_min, x_max = self.domain

        lb = np.array([t_min, x_min])
        ub = np.array([t_max, x_max])
        p = lb + (ub - lb) * lhs(2, n)

        lb = np.array([t_min, x_min])
        ub = np.array([t_max, x_min])
        p_bc = lb + (ub - lb) * lhs(2, n_bc // 2)

        lb = np.array([t_min, x_max])
        ub = np.array([t_max, x_max])
        temp = lb + (ub - lb) * lhs(2, n_bc // 2)
        p_bc = np.vstack((p_bc, temp))

        lb = np.array([t_min, x_min])
        ub = np.array([t_min, x_max])
        p_ic = lb + (ub - lb) * lhs(2, n_ic)

        return p, p_bc, p_ic


# In[16]:


class Testset_AC(object):
    """Dataset on a square domain"""
    def __init__(self, problem, *args):
        self.problem = problem
        self.domain = problem.domain
        self.args = args

    def __repr__(self):
        return f'{self.__doc__}'

    def __call__(self, verbose=None):
        n_t, n_x = self.args[0], self.args[1]
        p, t, x = self._uniform_sample(n_t, n_x)
        if verbose == 'tensor':
            p = torch.from_numpy(p).float()
            t = torch.from_numpy(t).float()
            x = torch.from_numpy(x).float()
        return p, t, x

    def _uniform_sample(self, n_t, n_x):
        t_min, t_max, x_min, x_max = self.domain
        t = np.linspace(t_min, t_max, n_t)
        x = np.linspace(x_min, x_max, n_x)
        t, x = np.meshgrid(t, x)
        p = np.hstack((t.reshape(t.size, -1), x.reshape(x.size, -1)))
        return p, t, x


# ### 3.3 Network architecture

# In[17]:


def grad(outputs, inputs):
    """Re-defined here so this section is self-contained."""
    return torch.autograd.grad(outputs, inputs,
                               grad_outputs=torch.ones_like(outputs),
                               create_graph=True)


class PINN_AC(DNN):
    def __init__(self, dim_in, dim_out, dim_hidden, hidden_layers,
                 act_name='sigmoid', init_name='xavier_normal'):
        super().__init__(dim_in, dim_out, dim_hidden, hidden_layers,
                         act_name=act_name, init_name=init_name)

    def forward(self, problem, p, p_bc=None, p_ic=None):
        p.requires_grad_(True)

        u = super().forward(p)
        if p_bc is None:
            return u
        grad_u = grad(u, p)[0]
        u_t = grad_u[:, [0]]
        u_x = grad_u[:, [1]]
        u_xx = grad(u_x, p)[0][:, [1]]

        p.detach_()

        # PDE residual: u_t - 0.0001 u_xx + 5 u^3 - 5 u
        f = u_t - 0.0001 * u_xx + 5 * u**3 - 5 * u

        # periodic boundary terms: compare the network (and its x-derivative)
        # at (t, x) with the mirrored point (t, -x); clone() is essential,
        # otherwise the in-place sign flip would also overwrite p_bc itself
        p_bcop = p_bc.clone()
        p_bcop[:, 1] = -p_bc[:, 1]
        p_bc.requires_grad_(True)
        p_bcop.requires_grad_(True)
        u_b = super().forward(p_bc)
        u_bop = super().forward(p_bcop)
        u_b_x = grad(u_b, p_bc)[0][:, [1]]
        u_bop_x = grad(u_bop, p_bcop)[0][:, [1]]
        p_bc.detach_()
        p_bcop.detach_()

        bv_bar = u_b - u_bop
        bv_x_bar = u_b_x - u_bop_x
        iv_bar = super().forward(p_ic)
        return f, bv_bar, bv_x_bar, iv_bar
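# Shape check (added; illustrative): an untrained PINN_AC returns the PDE residual
# plus the periodic-value, periodic-derivative and initial-value mismatches, all
# as column vectors.

# In[ ]:


model_ac_demo = PINN_AC(2, 1, 10, 4)
ts_demo = Trainset_AC(Problem_AC(), 100, 40, 40, method='lhs')
p_d, p_bc_d, p_ic_d, bv_d, iv_d = ts_demo(verbose='tensor')
f_d, bv_bar_d, bv_x_bar_d, iv_bar_d = model_ac_demo(Problem_AC(), p_d, p_bc_d, p_ic_d)
print(f_d.shape, bv_bar_d.shape, bv_x_bar_d.shape, iv_bar_d.shape)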
# In[25]:


class ResPINN_AC(ResDNN):
    def __init__(self, dim_in, dim_out, dim_hidden, res_blocks,
                 act_name='sigmoid', init_name='xavier_normal'):
        # same fix as ResPINN_Burgers: the parameter is named res_blocks
        super().__init__(dim_in, dim_out, dim_hidden, res_blocks,
                         act_name=act_name, init_name=init_name)

    def forward(self, problem, p, p_bc=None, p_ic=None):
        p.requires_grad_(True)

        u = super().forward(p)
        if p_bc is None:
            return u
        grad_u = grad(u, p)[0]
        u_t = grad_u[:, [0]]
        u_x = grad_u[:, [1]]
        u_xx = grad(u_x, p)[0][:, [1]]

        p.detach_()
        f = u_t - 0.0001 * u_xx + 5 * u**3 - 5 * u

        # same periodic-boundary construction as PINN_AC (clone before the
        # in-place sign flip, otherwise p_bc itself would be modified)
        p_bcop = p_bc.clone()
        p_bcop[:, 1] = -p_bc[:, 1]
        p_bc.requires_grad_(True)
        p_bcop.requires_grad_(True)
        u_b = super().forward(p_bc)
        u_bop = super().forward(p_bcop)
        u_b_x = grad(u_b, p_bc)[0][:, [1]]
        u_bop_x = grad(u_bop, p_bcop)[0][:, [1]]
        p_bc.detach_()
        p_bcop.detach_()

        bv_bar = u_b - u_bop
        bv_x_bar = u_b_x - u_bop_x
        iv_bar = super().forward(p_ic)
        return f, bv_bar, bv_x_bar, iv_bar


# ### 3.4 Options

# In[35]:


class Options_AC(object):
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--no_cuda', action='store_true', default=False, help='disable CUDA or not')
        parser.add_argument('--dim_hidden', type=int, default=10, help='neurons in hidden layers')
        parser.add_argument('--hidden_layers', type=int, default=4, help='number of hidden layers')
        parser.add_argument('--res_blocks', type=int, default=4, help='number of residual blocks')
        parser.add_argument('--lam', type=float, default=1, help='weight in loss function')
        parser.add_argument('--lr', type=float, default=1e-3, help='initial learning rate')
        parser.add_argument('--epochs_Adam', type=int, default=1000, help='epochs for Adam optimizer')
        parser.add_argument('--epochs_LBFGS', type=int, default=200, help='epochs for LBFGS optimizer')
        parser.add_argument('--step_size', type=int, default=2000, help='step size in lr_scheduler for Adam optimizer')
        parser.add_argument('--gamma', type=float, default=0.7, help='gamma in lr_scheduler for Adam optimizer')
        parser.add_argument('--resume', type=bool, default=False, help='resume or not')
        parser.add_argument('--sample_method', type=str, default='lhs', help='sample method')
        parser.add_argument('--n_t', type=int, default=100, help='sample points in t-direction for uniform sample')
        parser.add_argument('--n_x', type=int, default=100, help='sample points in x-direction for uniform sample')
        parser.add_argument('--n', type=int, default=10000, help='sample points in domain for lhs sample')
        parser.add_argument('--n_bc', type=int, default=400, help='sample points on the boundary for lhs sample')
        parser.add_argument('--n_ic', type=int, default=800, help='sample points on the initial line for lhs sample')

        self.parser = parser

    def parse(self):
        arg = self.parser.parse_args(args=[])
        arg.cuda = not arg.no_cuda and torch.cuda.is_available()
        arg.device = torch.device('cuda' if arg.cuda else 'cpu')
        return arg
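# Note (added): Options_AC mirrors Options_Burgers; the only changed default is
# --n_ic (800 instead of 400).

# In[ ]:


args_ac_demo = Options_AC().parse()
print(args_ac_demo.n_ic, args_ac_demo.device)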
# ### 3.5 Training

# In[36]:


def save_modelAC(state, is_best=None, save_dir=None):
    last_model = os.path.join(save_dir, 'last_modelAC.pth.tar')
    torch.save(state, last_model)
    if is_best:
        best_model = os.path.join(save_dir, 'best_modelAC.pth.tar')
        shutil.copyfile(last_model, best_model)


class Trainer_AC(object):
    def __init__(self, args):
        self.device = args.device
        self.problem = args.problem

        self.lam = args.lam
        self.criterion = nn.MSELoss()

        self.model = args.model
        self.model_name = self.model.__class__.__name__
        self.model_path = self._model_path()

        self.epochs_Adam = args.epochs_Adam
        self.epochs_LBFGS = args.epochs_LBFGS
        self.optimizer_Adam = optim.Adam(self.model.parameters(), lr=args.lr)
        self.optimizer_LBFGS = optim.LBFGS(self.model.parameters(),
                                           max_iter=20,
                                           tolerance_grad=1.e-8,
                                           tolerance_change=1.e-12)
        self.lr_scheduler = StepLR(self.optimizer_Adam,
                                   step_size=args.step_size,
                                   gamma=args.gamma)

        self.model.to(self.device)
        self.model.zero_grad()

        # move the training data to the same device as the model
        self.p, self.p_bc, self.p_ic, self.bv, self.iv = [
            tensor.to(self.device) for tensor in args.trainset(verbose='tensor')]
        self.f = torch.zeros_like(self.p[:, [0]])

    def _model_path(self):
        """Path to save the model"""
        if not os.path.exists('checkpointsAC'):
            os.mkdir('checkpointsAC')

        path = os.path.join('checkpointsAC', self.model_name)
        if not os.path.exists(path):
            os.mkdir(path)

        return path

    def train(self):
        best_loss = 1.e10
        for epoch in range(self.epochs_Adam):
            loss, loss1, loss2 = self.train_Adam()
            if (epoch + 1) % 100 == 0:
                self.infos_Adam(epoch + 1, loss, loss1, loss2)

                valid_loss = self.validate(epoch)
                is_best = valid_loss < best_loss
                best_loss = valid_loss if is_best else best_loss
                state = {
                    'epoch': epoch,
                    'state_dict': self.model.state_dict(),
                    'best_loss': best_loss
                }
                save_modelAC(state, is_best, save_dir=self.model_path)

        for epoch in range(self.epochs_Adam, self.epochs_Adam + self.epochs_LBFGS):
            loss, loss1, loss2 = self.train_LBFGS()
            if (epoch + 1) % 20 == 0:
                self.infos_LBFGS(epoch + 1, loss, loss1, loss2)

                valid_loss = self.validate(epoch)
                is_best = valid_loss < best_loss
                best_loss = valid_loss if is_best else best_loss
                state = {
                    'epoch': epoch,
                    'state_dict': self.model.state_dict(),
                    'best_loss': best_loss
                }
                save_modelAC(state, is_best, save_dir=self.model_path)

    def train_Adam(self):
        self.optimizer_Adam.zero_grad()

        f_pred, bv_pred, bv_x_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        loss1 = self.criterion(f_pred, self.f)
        # the initial-condition term is weighted 10x
        loss2 = 10 * self.criterion(iv_pred, self.iv) + self.criterion(bv_pred, self.bv) + self.criterion(bv_x_pred, self.bv)
        loss = loss1 + self.lam * loss2

        loss.backward()
        self.optimizer_Adam.step()
        self.lr_scheduler.step()

        return loss.item(), loss1.item(), loss2.item()

    def infos_Adam(self, epoch, loss, loss1, loss2):
        infos = 'Adam ' + \
                f'Epoch #{epoch:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.4e} = {loss1:.4e} + {self.lam} * {loss2:.4e} ' + \
                f'lr: {self.lr_scheduler.get_last_lr()[0]:.2e} '
        print(infos)

    def train_LBFGS(self):

        def closure():
            if torch.is_grad_enabled():
                self.optimizer_LBFGS.zero_grad()
            f_pred, bv_pred, bv_x_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
            loss1 = self.criterion(f_pred, self.f)
            # keep the same 10x initial-condition weight as in train_Adam
            # (the original closure dropped it, so the two phases optimized
            # different objectives)
            loss2 = 10 * self.criterion(iv_pred, self.iv) + self.criterion(bv_pred, self.bv) + self.criterion(bv_x_pred, self.bv)
            loss = loss1 + self.lam * loss2

            if loss.requires_grad:
                loss.backward()
            return loss

        self.optimizer_LBFGS.step(closure)

        # recompute the terms once after the step for consistent logging
        f_pred, bv_pred, bv_x_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        loss1 = self.criterion(f_pred, self.f)
        loss2 = 10 * self.criterion(iv_pred, self.iv) + self.criterion(bv_pred, self.bv) + self.criterion(bv_x_pred, self.bv)
        loss = loss1 + self.lam * loss2

        return loss.item(), loss1.item(), loss2.item()

    def infos_LBFGS(self, epoch, loss, loss1, loss2):
        # note: {self.lam:d} raised a ValueError since lam is a float
        infos = 'LBFGS ' + \
                f'Epoch #{epoch:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.2e} = {loss1:.2e} + {self.lam} * {loss2:.2e} '
        print(infos)

    def validate(self, epoch):
        self.model.eval()
        f_pred, bv_pred, bv_x_pred, iv_pred = self.model(self.problem, self.p, self.p_bc, self.p_ic)
        loss1 = self.criterion(f_pred, self.f)
        loss2 = 10 * self.criterion(iv_pred, self.iv) + self.criterion(bv_pred, self.bv) + self.criterion(bv_x_pred, self.bv)
        loss = loss1 + self.lam * loss2
        infos = 'Valid ' + \
                f'Epoch #{epoch+1:5d}/{self.epochs_Adam+self.epochs_LBFGS} ' + \
                f'Loss: {loss:.4e} '
        print(infos)
        self.model.train()
        return loss.item()


# In[ ]:


# Train with Trainer_AC
args = Options_AC().parse()
args.problem = Problem_AC()
args.model = PINN_AC(dim_in=2,
                     dim_out=1,
                     dim_hidden=args.dim_hidden,
                     hidden_layers=args.hidden_layers,
                     act_name='sigmoid')
if args.sample_method == 'uniform':
    args.trainset = Trainset_AC(args.problem, args.n_t, args.n_x, method='uniform')
elif args.sample_method == 'lhs':
    args.trainset = Trainset_AC(args.problem, args.n, args.n_bc, args.n_ic, method='lhs')

trainer_AC = Trainer_AC(args)
trainer_AC.train()
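# Possible variant (added sketch, not in the original notebook): the residual
# network can be swapped in without touching the trainer. Note this launches a
# second full training run, checkpointed under checkpointsAC/ResPINN_AC.

# In[ ]:


args = Options_AC().parse()
args.problem = Problem_AC()
args.model = ResPINN_AC(dim_in=2,
                        dim_out=1,
                        dim_hidden=args.dim_hidden,
                        res_blocks=args.res_blocks,
                        act_name='tanh')
args.trainset = Trainset_AC(args.problem, args.n, args.n_bc, args.n_ic, method='lhs')
Trainer_AC(args).train()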
# ### 3.6 Testing

# In[33]:


class Tester_AC(object):
    def __init__(self, args):
        self.device = args.device
        self.problem = args.problem
        self.criterion = nn.MSELoss()
        self.model = args.model
        model_name = self.model.__class__.__name__
        model_path = os.path.join('checkpointsAC',
                                  model_name,
                                  'best_modelAC.pth.tar')
        best_model = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(best_model['state_dict'])
        self.model.to(self.device)

        self.p, self.t, self.x = args.testset(verbose='tensor')
        self.p = self.p.to(self.device)

    def predict(self):
        self.model.eval()
        u_pred = self.model(self.problem, self.p)
        u_pred = u_pred.detach().cpu().numpy()
        u_pred = u_pred.reshape(self.t.shape)

        plt.figure(figsize=(10, 3), frameon=False)
        plt.contourf(self.t, self.x, u_pred, levels=1000, cmap='seismic')
        plt.show()


# In[34]:


# Predict with Tester_AC
args = Options_AC().parse()
args.problem = Problem_AC()

args.model = PINN_AC(dim_in=2,
                     dim_out=1,
                     dim_hidden=args.dim_hidden,
                     hidden_layers=args.hidden_layers,
                     act_name='sigmoid')
args.testset = Testset_AC(args.problem, 100, 100)
tester = Tester_AC(args)
tester.predict()

--------------------------------------------------------------------------------