├── .gitignore ├── LICENSE ├── NeuralSolvers ├── Geometry.py ├── JoinedDataset.py ├── LossTerm.py ├── ND_Cube.py ├── __init__.py ├── bc_library.py ├── callbacks │ ├── Callback.py │ └── __init__.py ├── loggers │ ├── Logger_Interface.py │ ├── Python_Logger.py │ ├── TensorBoard_Logger.py │ ├── WandB_Logger.py │ └── __init__.py ├── models │ ├── Finger_Net.py │ ├── __init__.py │ ├── activations │ │ ├── __init__.py │ │ └── snake.py │ ├── distributed_moe.py │ ├── mlp.py │ ├── modulated_mlp.py │ ├── moe_finger.py │ ├── moe_finger_test.py │ ├── moe_mlp.py │ ├── moe_mlp_test.py │ ├── pennesmodel.py │ ├── snake_mlp.py │ └── test_Finger_Net.py ├── optimizer.py ├── pde_library.py ├── pinn │ ├── HPMLoss.py │ ├── PDELoss.py │ ├── PINN.py │ ├── __init__.py │ └── datasets │ │ ├── BoundaryCondition.py │ │ ├── InitalCondition.py │ │ └── __init__.py └── samplers │ ├── Adaptive_Sampler.py │ ├── LHS_Sampler.py │ ├── Random_Sampler.py │ ├── Sampler.py │ └── __init__.py ├── README.md ├── benchmarks ├── benchmark_runner.py ├── configs.py └── datasets.py ├── examples ├── Bioheat_2d │ ├── 2D_Bioheat.py │ └── Datasets.py ├── Burgers_Equation_1d │ ├── Burgers_Equation.py │ ├── Burgers_Equation_adaptive.py │ ├── burgers_equation.png │ └── burgers_shock.mat ├── Heat_Equation_1d │ ├── Analytical_Solution.png │ ├── Analytical_different_t.png │ ├── Heat_Equation.py │ ├── Heat_Equation_adaptive.py │ ├── Heat_Equation_inversion.py │ ├── Initial_condition.png │ ├── PDE_residual.png │ ├── PINN.png │ ├── PINN_different_t.png │ ├── PINNvsSol_100.png │ ├── PINNvsSol_199.png │ ├── PINNvsSol_50.png │ └── README.md ├── Schroedinger_1d │ ├── NLS.mat │ ├── Schroedinger-adaptive.py │ └── Schroedinger.py ├── UKD_Heat_2d │ ├── Run_n_analyse.ipynb │ ├── datasets.py │ ├── pennes_hpm.py │ └── run_script.sh └── WaveEquation_1d │ └── 1DWaveNet.ipynb ├── images ├── 1D_Schroedinger_training.gif ├── API_PINN.png ├── cropped_logo.png └── scalability.png ├── requirements.txt ├── setup.py └── tools ├── benchmarks_1d.py └── embedding_tests.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Helmholtz-Zentrum Dresden - Rossendorf 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/NeuralSolvers/Geometry.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import numpy as np
 3 | from torch.utils.data import Dataset
 4 | 
 5 | class Geometry(Dataset):
 6 |     def __init__(self, lb, ub, n_points, batch_size, sampler, device):
 7 |         """
 8 |         Constructor of the Geometry class
 9 | 
10 |         Args:
11 |             lb (numpy.ndarray): lower bound of the domain.
12 |             ub (numpy.ndarray): upper bound of the domain.
13 |             n_points (int): the number of sampled points.
14 |             batch_size (int): batch size
15 |             sampler: instance of the Sampler class.
16 |         """
17 |         self.lb = lb
18 |         self.ub = ub
19 |         self.n_points = n_points
20 |         self.batch_size = batch_size
21 |         self.sampler = sampler
22 |         self.device = device
23 | 
24 | 
25 |     def __getitem__(self, idx):
26 |         raise NotImplementedError("Subclasses should implement '__getitem__' method")
27 | 
28 |     def __len__(self):
29 |         raise NotImplementedError("Subclasses should implement '__len__' method")
30 | 
--------------------------------------------------------------------------------
/NeuralSolvers/JoinedDataset.py:
--------------------------------------------------------------------------------
 1 | from torch.utils.data import Dataset
 2 | 
 3 | 
 4 | class JoinedDataset(Dataset):
 5 |     r"""Dataset as a combination of multiple named datasets.
 6 |     datasets (dict): map of named datasets to be combined
 7 |     """
 8 |     @staticmethod
 9 |     def min_length(datasets):
10 |         """
11 |         Calculates the minimum dataset length of a map of datasets
12 | 
13 |         datasets (Map): Map of datasets to be combined
14 |         """
15 |         minimum = float("inf")
16 |         for key in datasets.keys():
17 |             length = len(datasets[key])
18 |             if length < minimum:
19 |                 minimum = length
20 |         return minimum
21 | 
22 |     @staticmethod
23 |     def max_length(datasets):
24 |         """
25 |         Calculates the maximum dataset length of a map of datasets
26 | 
27 |         datasets (Map): Map of datasets to be combined
28 |         """
29 |         maximum = -1 * float("inf")
30 |         for key in datasets.keys():
31 |             length = len(datasets[key])
32 |             if length > maximum:
33 |                 maximum = length
34 |         return maximum
35 | 
36 |     def __init__(self, datasets, mode='min'):
37 |         super(JoinedDataset, self).__init__()
38 |         self.datasets = datasets
39 |         self.mode = mode
40 | 
41 |     def __len__(self):
42 |         if self.mode == 'min':
43 |             return self.min_length(self.datasets)
44 |         if self.mode == 'max':
45 |             return self.max_length(self.datasets)
46 | 
47 |     def __getitem__(self, idx):
48 |         if idx < 0:
49 |             if -idx > len(self):
50 |                 raise ValueError("absolute value of index should not exceed dataset length")
51 |         combined_item = {}
52 |         for key in self.datasets.keys():
53 |             # use a per-key index so that idx is not mutated across keys
54 |             key_idx = idx % len(self.datasets[key]) if self.mode == 'max' else idx
55 |             item = self.datasets[key][key_idx]
56 |             combined_item[key] = item
57 |         return combined_item
58 | 
59 |     def register_dataset(self, key, dataset):
60 |         if key in self.datasets:
61 |             print("Key already exists. Dataset will be overwritten")
62 |         self.datasets[key] = dataset
63 | 
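# --- Editor's note: illustrative usage sketch appended to JoinedDataset.py; not part of the original file. ---
# A minimal example of combining two named datasets. The toy ListDataset below
# is an assumption made only for this sketch; any torch.utils.data.Dataset works.
# With mode='min' the joined length is that of the shortest dataset; with
# mode='max' shorter datasets wrap around via the modulo index.

if __name__ == "__main__":
    class ListDataset(Dataset):
        def __init__(self, values):
            self.values = values

        def __len__(self):
            return len(self.values)

        def __getitem__(self, idx):
            return self.values[idx]

    joined = JoinedDataset({'pde': ListDataset([0, 1, 2, 3]),
                            'initial': ListDataset([10, 11])}, mode='max')
    print(len(joined))   # 4 -> length of the longest dataset
    print(joined[3])     # {'pde': 3, 'initial': 11} -> index wraps as 3 % 2 == 1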
--------------------------------------------------------------------------------
/NeuralSolvers/LossTerm.py:
--------------------------------------------------------------------------------
 1 | from torch.nn import L1Loss, MSELoss
 2 | 
 3 | 
 4 | class LossTerm:
 5 |     """
 6 |     Defines the main structure of a loss term
 7 |     """
 8 |     def __init__(self, dataset, name, norm='L2', weight=1.):
 9 |         """
10 |         Constructor of a loss term
11 | 
12 |         Args:
13 |             dataset (torch.utils.data.Dataset): dataset that provides the residual points
14 |             norm: norm used for calculating the PDE loss
15 |             weight: weighting for the loss term
16 |         """
17 |         # cases for standard torch norms
18 |         if norm == 'L2':
19 |             self.norm = MSELoss()
20 |         elif norm == 'L1':
21 |             self.norm = L1Loss()
22 |         else:
23 |             # case for self-implemented norms: any callable loss can be passed
24 |             self.norm = norm
25 |         self.dataset = dataset
26 |         self.name = name
27 |         self.weight = weight
28 | 
--------------------------------------------------------------------------------
/NeuralSolvers/ND_Cube.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | from .Geometry import Geometry
 3 | 
 4 | class NDCube(Geometry):
 5 |     def __init__(self, lb, ub, n_points, batch_size, sampler, device='cpu'):
 6 |         """
 7 |         Constructor of the NDCube class
 8 | 
 9 |         Args:
10 |             lb (numpy.ndarray): lower bound of the domain.
11 |             ub (numpy.ndarray): upper bound of the domain.
12 |             n_points (int): the number of sampled points.
13 |             batch_size (int): batch size
14 |             sampler: instance of the Sampler class.
15 |         """
16 |         super(NDCube, self).__init__(lb, ub, n_points, batch_size, sampler, device)
17 | 
18 | 
19 |     def __getitem__(self, idx):
20 |         """
21 |         Samples a fresh batch from the domain; idx is ignored since points are drawn anew on each call.
22 |         Args:
23 |             idx (int)
24 |         """
25 |         self.x = self.sampler.sample(self.lb, self.ub, self.batch_size)
26 | 
27 |         if type(self.x) is tuple:
28 |             x, w = self.x
29 |             return torch.cat((x, w), 1).to(self.device)
30 |         else:
31 |             return self.x.to(self.device)
32 | 
33 |     def __len__(self):
34 |         """Length of the dataset in batches"""
35 |         return self.n_points // self.batch_size
--------------------------------------------------------------------------------
/NeuralSolvers/__init__.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from .ND_Cube import NDCube
 3 | from .pinn.PINN import PINN
 4 | from .pde_library import wave1D, burgers1D, schrodinger1D, heat1D
 5 | from .bc_library import dirichlet
 6 | 
 7 | import NeuralSolvers.models
 8 | import NeuralSolvers.callbacks
 9 | import NeuralSolvers.pinn
10 | import NeuralSolvers.samplers
11 | import NeuralSolvers.loggers
12 | 
13 | __all__ = [
14 |     'NDCube',
15 |     'PINN',
16 |     'wave1D',
17 |     'burgers1D',
18 |     'schrodinger1D',
19 |     'heat1D',
20 |     'dirichlet'
21 | ]
22 | 
--------------------------------------------------------------------------------
/NeuralSolvers/bc_library.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | 
 3 | def dirichlet(x):
 4 |     return torch.zeros_like(x)[:, 0].reshape(-1, 1)
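# --- Editor's note: illustrative sketch appended to bc_library.py; not part of the original file. ---
# bc_library currently ships only the homogeneous `dirichlet` condition above.
# A sketch of how a non-homogeneous Dirichlet condition could follow the same
# convention (input x of shape [N, d] -> target of shape [N, 1]). The name
# `dirichlet_const` is hypothetical and used for illustration only.

def dirichlet_const(value):
    def _bc(x):
        # constant boundary value at every sampled boundary point
        return value * torch.ones_like(x)[:, 0].reshape(-1, 1)
    return _bc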
--------------------------------------------------------------------------------
/NeuralSolvers/callbacks/Callback.py:
--------------------------------------------------------------------------------
 1 | from torch.nn import Module
 2 | from NeuralSolvers.loggers.Logger_Interface import LoggerInterface
 3 | 
 4 | 
 5 | class Callback:
 6 |     def __init__(self):
 7 |         self.model = None
 8 |         self.logger = None
 9 | 
10 |     def set_model(self, model):
11 |         if isinstance(model, Module):
12 |             self.model = model
13 |         else:
14 |             raise ValueError("Model is not of type torch.nn.Module; model of type {} was found"
15 |                              .format(type(model)))
16 | 
17 |     def set_logger(self, logger):
18 |         if isinstance(logger, LoggerInterface):
19 |             self.logger = logger
20 |         else:
21 |             raise ValueError("Logger is not of type LoggerInterface; logger of type {} was found"
22 |                              .format(type(logger)))
23 | 
24 |     def __call__(self, epoch):
25 |         raise NotImplementedError("method __call__() of the callback is not implemented")
26 | 
27 | 
28 | class CallbackList:
29 |     def __init__(self, callbacks):
30 |         if isinstance(callbacks, list):
31 |             for cb in callbacks:
32 |                 if not isinstance(cb, Callback):
33 |                     raise ValueError("Callback has to be of type Callback but type {} was found"
34 |                                      .format(type(cb)))
35 |             self.callbacks = callbacks
36 |         else:
37 |             raise ValueError("Callbacks have to be passed as a list but type {} was found"
38 |                              .format(type(callbacks)))
39 | 
40 |     def __call__(self, epoch):
41 |         for cb in self.callbacks:
42 |             cb(epoch)
43 | 
--------------------------------------------------------------------------------
/NeuralSolvers/callbacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/NeuralSolvers/callbacks/__init__.py
--------------------------------------------------------------------------------
/NeuralSolvers/loggers/Logger_Interface.py:
--------------------------------------------------------------------------------
 1 | from abc import ABC, abstractmethod
 2 | 
 3 | 
 4 | class LoggerInterface(ABC):
 5 | 
 6 |     @abstractmethod
 7 |     def log_scalar(self, scalar, name, epoch):
 8 |         """
 9 |         Method that defines how scalars are logged
10 | 
11 |         Args:
12 |             scalar: scalar to be logged
13 |             name: name of the scalar
14 |             epoch: epoch in the training loop
15 | 
16 |         """
17 |         pass
18 | 
19 |     @abstractmethod
20 |     def log_image(self, image, name, epoch):
21 |         """
22 |         Method that defines how images are logged
23 | 
24 |         Args:
25 |             image: image to be logged
26 |             name: name of the image
27 |             epoch: epoch in the training loop
28 | 
29 |         """
30 |         pass
31 | 
32 |     @abstractmethod
33 |     def log_histogram(self, histogram, name, epoch):
34 |         """
35 |         Method that defines how histograms are logged
36 | 
37 |         Args:
38 |             histogram: histogram to be logged
39 |             name: name of the histogram
40 |             epoch: epoch in the training loop
41 | 
42 |         """
43 |         pass
44 | 
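# --- Editor's note: illustrative sketch appended to Logger_Interface.py; not part of the original file. ---
# Any logger usable by the framework implements the three abstract methods of
# LoggerInterface above. A minimal in-memory CSV-style logger as a sketch; the
# class name `CSVLogger` is hypothetical.

class CSVLogger(LoggerInterface):
    def __init__(self, path='training_log.csv'):
        self.path = path
        self.rows = []

    def log_scalar(self, scalar, name, epoch):
        # buffer one row per logged scalar; persist with flush()
        self.rows.append("{},{},{}".format(epoch, name, scalar))

    def log_image(self, image, name, epoch):
        pass  # images cannot be represented in a CSV file; no-op

    def log_histogram(self, histogram, name, epoch):
        pass  # no-op

    def flush(self):
        with open(self.path, 'w') as f:
            f.write("epoch,name,value\n" + "\n".join(self.rows))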
--------------------------------------------------------------------------------
/NeuralSolvers/loggers/Python_Logger.py:
--------------------------------------------------------------------------------
 1 | from .Logger_Interface import LoggerInterface
 2 | 
 3 | class PythonLogger(LoggerInterface):
 4 | 
 5 |     def __init__(self, logdir=None):
 6 | 
 7 |         self.loss_history = {}
 8 | 
 9 |     def log_scalar(self, scalar, name, epoch):
10 |         """
11 |         Store scalar data in the in-memory history.
12 | 
13 |         Args:
14 |             scalar: the scalar to be logged
15 |             name: name of the scalar
16 |             epoch: epoch in the training loop
17 |         """
18 |         self.loss_history[name] = scalar
19 | 
20 |     def log_image(self, image, name, epoch):
21 |         """
22 |         Store image data in the in-memory history.
23 |         The image object is stored as-is under its name.
24 | 
25 |         Args:
26 |             image (Image) : the image tensor of shape (3,H,W) to be logged
27 |             name (String) : name of the image
28 |             epoch (Integer) : epoch in the training loop
29 | 
30 |         """
31 |         self.loss_history[name] = image
32 | 
33 | 
34 |     def log_plot(self, plot, name, epoch):
35 |         """
36 |         No-op: plots are not stored by the Python logger.
37 | 
38 |         Args:
39 |             plot (plot) : the plot to be logged
40 |             name (String) : name of the plot
41 |             epoch (Integer) : epoch in the training loop
42 | 
43 |         """
44 |         return
45 | 
46 |     def log_histogram(self, histogram, name, epoch):
47 |         """
48 |         No-op: histograms are not stored by the Python logger.
49 | 
50 |         Args:
51 |             histogram (histogram) : the histogram to be logged
52 |             name (String) : name of the histogram
53 |             epoch (Integer) : epoch in the training loop
54 | 
55 |         """
56 |         return
57 | 
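# --- Editor's note: illustrative usage sketch appended to Python_Logger.py; not part of the original file. ---
# PythonLogger keeps the most recent value per name in `loss_history`, which
# makes it handy for quick experiments without TensorBoard or wandb:

if __name__ == "__main__":
    logger = PythonLogger()
    for epoch in range(3):
        logger.log_scalar(scalar=1.0 / (epoch + 1), name="pde_loss", epoch=epoch)
    print(logger.loss_history["pde_loss"])  # 0.333... (the last value logged)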
--------------------------------------------------------------------------------
/NeuralSolvers/loggers/TensorBoard_Logger.py:
--------------------------------------------------------------------------------
 1 | from .Logger_Interface import LoggerInterface
 2 | from tensorboardX import SummaryWriter
 3 | 
 4 | class TensorBoardLogger(LoggerInterface):
 5 | 
 6 |     def __init__(self, logdir=None):
 7 |         """
 8 |         Create an event file in a given directory.
 9 | 
10 |         Args:
11 |             logdir: save directory location
12 |         """
13 |         self.writer = SummaryWriter(logdir)
14 | 
15 |     def log_scalar(self, scalar, name, epoch):
16 |         """
17 |         Add scalar data to summary.
18 | 
19 |         Args:
20 |             scalar: the scalar to be logged
21 |             name: name of the scalar
22 |             epoch: epoch in the training loop
23 |         """
24 |         self.writer.add_scalar(name, scalar, epoch)
25 | 
26 |     def log_image(self, image, name, epoch):
27 |         """
28 |         Add image data to summary.
29 |         Note that this requires the 'pillow' package.
30 | 
31 |         Args:
32 |             image (Image) : the image tensor of shape (3,H,W) to be logged
33 |             name (String) : name of the image
34 |             epoch (Integer) : epoch in the training loop
35 | 
36 |         """
37 |         self.writer.add_image(name, image, epoch)
38 | 
39 | 
40 |     def log_plot(self, plot, name, epoch):
41 |         """
42 |         Logs a plot to TensorBoard.
43 | 
44 |         Args:
45 |             plot (plot) : the plot to be logged
46 |             name (String) : name of the plot
47 |             epoch (Integer) : epoch in the training loop
48 | 
49 |         """
50 |         self.writer.add_figure(name, plot, epoch)
51 | 
52 |     def log_histogram(self, histogram, name, epoch):
53 |         """
54 |         Logs a histogram to TensorBoard.
55 | 
56 |         Args:
57 |             histogram (histogram) : the histogram to be logged
58 |             name (String) : name of the histogram
59 |             epoch (Integer) : epoch in the training loop
60 | 
61 |         """
62 |         self.writer.add_histogram(name, histogram, epoch)
63 | 
--------------------------------------------------------------------------------
/NeuralSolvers/loggers/WandB_Logger.py:
--------------------------------------------------------------------------------
 1 | from .Logger_Interface import LoggerInterface
 2 | import wandb
 3 | 
 4 | 
 5 | class WandbLogger(LoggerInterface):
 6 | 
 7 |     def __init__(self, project, args, entity=None, group=None):
 8 |         """
 9 |         Initialize wandb instance and connect to the server
10 | 
11 |         Args:
12 |             project: name of the project
13 |             args: hyperparameters used for this run
14 |             group: group name used to organize related runs
15 |             entity: account or group id used for that run
16 |         """
17 |         wandb.init(project=project, entity=entity, group=group)
18 |         wandb.config.update(args)  # adds all of the arguments as config variables
19 | 
20 |     def log_scalar(self, scalar, name, epoch):
21 |         """
22 |         Logs a scalar to wandb
23 | 
24 |         Args:
25 |             scalar: the scalar to be logged
26 |             name: name of the scalar
27 |             epoch: epoch in the training loop
28 |         """
29 |         wandb.log({name: scalar}, step=epoch)
30 | 
31 |     def log_image(self, image, name, epoch):
32 |         """
33 |         Logs an image to wandb
34 | 
35 |         Args:
36 |             image (Image) : the image to be logged
37 |             name (String) : name of the image
38 |             epoch (Integer) : epoch in the training loop
39 | 
40 |         """
41 |         wandb.log({name: [wandb.Image(image, caption=name)]}, step=epoch)
42 | 
43 |     def log_plot(self, plot, name, epoch):
44 |         """
45 |         Logs a plot to wandb
46 | 
47 |         Args:
48 |             plot (plot) : the plot to be logged
49 |             name (String) : name of the plot
50 |             epoch (Integer) : epoch in the training loop
51 | 
52 |         """
53 |         wandb.log({name: plot}, step=epoch)
54 | 
55 |     def log_histogram(self, histogram, name, epoch):
56 |         """
57 |         Logs a histogram to wandb
58 | 
59 |         Args:
60 |             histogram (histogram) : the histogram to be logged
61 |             name (String) : name of the histogram
62 |             epoch (Integer) : epoch in the training loop
63 | 
64 |         """
65 |         wandb.log({name: wandb.Histogram(histogram)}, step=epoch)
66 | 
--------------------------------------------------------------------------------
/NeuralSolvers/loggers/__init__.py:
--------------------------------------------------------------------------------
 1 | from .Logger_Interface import LoggerInterface
 2 | from .TensorBoard_Logger import TensorBoardLogger
 3 | from .WandB_Logger import WandbLogger
 4 | from .Python_Logger import PythonLogger
 5 | 
 6 | __all__ = [
 7 |     'LoggerInterface',
 8 |     'WandbLogger',
 9 |     'TensorBoardLogger',
10 |     'PythonLogger'
11 | ]
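# --- Editor's note: illustrative sketch appended to loggers/__init__.py; not part of the original file. ---
# The concrete loggers are interchangeable behind LoggerInterface. A hedged
# sketch of selecting one at runtime; `make_logger`, the project name, and the
# log directory are hypothetical, and WandbLogger contacts the wandb server on
# construction.

def make_logger(backend, args=None):
    if backend == "tensorboard":
        return TensorBoardLogger(logdir="runs/experiment_1")
    if backend == "wandb":
        return WandbLogger(project="neural-solvers-demo", args=args or {})
    return PythonLogger()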
--------------------------------------------------------------------------------
/NeuralSolvers/models/Finger_Net.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | 
 4 | class FingerNet(nn.Module):
 5 |     def __init__(self, lb, ub, inputSize, outputSize, numFeatures=500, num_finger_layers=3, numLayers=8, activation=torch.relu, normalize=True, scaling=1.):
 6 |         torch.manual_seed(1234)
 7 |         super(FingerNet, self).__init__()
 8 |         self.input_size = inputSize
 9 |         self.num_finger_layers = num_finger_layers
10 |         self.numFeatures = numFeatures
11 |         self.numLayers = numLayers
12 |         self.lin_layers = nn.ModuleList()
13 |         self.lb = torch.tensor(lb).float()
14 |         self.ub = torch.tensor(ub).float()
15 |         self.activation = activation
16 |         self.normalize = normalize
17 |         self.scaling = scaling
18 |         self.output_size = outputSize
19 |         self.init_layers()
20 | 
21 | 
22 |     def init_layers(self):
23 |         """
24 |         Creates the finger sub-networks and the combination layers, and
25 |         initializes their weights with Xavier initialization (the biases
26 |         of the combination layers are set to zero).
27 |         """
28 |         self.finger_nets = nn.ModuleList()
29 |         self.lin_layers = nn.ModuleList()
30 |         for i in range(self.input_size):
31 |             self.finger_nets.append(nn.ModuleList())
32 |             self.finger_nets[i].append(nn.Linear(1, self.numFeatures))
33 |             for _ in range(self.num_finger_layers - 1):
34 |                 self.finger_nets[i].append(nn.Linear(self.numFeatures, self.numFeatures))
35 |             for m in self.finger_nets[i]:
36 |                 if isinstance(m, nn.Linear):
37 |                     nn.init.xavier_uniform_(m.weight)
38 | 
39 |         self.lin_layers.append(nn.Linear(self.input_size * self.numFeatures, self.numFeatures))
40 |         for i in range(self.numLayers - 1):
41 |             inFeatures = self.numFeatures
42 |             self.lin_layers.append(nn.Linear(inFeatures, self.numFeatures))
43 |             inFeatures = self.numFeatures
44 |         self.lin_layers.append(nn.Linear(inFeatures, self.output_size))
45 |         for m in self.lin_layers:
46 |             if isinstance(m, nn.Linear):
47 |                 nn.init.xavier_uniform_(m.weight)
48 |                 nn.init.constant_(m.bias, 0)
49 | 
50 |     def forward(self, x_in):
51 |         if self.normalize:
52 |             x_in = 2.0 * (x_in - self.lb) / (self.ub - self.lb) - 1.0
53 | 
54 |         input_tensors = []
55 |         for i in range(self.input_size):
56 |             input_tensors.append(x_in[:, i].view(-1, 1))
57 | 
58 |         output_tensors = []
59 | 
60 |         for finger_idx in range(self.input_size):
61 |             x_in = input_tensors[finger_idx]
62 |             for i in range(0, self.num_finger_layers):
63 |                 x_in = self.finger_nets[finger_idx][i](x_in)
64 |                 x_in = self.activation(x_in)
65 |             output_tensors.append(x_in)
66 | 
67 |         x = torch.cat(output_tensors, 1)
68 |         for i in range(0, len(self.lin_layers) - 1):
69 |             x = self.lin_layers[i](x)
70 |             x = self.activation(x)
71 |         x = self.lin_layers[-1](x)
72 | 
73 |         return self.scaling * x
74 | 
75 |     def cuda(self):
76 |         super(FingerNet, self).cuda()
77 |         for layers in self.finger_nets:
78 |             layers.cuda()
79 |         self.lin_layers.cuda()
80 |         self.lb = self.lb.cuda()
81 |         self.ub = self.ub.cuda()
82 | 
83 |     def cpu(self):
84 |         super(FingerNet, self).cpu()
85 |         for layers in self.finger_nets:
86 |             layers.cpu()
87 |         self.lb = self.lb.cpu()
88 |         self.ub = self.ub.cpu()
89 | 
90 |     def to(self, device):
91 |         super(FingerNet, self).to(device)
92 |         for layers in self.finger_nets:
93 |             layers.to(device)
94 |         self.lb = self.lb.to(device)
95 |         self.ub = self.ub.to(device)
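# --- Editor's note: illustrative usage sketch appended to Finger_Net.py; not part of the original file. ---
# FingerNet routes each input dimension through its own "finger" sub-network
# before a joint MLP, as exercised in test_Finger_Net.py. Minimal CPU usage
# with the default architecture (500 features, 3 finger layers, 8 joint layers):

if __name__ == "__main__":
    model = FingerNet(lb=[0.0, 0.0], ub=[1.0, 1.0], inputSize=2, outputSize=1)
    x = torch.rand(10, 2)
    print(model(x).shape)  # torch.Size([10, 1])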
--------------------------------------------------------------------------------
/NeuralSolvers/models/__init__.py:
--------------------------------------------------------------------------------
 1 | from .mlp import MLP
 2 | from .distributed_moe import MoE as distMoe
 3 | from .moe_mlp import MoE as MoE
 4 | from .snake_mlp import SnakeMLP
 5 | from .Finger_Net import FingerNet
 6 | from .moe_finger import MoE as FingerMoE
 7 | from .pennesmodel import PennesHPM
 8 | from .modulated_mlp import ModulatedMLP
 9 | from . import activations
10 | 
11 | __all__ = [
12 |     'MLP',
13 |     'MoE',
14 |     'distMoe',
15 |     'SnakeMLP',
16 |     'FingerNet',
17 |     'FingerMoE',
18 |     'activations',
19 |     'PennesHPM',
20 |     'ModulatedMLP'
21 | ]
22 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/activations/__init__.py:
--------------------------------------------------------------------------------
 1 | from .snake import Snake
 2 | 
 3 | __all__ = [
 4 |     'Snake'
 5 | ]
 6 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/activations/snake.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | 
 4 | 
 5 | class Snake(nn.Module):
 6 |     """ Implementation of the snake activation function as a torch nn module.
 7 |     The result of the activation function is calculated by a(x) = x + sin^2(a * x) / a,
 8 |     where the frequency a is a trainable parameter.
 9 |     """
10 | 
11 |     def __init__(self, frequency=10):
12 |         """Constructor function that initializes the torch module
13 |         """
14 |         super(Snake, self).__init__()
15 | 
16 |         # making the frequency a trainable by activating gradient calculation
17 |         self.a = nn.Parameter(torch.tensor([float(frequency)], requires_grad=True))
18 | 
19 |     def forward(self, x):
20 |         return x + ((torch.sin(self.a * x)) ** 2) / self.a
--------------------------------------------------------------------------------
/NeuralSolvers/models/distributed_moe.py:
--------------------------------------------------------------------------------
 1 | # Sparsely-Gated Mixture-of-Experts Layers.
 2 | # See "Outrageously Large Neural Networks"
 3 | # https://arxiv.org/abs/1701.06538
 4 | #
 5 | # Author: David Rau
 6 | #
 7 | # The code is based on the TensorFlow implementation:
 8 | # https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/expert_utils.py
 9 | 
10 | import time
11 | import torch
12 | import torch.nn as nn
13 | from torch.distributions.normal import Normal
14 | import numpy as np
15 | import torch.nn.functional as F
16 | from NeuralSolvers.models.mlp import MLP
17 | 
18 | class SparseDispatcher(object):
19 |     """Helper for implementing a mixture of experts.
20 |     The purpose of this class is to create input minibatches for the
21 |     experts and to combine the results of the experts to form a unified
22 |     output tensor.
23 |     There are two functions:
24 |     dispatch - take an input Tensor and create input Tensors for each expert.
25 |     combine - take output Tensors from each expert and form a combined output
26 |       Tensor.  Outputs from different experts for the same batch element are
27 |       summed together, weighted by the provided "gates".
28 |     The class is initialized with a "gates" Tensor, which specifies which
29 |     batch elements go to which experts, and the weights to use when combining
30 |     the outputs.  Batch element b is sent to expert e iff gates[b, e] != 0.
31 |     The inputs and outputs are all two-dimensional [batch, depth].
32 |     Caller is responsible for collapsing additional dimensions prior to
33 |     calling this class and reshaping the output to the original shape.
34 |     See common_layers.reshape_like().
35 |     Example use:
36 |     gates: a float32 `Tensor` with shape `[batch_size, num_experts]`
37 |     inputs: a float32 `Tensor` with shape `[batch_size, input_size]`
38 |     experts: a list of length `num_experts` containing sub-networks.
39 |     dispatcher = SparseDispatcher(num_experts, gates)
40 |     expert_inputs = dispatcher.dispatch(inputs)
41 |     expert_outputs = [experts[i](expert_inputs[i]) for i in range(num_experts)]
42 |     outputs = dispatcher.combine(expert_outputs)
43 |     The preceding code sets the output for a particular example b to:
44 |     output[b] = Sum_i(gates[b, i] * experts[i](inputs[b]))
45 |     This class takes advantage of sparsity in the gate matrix by including in the
46 |     `Tensor`s for expert i only the batch elements for which `gates[b, i] > 0`.
47 |     """
48 | 
49 |     def __init__(self, num_experts, gates, device="cpu"):
50 |         """Create a SparseDispatcher."""
51 |         self.device = device
52 |         self._gates = gates
53 |         self._num_experts = num_experts
54 |         # sort experts
55 |         sorted_experts, index_sorted_experts = torch.nonzero(gates).sort(0)
56 |         # drop indices
57 |         _, self._expert_index = sorted_experts.split(1, dim=1)
58 |         # get according batch index for each expert
59 |         self._batch_index = sorted_experts[index_sorted_experts[:, 1], 0]
60 |         # calculate num samples that each expert gets
61 |         self._part_sizes = list((gates > 0).sum(0).to(device))
62 |         # expand gates to match with self._batch_index
63 |         gates_exp = gates[self._batch_index.flatten()]
64 |         self._nonzero_gates = torch.gather(gates_exp, 1, self._expert_index)
65 | 
66 |     def dispatch(self, inp):
67 |         """Create one input Tensor for each expert.
68 |         The `Tensor` for an expert `i` contains the slices of `inp` corresponding
69 |         to the batch elements `b` where `gates[b, i] > 0`.
70 |         Args:
71 |           inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`
72 |         Returns:
73 |           a list of `num_experts` `Tensor`s with shapes
74 |             `[expert_batch_size_i, <extra_input_dims>]`.
75 |         """
76 | 
77 |         # assigns samples to experts whose gate is nonzero
78 | 
79 |         # expand according to batch index so we can just split by _part_sizes
80 |         inp_exp = inp[self._batch_index].squeeze(1)
81 |         return torch.split(inp_exp, self._part_sizes, dim=0)
82 | 
83 | 
84 |     def combine(self, expert_out, multiply_by_gates=True):
85 |         """Sum together the expert output, weighted by the gates.
86 |         The slice corresponding to a particular batch element `b` is computed
87 |         as the sum over all experts `i` of the expert output, weighted by the
88 |         corresponding gate values.  If `multiply_by_gates` is set to False, the
89 |         gate values are ignored.
90 |         Args:
91 |           expert_out: a list of `num_experts` `Tensor`s, each with shape
92 |             `[expert_batch_size_i, <extra_output_dims>]`.
93 |           multiply_by_gates: a boolean
94 |         Returns:
95 |           a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
96 |         """
97 |         # apply exp to expert outputs, so we are no longer in log space
98 |         stitched = torch.cat(expert_out, 0).exp()
99 | 
100 |         if multiply_by_gates:
101 |             stitched = stitched.mul(self._nonzero_gates)
102 |         zeros = torch.zeros(self._gates.size(0), expert_out[-1].size(1), requires_grad=True).to(self.device)
103 |         # combine samples that have been processed by the same k experts
104 |         combined = zeros.index_add(0, self._batch_index, stitched.float())
105 |         # add eps to all zero values in order to avoid nans when going back to log space
106 |         combined[combined == 0] = np.finfo(float).eps
107 |         # back to log space
108 |         return combined.log()
109 | 
110 | 
111 |     def expert_to_gates(self):
112 |         """Gate values corresponding to the examples in the per-expert `Tensor`s.
113 |         Returns:
114 |           a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
115 |             and shapes `[expert_batch_size_i]`
116 |         """
117 |         # split nonzero gates for each expert
118 |         return torch.split(self._nonzero_gates, self._part_sizes, dim=0)
119 | 
120 | 
121 | 
122 | 
123 | class MoE(nn.Module):
124 | 
125 |     """Call a Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
126 |     Args:
127 |     input_size: integer - size of the input
128 |     output_size: integer - size of the output
129 |     num_experts: an integer - number of experts
130 |     hidden_size: an integer - hidden size of the experts
131 |     noisy_gating: a boolean
132 |     k: an integer - how many experts to use for each batch element
133 |     """
134 | 
135 |     def __init__(self, input_size, output_size, num_experts,
136 |                  hidden_size, num_hidden, lb, ub, activation=torch.tanh,
137 |                  non_linear=False, noisy_gating=False, k=1, device="cpu"):
138 |         super(MoE, self).__init__()
139 |         self.noisy_gating = noisy_gating
140 |         self.num_experts = num_experts
141 |         self.output_size = output_size
142 |         self.input_size = input_size
143 |         self.hidden_size = hidden_size
144 |         self.device = device
145 |         self.k = k
146 |         self.loss = 0
147 |         self.num_devices = torch.cuda.device_count() - 1  # cuda:0 is for handling data
148 |         print("The model runs on {} devices".format(self.num_devices))
149 |         # instantiate experts on the needed GPUs
150 |         self.experts = nn.ModuleList([
151 |             MLP(input_size, output_size, hidden_size, num_hidden, lb, ub, activation,
152 |                 device='cuda:{}'.format((i % self.num_devices) + 1))
153 |             .to('cuda:{}'.format((i % self.num_devices) + 1)) for i in range(self.num_experts)
154 |         ])
155 |         self.w_gate = nn.Parameter(torch.randn(input_size, num_experts, device=self.device), requires_grad=True)
156 |         self.w_noise = nn.Parameter(torch.zeros(input_size, num_experts, device=self.device), requires_grad=True)
157 | 
158 |         self.softplus = nn.Softplus()
159 |         self.softmax = nn.Softmax(1)
160 |         self.normal = Normal(torch.tensor([0.0]).to(self.device), torch.tensor([1.0]).to(self.device))
161 | 
162 |         self.non_linear = non_linear
163 |         if self.non_linear:
164 |             self.gating_network = MLP(input_size, num_experts, num_experts * 2, 1, lb, ub, activation=F.relu,
165 |                                       device=self.device).to(self.device)
166 | 
167 |         assert(self.k <= self.num_experts)
168 | 
169 |     def cv_squared(self, x):
170 |         """The squared coefficient of variation of a sample.
171 |         Useful as a loss to encourage a positive distribution to be more uniform.
172 |         Epsilons added for numerical stability.
173 |         Returns 0 for an empty Tensor.
174 |         Args:
175 |           x: a `Tensor`.
176 |         Returns:
177 |           a `Scalar`.
178 |         """
179 |         eps = 1e-10
180 |         # if only num_experts = 1
181 |         if x.shape[0] == 1:
182 |             return torch.tensor([0.0], device=x.device)
183 |         return x.float().var() / (x.float().mean()**2 + eps)
184 | 
185 | 
186 |     def _gates_to_load(self, gates):
187 |         """Compute the true load per expert, given the gates.
188 |         The load is the number of examples for which the corresponding gate is >0.
189 |         Args:
190 |           gates: a `Tensor` of shape [batch_size, n]
191 |         Returns:
192 |           a float32 `Tensor` of shape [n]
193 |         """
194 |         return (gates > 0).sum(0)
195 | 
196 |     def _prob_in_top_k(self, clean_values, noisy_values, noise_stddev, noisy_top_values):
197 |         """Helper function to NoisyTopKGating.
198 |         Computes the probability that value is in top k, given different random noise.
199 |         This gives us a way of backpropagating from a loss that balances the number
200 |         of times each expert is in the top k experts per example.
201 |         In the case of no noise, pass in None for noise_stddev, and the result will
202 |         not be differentiable.
203 |         Args:
204 |           clean_values: a `Tensor` of shape [batch, n].
205 |           noisy_values: a `Tensor` of shape [batch, n].  Equal to clean values plus
206 |             normally distributed noise with standard deviation noise_stddev.
207 |           noise_stddev: a `Tensor` of shape [batch, n], or None
208 |           noisy_top_values: a `Tensor` of shape [batch, m].
209 |             "values" Output of tf.top_k(noisy_top_values, m).  m >= k+1
210 |         Returns:
211 |           a `Tensor` of shape [batch, n].
212 |         """
213 | 
214 |         batch = clean_values.size(0)
215 |         m = noisy_top_values.size(1)
216 |         top_values_flat = noisy_top_values.flatten()
217 |         threshold_positions_if_in = (torch.arange(batch) * m + self.k).to(self.device)
218 |         threshold_if_in = torch.unsqueeze(torch.gather(top_values_flat, 0, threshold_positions_if_in), 1)
219 |         is_in = torch.gt(noisy_values, threshold_if_in)
220 |         threshold_positions_if_out = (threshold_positions_if_in - 1).to(self.device)
221 |         threshold_if_out = torch.unsqueeze(torch.gather(top_values_flat, 0, threshold_positions_if_out), 1)
222 |         # is each value currently in the top k.
223 |         prob_if_in = self.normal.cdf((clean_values - threshold_if_in) / noise_stddev)
224 |         prob_if_out = self.normal.cdf((clean_values - threshold_if_out) / noise_stddev)
225 |         prob = torch.where(is_in, prob_if_in, prob_if_out)
226 |         return prob
227 | 
228 | 
229 |     def noisy_top_k_gating(self, x, train, noise_epsilon=1e-1):
230 |         """Noisy top-k gating.
231 |           See paper: https://arxiv.org/abs/1701.06538.
232 |         Args:
233 |           x: input Tensor with shape [batch_size, input_size]
234 |           train: a boolean - we only add noise at training time.
235 |           noise_epsilon: a float
236 |         Returns:
237 |           gates: a Tensor with shape [batch_size, num_experts]
238 |           load: a Tensor with shape [num_experts]
239 |         """
240 | 
241 |         if self.non_linear:
242 |             clean_logits = self.gating_network(x)
243 |         else:
244 |             clean_logits = x @ self.w_gate
245 |         if self.noisy_gating:
246 |             raw_noise_stddev = x @ self.w_noise
247 |             if (self.k > 1):
248 |                 raw_noise_stddev = self.softplus(raw_noise_stddev)
249 |             noise_stddev = ((raw_noise_stddev + noise_epsilon) * train)
250 |             noisy_logits = clean_logits + (torch.randn_like(clean_logits) * noise_stddev)
251 |             logits = noisy_logits
252 |         else:
253 |             logits = clean_logits
254 | 
255 |         # calculate topk + 1 that will be needed for the noisy gates
256 |         top_logits, top_indices = logits.topk(min(self.k + 1, self.num_experts), dim=1)
257 |         top_k_logits = top_logits[:, :self.k]
258 |         top_k_indices = top_indices[:, :self.k]
259 |         if (self.k > 1):
260 |             top_k_gates = self.softmax(top_k_logits)
261 |         else:
262 |             top_k_gates = torch.sigmoid(top_k_logits)
263 |         zeros = torch.zeros_like(logits, requires_grad=True)
264 |         gates = zeros.scatter(1, top_k_indices, top_k_gates)
265 | 
266 |         if self.noisy_gating and self.k < self.num_experts:
267 |             load = (self._prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits)).sum(0)
268 |         else:
269 |             load = self._gates_to_load(gates)
270 |         return gates, load
271 | 
272 | 
273 |     def get_utilisation_loss(self):
274 |         return self.loss
275 | 
276 | 
277 |     def forward(self, x, train=True, loss_coef=1e-2):
278 |         """Args:
279 |         x: tensor shape [batch_size, input_size]
280 |         train: a boolean scalar.
281 |         loss_coef: a scalar - multiplier on load-balancing losses
282 |         Returns:
283 |         y: a tensor with shape [batch_size, output_size].
284 |         extra_training_loss: a scalar.  This should be added into the overall
285 |         training loss of the model. The backpropagation of this loss
286 |         encourages all experts to be approximately equally used across a batch.
287 |         """
288 |         gates, load = self.noisy_top_k_gating(x, train)
289 |         # calculate importance loss
290 |         importance = gates.sum(0)
291 |         #
292 |         loss = self.cv_squared(importance) + self.cv_squared(load)
293 |         loss *= loss_coef
294 | 
295 |         self.loss = loss
296 | 
297 |         dispatcher = SparseDispatcher(self.num_experts, gates, device=self.device)
298 |         expert_inputs = dispatcher.dispatch(x)
299 |         gates = dispatcher.expert_to_gates()
300 |         # a loop is needed here for asynchronous calls to the GPUs
301 |         expert_outputs = []
302 |         for i in range(self.num_experts):
303 |             # move data to the expert's device
304 |             exp_input = expert_inputs[i].to(self.experts[i].device)
305 |             expert_output = self.experts[i](exp_input)
306 |             # move the expert output back to the main device
307 |             expert_output = expert_output.to(self.device)
308 |             # append it for stitching
309 |             expert_outputs.append(expert_output)
310 |         y = dispatcher.combine(expert_outputs)
311 |         return y
312 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/mlp.py:
--------------------------------------------------------------------------------
 1 | import warnings
 2 | import torch
 3 | import torch.nn as nn
 4 | 
 5 | 
 6 | def set_seed(seed=2342):
 7 |     torch.manual_seed(seed)
 8 |     torch.cuda.manual_seed(seed)
 9 |     torch.cuda.manual_seed_all(seed)  # for multi-GPU
10 |     torch.backends.cudnn.deterministic = True
11 |     torch.backends.cudnn.benchmark = False
12 | 
13 | 
14 | class MLP(nn.Module):
15 |     def __init__(self, input_size, output_size, hidden_size, num_hidden, lb, ub, activation=torch.tanh, normalize=True, device='cpu'):
16 |         set_seed(2342)
17 |         super(MLP, self).__init__()
18 |         self.linear_layers = nn.ModuleList()
19 |         self.activation = activation
20 |         self.init_layers(input_size, output_size, hidden_size, num_hidden)
21 |         self.lb = torch.Tensor(lb).float().to(device)
22 |         self.ub = torch.Tensor(ub).float().to(device)
23 |         self.linear_layers.to(device)
24 |         self.normalize = normalize
25 |         self.device = device
26 | 
27 |     def init_layers(self, input_size, output_size, hidden_size, num_hidden):
28 |         self.linear_layers.append(nn.Linear(input_size, hidden_size))
29 |         for _ in range(num_hidden):
30 |             self.linear_layers.append(nn.Linear(hidden_size, hidden_size))
31 |         self.linear_layers.append(nn.Linear(hidden_size, output_size))
32 | 
33 |         for m in self.linear_layers:
34 |             if isinstance(m, nn.Linear):
35 |                 nn.init.xavier_normal_(m.weight)
36 |                 nn.init.constant_(m.bias, 0)
37 | 
38 |     def forward(self, x):
39 |         if x.device != self.device:
40 |             warnings.warn(f"Input tensor was on {x.device}, but model is on {self.device}. "
41 |                           f"Input tensor has been moved to {self.device}. "
42 |                           "This may slow down computation. Consider moving your input tensor to the correct device before calling the model.",
43 |                           UserWarning)
44 |             x = x.to(self.device)
45 | 
46 |         if self.normalize:
47 |             x = 2.0 * (x - self.lb) / (self.ub - self.lb) - 1.0
48 |         for i in range(len(self.linear_layers) - 1):
49 |             x = self.linear_layers[i](x)
50 |             x = self.activation(x)
51 |         x = self.linear_layers[-1](x)
52 |         return x
53 | 
54 |     def cuda(self):
55 |         super(MLP, self).cuda()
56 |         self.lb = self.lb.cuda()
57 |         self.ub = self.ub.cuda()
58 | 
59 |     def cpu(self):
60 |         super(MLP, self).cpu()
61 |         self.lb = self.lb.cpu()
62 |         self.ub = self.ub.cpu()
63 | 
64 |     def to(self, device):
65 |         super(MLP, self).to(device)
66 |         self.lb = self.lb.to(device)
67 |         self.ub = self.ub.to(device)
68 |         self.device = device  # keep the device attribute in sync to avoid spurious moves in forward()
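# --- Editor's note: illustrative usage sketch appended to mlp.py; not part of the original file. ---
# The MLP normalizes inputs to [-1, 1] using lb/ub before the linear layers, so
# lb/ub should match the domain bounds of the PDE. Minimal CPU usage with a
# space-time input (x, t) and a scalar solution u(x, t):

if __name__ == "__main__":
    model = MLP(input_size=2, output_size=1, hidden_size=32, num_hidden=3,
                lb=[0.0, 0.0], ub=[1.0, 1.0], device='cpu')
    x = torch.rand(16, 2)
    print(model(x).shape)  # torch.Size([16, 1])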
--------------------------------------------------------------------------------
/NeuralSolvers/models/modulated_mlp.py:
--------------------------------------------------------------------------------
 1 | import warnings
 2 | import torch
 3 | import torch.nn as nn
 4 | import torch.nn.functional as F
 5 | from torchvision.models import vit_b_16, vit_l_16, vit_l_32  # currently unused
 6 | from torchvision.transforms import Compose, Resize, Normalize, ToTensor  # currently unused
 7 | from .mlp import set_seed
 8 | 
 9 | class ModulatedMLP(nn.Module):
10 |     def __init__(self, input_size, output_size, hidden_size, num_hidden, lb, ub,
11 |                  activation=torch.tanh, normalize=True, nfeat_mod=128, device='cpu'):
12 |         """
13 |         MLP whose hidden activations are modulated by a small modulation network.
14 | 
15 |         Args:
16 |             input_size (int): Input dimensionality (e.g., x, t).
17 |             output_size (int): Output dimensionality (e.g., u(x, t)).
18 |             hidden_size (int): Hidden layer size.
19 |             num_hidden (int): Number of hidden layers.
20 |             lb (list): Lower bounds for normalization.
21 |             ub (list): Upper bounds for normalization.
22 |             nfeat_mod (int): Hidden width of the modulation network.
23 |             activation (callable): Activation function for hidden layers.
24 |             normalize (bool): Whether to normalize inputs.
25 |             device (str): Device to use ('cpu' or 'cuda').
26 |         """
27 |         set_seed(2342)
28 |         super(ModulatedMLP, self).__init__()
29 |         self.linear_layers = nn.ModuleList()
30 |         self.activation = activation
31 |         self.lb = torch.Tensor(lb).float()
32 |         self.ub = torch.Tensor(ub).float()
33 |         self.normalize = normalize
34 |         self.device = device
35 |         self.nfeat_mod = nfeat_mod
36 |         # Modulation network to process spatio-temporal information of h_1
37 |         self.modulation_network = nn.Sequential(
38 |             nn.Linear(hidden_size, self.nfeat_mod),
39 |             nn.ReLU(),
40 |             nn.Linear(self.nfeat_mod, hidden_size * num_hidden)
41 |         )
42 | 
43 |         self.init_layers(input_size, output_size, hidden_size, num_hidden)
44 | 
45 |     def init_layers(self, input_size, output_size, hidden_size, num_hidden):
46 |         self.linear_layers.append(nn.Linear(input_size, hidden_size))
47 |         for _ in range(num_hidden):
48 |             self.linear_layers.append(nn.Linear(hidden_size, hidden_size))
49 |         self.linear_layers.append(nn.Linear(hidden_size, output_size))
50 | 
51 |         for m in self.linear_layers:
52 |             if isinstance(m, nn.Linear):
53 |                 nn.init.xavier_normal_(m.weight)
54 |                 nn.init.constant_(m.bias, 0)
55 | 
56 |         for m in self.modulation_network:
57 |             if isinstance(m, nn.Linear):
58 |                 nn.init.xavier_normal_(m.weight)
59 |                 nn.init.constant_(m.bias, 0)
60 | 
61 | 
62 |     def forward(self, x):
63 |         """
64 |         Forward pass with learned activation modulation.
65 | 
66 |         Args:
67 |             x (torch.Tensor): Input tensor of shape (batch_size, input_dim).
68 | 
69 |         Returns:
70 |             torch.Tensor: Output of the MLP with modulated activations.
71 |         """
72 | 
73 |         noPoints, _ = x.shape  # batch size (currently unused)
74 | 
75 |         # Check device alignment
76 |         if x.device != self.device:
77 |             warnings.warn(f"Input tensor was on {x.device}, but model is on {self.device}. "
78 |                           f"Input tensor has been moved to {self.device}. "
79 |                           "This may slow down computation. Consider moving your input tensor to the correct device before calling the model.",
80 |                           UserWarning)
81 |             x = x.to(self.device)
82 | 
83 |         # Normalize inputs
84 |         if self.normalize:
85 |             x = 2.0 * (x - self.lb) / (self.ub - self.lb) - 1.0
86 | 
87 |         # Compute activation of first layer
88 |         x = self.linear_layers[0](x)
89 |         x = self.activation(x)
90 | 
91 |         # Compute modulation terms
92 |         modulation_terms = self.modulation_network(x)  # Shape: (batch_size, hidden_size * num_hidden)
93 |         modulation_terms = torch.sigmoid(modulation_terms)
94 | 
95 |         modulation_terms = modulation_terms.view(-1, len(self.linear_layers) - 2, self.linear_layers[1].bias.size(0))
96 | 
97 |         '''
98 |         # Forward pass with bias modulation
99 |         # Start with i = 1
100 |         for i in range(len(self.linear_layers) - 2):
101 |             x = self.linear_layers[i+1](x)
102 |             bias = self.linear_layers[i+1].bias + modulation_terms[:, i]
103 |             x = self.activation(x + bias)  # Apply modulated bias
104 |         '''
105 | 
106 |         # Forward pass with multiplicative activation modulation
107 |         for i in range(len(self.linear_layers) - 2):
108 |             wi = self.linear_layers[i + 1].weight  # Shape: [out_features, in_features]
109 |             bi = self.linear_layers[i + 1].bias    # Shape: [out_features]
110 |             modi = modulation_terms[:, i]          # Shape: [batch_size, out_features]
111 | 
112 |             # scale the activations by the modulation gates
113 |             x = x * modi
114 | 
115 |             # Linear transformation with a residual (skip) connection
116 |             x = x + torch.matmul(x, wi.T) + bi  # Shape: [batch_size, out_features]
117 | 
118 |             # Activation
119 |             x = self.activation(x)
120 | 
121 |         # Final layer
122 |         x = self.linear_layers[-1](x)
123 | 
124 |         return x
125 | 
126 | 
127 |     def to(self, device):
128 |         super(ModulatedMLP, self).to(device)
129 |         self.lb = self.lb.to(device)
130 |         self.ub = self.ub.to(device)
131 |         self.device = device  # keep the device attribute in sync to avoid spurious moves in forward()
--------------------------------------------------------------------------------
/NeuralSolvers/models/moe_finger_test.py:
--------------------------------------------------------------------------------
 1 | from NeuralSolvers.models import FingerMoE
 2 | import numpy as np
 3 | import torch
 4 | 
 5 | if __name__ == "__main__":
 6 |     lb = np.array([0, 0, 0, 0])
 7 |     ub = np.array([1, 1, 1, 1])
 8 | 
 9 |     # finger MoE gpu
10 |     moe = FingerMoE(4, 3, 5, 100, 2, lb, ub)
11 |     moe.cuda()
12 |     x_gpu = torch.randn(3, 4).cuda()
13 |     y_gpu = moe(x_gpu)
14 |     print(y_gpu)
15 | 
16 |     # finger MoE cpu
17 |     moe.cpu()
18 |     x_cpu = torch.randn(3, 4)
19 |     y_cpu = moe(x_cpu)
20 |     print(y_cpu)
21 | 
22 |     # non linear gating test
23 |     moe = FingerMoE(4, 3, 5, 100, 2, lb, ub, non_linear=True)
24 |     moe.cuda()
25 |     x_gpu = torch.randn(3, 4).cuda()
26 |     y_gpu = moe(x_gpu)
27 |     print(y_gpu)
28 |     moe.cpu()
29 |     x_cpu = torch.randn(3, 4)
30 |     y_cpu = moe(x_cpu)
31 |     print(y_cpu)
32 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/moe_mlp_test.py:
--------------------------------------------------------------------------------
 1 | from NeuralSolvers.models import MoE
 2 | import numpy as np
 3 | import torch
 4 | 
 5 | if __name__ == "__main__":
 6 |     lb = np.array([0, 0, 0])
 7 |     ub = np.array([1, 1, 1])
 8 | 
 9 |     # linear gating test
10 |     moe = MoE(3, 3, 5, 100, 2, lb, ub)
11 |     moe.cuda()
12 |     x_gpu = torch.randn(3, 3).cuda()
13 |     y_gpu = moe(x_gpu)
14 |     print(y_gpu)
15 |     moe.cpu()
16 |     x_cpu = torch.randn(3, 3)
17 |     y_cpu = moe(x_cpu)
18 |     print(y_cpu)
19 | 
20 |     # non linear gating test
21 |     moe = MoE(3, 3, 5, 100, 2, lb, ub, non_linear=True)
22 |     moe.cuda()
23 |     x_gpu = torch.randn(3, 3).cuda()
24 |     y_gpu = moe(x_gpu)
25 |     print(y_gpu)
26 |     moe.cpu()
27 |     x_cpu = torch.randn(3, 3)
28 |     y_cpu = moe(x_cpu)
29 |     print(y_cpu)
30 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/pennesmodel.py:
--------------------------------------------------------------------------------
 1 | from torch import randn
 2 | from torch.nn.functional import relu
 3 | from torch.autograd import Variable
 4 | from torch.nn import Module, Parameter
 5 | 
 6 | class PennesHPM(Module):
 7 |     """
 8 |     Constructor of the Pennes model:
 9 |         du/dt = convection + linear(u) = (a_conv * Δu) + (w * u + b)
10 |     Args:
11 |         config (dict): dictionary defining the configuration of the model.
12 |             keys: convection, linear_u, heat_source.
13 |             values: True (include term in the model) or False (do not include).
14 |         u_blood (float): arterial blood temperature.
15 |     """
16 |     def __init__(self, config, u_blood=37., spat_res=0.3, hs_net=None):
17 |         super().__init__()
18 |         self.config = config
19 |         self.u_blood = u_blood
20 |         self.spat_res = spat_res
21 |         if config['convection']:
22 |             self.a_conv = Parameter(Variable((randn([640, 480]).clone().detach().requires_grad_(True))))
23 |         if config['linear_u']:
24 |             self.a_linear_u_w = Parameter(Variable((randn([640, 480]).clone().detach().requires_grad_(True))))
25 |             self.a_linear_u_b = Parameter(Variable((randn([640, 480]).clone().detach().requires_grad_(True))))
26 |         if config['heat_source']:
27 |             assert hs_net is not None
28 |             self.hs_net = hs_net
29 | 
30 |     def convection(self, derivatives):
31 |         """
32 |         Convection term of the model:
33 |             convection = a_conv * Δu
34 |         It is a linear mapping of the convection term in the original equation.
35 |         Args:
36 |             derivatives(tensor): tensor of the form [x,y,t,u,u_xx,u_yy,u_t].
37 |                                                     [0,1,2,3, 4 , 5 , 6 ].
38 |         """
39 |         u_xx = derivatives[:, 4].view(-1)
40 |         u_yy = derivatives[:, 5].view(-1)
41 |         x_indices = (derivatives[:, 0].view(-1) / self.spat_res).long()
42 |         y_indices = (derivatives[:, 1].view(-1) / self.spat_res).long()
43 |         a_conv = relu(self.a_conv[x_indices, y_indices].view(-1))
44 |         return a_conv * (u_xx + u_yy)
45 | 
46 |     def linear_u(self, derivatives):
47 |         """
48 |         Linear term of the model:
49 |             linear(u) = w * u + b
50 |         It is a linear mapping of the perfusion term in the original equation.
51 |         Args:
52 |             derivatives(tensor): tensor of the form [x,y,t,u,u_xx,u_yy,u_t].
53 |         """
54 |         x_indices = (derivatives[:, 0].view(-1) / self.spat_res).long()
55 |         y_indices = (derivatives[:, 1].view(-1) / self.spat_res).long()
56 |         u_values = derivatives[:, 3].view(-1)
57 |         a_linear_u_w = self.a_linear_u_w[x_indices, y_indices].view(-1)
58 |         a_linear_u_b = self.a_linear_u_b[x_indices, y_indices].view(-1)
59 |         return a_linear_u_w * (u_values - self.u_blood) + a_linear_u_b
60 | 
61 |     def heat_source(self, derivatives):
62 |         return self.hs_net(derivatives[:, :3])
63 | 
64 |     def forward(self, derivatives):
65 |         """
66 |         Forward pass of the model.
67 |         Args:
68 |             derivatives(tensor): tensor of the form [x,y,t,u,u_xx,u_yy,u_t].
69 |                 where x,y,t - spatiotemporal coordinates in physical units;
70 |                 u, u_xx, u_yy, u_t - temperature and its derivatives.
71 |         """
72 |         predicted_u_t = 0
73 |         if self.config['convection']:
74 |             predicted_u_t += self.convection(derivatives)
75 |         if self.config['linear_u']:
76 |             predicted_u_t += self.linear_u(derivatives)
77 |         if self.config['heat_source']:
78 |             predicted_u_t += self.heat_source(derivatives)
79 |         return predicted_u_t
80 | 
81 |     def cuda(self):
82 |         """
83 |         Sends the instance of the class to cuda device.
84 |         """
85 |         super().cuda()
86 |         if self.config['convection']:
87 |             self.a_conv = self.a_conv.cuda()
88 |         if self.config['linear_u']:
89 |             self.a_linear_u_w = self.a_linear_u_w.cuda()
90 |             self.a_linear_u_b = self.a_linear_u_b.cuda()
91 | 
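# --- Editor's note: illustrative usage sketch appended to pennesmodel.py; not part of the original file. ---
# PennesHPM consumes a derivatives tensor [x, y, t, u, u_xx, u_yy, u_t] and
# predicts u_t from the configured terms. The random inputs below are
# placeholders; coordinates are scaled so the lookup (coordinate / spat_res)
# stays inside the 640x480 parameter grids.

if __name__ == "__main__":
    import torch
    config = {'convection': True, 'linear_u': True, 'heat_source': False}
    model = PennesHPM(config)
    derivatives = torch.rand(8, 7)   # columns: [x, y, t, u, u_xx, u_yy, u_t]
    derivatives[:, 0] *= 100.0       # x in [0, 100) -> index < 334 < 640
    derivatives[:, 1] *= 100.0       # y in [0, 100) -> index < 334 < 480
    print(model(derivatives).shape)  # torch.Size([8]) -> predicted u_t per point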
--------------------------------------------------------------------------------
/NeuralSolvers/models/snake_mlp.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.nn as nn
 3 | import math
 4 | from .mlp import MLP
 5 | from .activations.snake import Snake
 6 | 
 7 | class SnakeMLP(MLP):
 8 |     def __init__(self, input_size, output_size, hidden_size, num_hidden, lb, ub, frequency, normalize=True):
 9 |         super(MLP, self).__init__()
10 |         self.linear_layers = nn.ModuleList()
11 |         self.activation = nn.ModuleList()
12 |         self.init_layers(input_size, output_size, hidden_size, num_hidden, frequency)
13 |         self.lb = torch.tensor(lb).float()
14 |         self.ub = torch.tensor(ub).float()
15 |         self.normalize = normalize
16 | 
17 | 
18 |     def init_layers(self, input_size, output_size, hidden_size, num_hidden, frequency):
19 |         self.linear_layers.append(nn.Linear(input_size, hidden_size))
20 |         self.activation.append(Snake(frequency=frequency))
21 |         for _ in range(num_hidden):
22 |             self.linear_layers.append(nn.Linear(hidden_size, hidden_size))
23 |             self.activation.append(Snake(frequency=frequency))
24 |         self.linear_layers.append(nn.Linear(hidden_size, output_size))
25 | 
26 |         for m in self.linear_layers:
27 |             if isinstance(m, nn.Linear):
28 |                 bound = math.sqrt(3 / m.weight.size()[0])
29 |                 torch.nn.init.uniform_(m.weight, a=-bound, b=bound)
30 |                 nn.init.constant_(m.bias, 0)
31 | 
32 |     def forward(self, x):
33 |         if self.normalize:
34 |             x = 2.0 * (x - self.lb) / (self.ub - self.lb) - 1.0
35 |         for i in range(len(self.linear_layers) - 1):
36 |             x = self.linear_layers[i](x)
37 |             x = self.activation[i](x)
38 |         x = self.linear_layers[-1](x)
39 |         return x
40 | 
41 |     def cuda(self):
42 |         super(SnakeMLP, self).cuda()
43 |         self.lb = self.lb.cuda()
44 |         self.ub = self.ub.cuda()
45 | 
46 |     def cpu(self):
47 |         super(SnakeMLP, self).cpu()
48 |         self.lb = self.lb.cpu()
49 |         self.ub = self.ub.cpu()
50 | 
51 |     def to(self, device):
52 |         super(SnakeMLP, self).to(device)
53 |         self.lb = self.lb.to(device)
54 |         self.ub = self.ub.to(device)
55 | 
--------------------------------------------------------------------------------
/NeuralSolvers/models/test_Finger_Net.py:
--------------------------------------------------------------------------------
 1 | import unittest
 2 | import torch
 3 | 
 4 | from Finger_Net import FingerNet
 5 | from torch.nn import Module
 6 | 
 7 | 
 8 | 
 9 | class FingerNetTest(unittest.TestCase):
10 | 
11 |     def test_constructor(self):
12 |         lb = [0, 0, 0]
13 |         ub = [1, 1, 1]
14 |         model = FingerNet(lb, ub, 3, 1)
15 |         self.assertIsNotNone(model, "Model is not none")
16 |         self.assertIsInstance(model, Module, "Model is an instance of torch.nn.Module")
17 |         del model
18 | 
19 |     def test_architecture(self):
20 |         # test case with 1 finger
21 |         InputSize = 1
22 |         OutputSize = 1
23 |         lb = [0]
24 |         ub = [1]
25 |         model = FingerNet(lb, ub, InputSize, OutputSize)
26 |         self.assertEqual(len(model.finger_nets), InputSize)
27 |         del model
28 | 
29 |         InputSize = 2
30 |         OutputSize = 1
31 |         lb = [0, 0, 0]
32 |         ub = [1, 1, 1]
33 |         model = FingerNet(lb, ub, InputSize, OutputSize)
34 |         self.assertEqual(len(model.finger_nets), InputSize)
35 |         del model
36 | 
37 |         InputSize = 10
38 |         OutputSize = 1
39 |         lb = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
40 |         ub = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
41 |         model = FingerNet(lb, ub, InputSize, OutputSize)
42 |         self.assertEqual(len(model.finger_nets), InputSize)
43 |         del model
44 | 
45 |         # there are more things you can test here
46 | 
47 |     def test_device_movement(self):
48 |         InputSize = 2
49 |         OutputSize = 1
50 |         lb = [0, 0]
51 |         ub = [1, 1]
52 |         model = FingerNet(lb, ub, InputSize, OutputSize)
53 | 
54 |         # moving model to cuda
55 |         model.cuda()
56 |         self.assertEqual(str(model.lb.device), 'cuda:0')
57 |         self.assertEqual(str(model.ub.device), 'cuda:0')
58 |         self.assertEqual(str(model.finger_nets[0][0].weight.device), 'cuda:0')
59 |         self.assertEqual(str(model.lin_layers[0].weight.device), 'cuda:0')
60 | 
61 |         # moving model to cpu
62 |         model.cpu()
63 |         self.assertEqual(str(model.lb.device), 'cpu')
64 |         self.assertEqual(str(model.ub.device), 'cpu')
65 |         self.assertEqual(str(model.finger_nets[0][0].weight.device), 'cpu')
66 |         self.assertEqual(str(model.lin_layers[0].weight.device), 'cpu')
67 | 
68 |         # use `to`-function to move it back to gpu
69 |         model.to('cuda:0')
70 |         self.assertEqual(str(model.lb.device), 'cuda:0')
71 |         self.assertEqual(str(model.ub.device), 'cuda:0')
72 |         self.assertEqual(str(model.finger_nets[0][0].weight.device), 'cuda:0')
73 |         self.assertEqual(str(model.lin_layers[0].weight.device), 'cuda:0')
74 |         del model
75 | 
76 |     def test_forward(self):
77 |         # test forward on cpu
78 |         InputSize = 2
79 |         OutputSize = 1
80 |         lb = [0, 0]
81 |         ub = [1, 1]
82 |         model = FingerNet(lb, ub, InputSize, OutputSize)
83 |         x = torch.rand(10, InputSize)
84 |         y = model(x)
85 |         self.assertEqual(y.shape, (10, OutputSize))
86 | 
87 |         # test on different input and output size
88 |         InputSize = 3
89 |         OutputSize = 3
90 |         lb = [0, 0, 0]
91 |         ub = [1, 1, 1]
92 |         model = FingerNet(lb, ub, InputSize, OutputSize)
93 |         x = torch.rand(10, InputSize)
94 |         y = model(x)
95 |         self.assertEqual(y.shape, (10, OutputSize))
96 | 
97 |         # test forward on gpu
98 |         InputSize = 3
99 |         OutputSize = 3
100 |         lb = [0, 0, 0]
101 |         ub = [1, 1, 1]
102 |         model = FingerNet(lb, ub, InputSize, OutputSize)
103 |         model.cuda()
104 |         x = torch.rand(10, InputSize, device='cuda:0')
105 |         y = model(x)
106 |         self.assertEqual(y.shape, (10, OutputSize))
107 |         self.assertEqual(str(y.device), 'cuda:0')
108 | 
109 |     def test_parameter_function(self):
110 |         InputSize = 3
111 |         OutputSize = 3
112 |         lb = [0, 0, 0]
113 |         ub = [1, 1, 1]
114 |         model = FingerNet(lb, ub, InputSize, OutputSize)
115 |         num_parameter_entries = 0
116 |         for _ in model.named_parameters(): num_parameter_entries += 1
117 |         self.assertGreater(num_parameter_entries, 0, "Number of parameters is not empty")
118 | 
119 | 
120 | if __name__ == '__main__':
121 |     unittest.main()
122 | 
--------------------------------------------------------------------------------
/NeuralSolvers/optimizer.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | import torch.optim as optim
 3 | from torch.optim.optimizer import Optimizer, required
 4 | 
 5 | 
 6 | 
 7 | class Lamb(Optimizer):
 8 |     r"""Implements Lamb algorithm.
 9 |     It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
10 |     Arguments:
11 |         params (iterable): iterable of parameters to optimize or dicts defining
12 |             parameter groups
13 |         lr (float, optional): learning rate (default: 1e-3)
14 |         betas (Tuple[float, float], optional): coefficients used for computing
15 |             running averages of gradient and its square (default: (0.9, 0.999))
16 |         eps (float, optional): term added to the denominator to improve
17 |             numerical stability (default: 1e-6)
18 |         weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
19 |         adam (bool, optional): always use trust ratio = 1, which turns this into
20 |             Adam. Useful for comparison purposes.
21 |     .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
22 |         https://arxiv.org/abs/1904.00962
23 |     """
24 | 
25 |     def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
26 |                  weight_decay=0, adam=False):
27 |         if not 0.0 <= lr:
28 |             raise ValueError("Invalid learning rate: {}".format(lr))
29 |         if not 0.0 <= eps:
30 |             raise ValueError("Invalid epsilon value: {}".format(eps))
31 |         if not 0.0 <= betas[0] < 1.0:
32 |             raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
33 |         if not 0.0 <= betas[1] < 1.0:
34 |             raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
35 |         defaults = dict(lr=lr, betas=betas, eps=eps,
36 |                         weight_decay=weight_decay)
37 |         self.adam = adam
38 |         super(Lamb, self).__init__(params, defaults)
39 | 
40 |     def step(self, closure=None):
41 |         """Performs a single optimization step.
42 |         Arguments:
43 |             closure (callable, optional): A closure that reevaluates the model
44 |                 and returns the loss.
45 |         """
46 |         loss = None
47 |         if closure is not None:
48 |             loss = closure()
49 | 
50 |         for group in self.param_groups:
51 |             for p in group['params']:
52 |                 if p.grad is None:
53 |                     continue
54 |                 grad = p.grad.data
55 |                 if grad.is_sparse:
56 |                     raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
57 | 
58 |                 state = self.state[p]
59 | 
60 |                 # State initialization
61 |                 if len(state) == 0:
62 |                     state['step'] = 0
63 |                     # Exponential moving average of gradient values
64 |                     state['exp_avg'] = torch.zeros_like(p.data)
65 |                     # Exponential moving average of squared gradient values
66 |                     state['exp_avg_sq'] = torch.zeros_like(p.data)
67 | 
68 |                 exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
69 |                 beta1, beta2 = group['betas']
70 | 
71 |                 state['step'] += 1
72 | 
73 |                 # Decay the first and second moment running average coefficients
74 |                 # m_t
75 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
76 |                 # v_t
77 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
78 | 
79 |                 # Paper v3 does not use debiasing.
80 |                 # bias_correction1 = 1 - beta1 ** state['step']
81 |                 # bias_correction2 = 1 - beta2 ** state['step']
82 |                 # Apply bias to lr to avoid broadcast.
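                # The lines below implement LAMB's layer-wise trust-ratio scaling:
                #     p <- p - lr * trust_ratio * adam_step,  trust_ratio = ||p|| / ||adam_step||
                # (with the weight norm clamped to [0, 10]), so each layer's step size
                # adapts to the ratio between its weight norm and its update norm.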
83 |                 step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
84 | 
85 |                 weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
86 | 
87 |                 adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
88 |                 if group['weight_decay'] != 0:
89 |                     adam_step.add_(p.data, alpha=group['weight_decay'])
90 | 
91 |                 adam_norm = adam_step.pow(2).sum().sqrt()
92 |                 if weight_norm == 0 or adam_norm == 0:
93 |                     trust_ratio = 1
94 |                 else:
95 |                     trust_ratio = weight_norm / adam_norm
96 |                 state['weight_norm'] = weight_norm
97 |                 state['adam_norm'] = adam_norm
98 |                 state['trust_ratio'] = trust_ratio
99 |                 if self.adam:
100 |                     trust_ratio = 1
101 | 
102 |                 p.data.add_(adam_step, alpha=-step_size * trust_ratio)
103 | 
104 |         return loss
105 | 
106 | 
107 | """ Layer-wise adaptive rate scaling for SGD in PyTorch! """
108 | class LARS(Optimizer):
109 |     r"""Implements layer-wise adaptive rate scaling for SGD.
110 |     Args:
111 |         params (iterable): iterable of parameters to optimize or dicts defining
112 |             parameter groups
113 |         lr (float): base learning rate (\gamma_0)
114 |         momentum (float, optional): momentum factor (default: 0.9) ("m")
115 |         weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0005)
116 |             ("\beta")
117 |         eta (float, optional): LARS coefficient
118 |         max_epoch: maximum training epoch to determine polynomial LR decay.
119 |     Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.
120 |     Large Batch Training of Convolutional Networks:
121 |         https://arxiv.org/abs/1708.03888
122 |     Example:
123 |         >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)
124 |         >>> optimizer.zero_grad()
125 |         >>> loss_fn(model(input), target).backward()
126 |         >>> optimizer.step()
127 |     """
128 |     def __init__(self, params, lr=required, momentum=.9,
129 |                  weight_decay=.0005, eta=0.001, max_epoch=200):
130 |         if lr is not required and lr < 0.0:
131 |             raise ValueError("Invalid learning rate: {}".format(lr))
132 |         if momentum < 0.0:
133 |             raise ValueError("Invalid momentum value: {}".format(momentum))
134 |         if weight_decay < 0.0:
135 |             raise ValueError("Invalid weight_decay value: {}"
136 |                              .format(weight_decay))
137 |         if eta < 0.0:
138 |             raise ValueError("Invalid LARS coefficient value: {}".format(eta))
139 | 
140 |         self.epoch = 0
141 |         defaults = dict(lr=lr, momentum=momentum,
142 |                         weight_decay=weight_decay,
143 |                         eta=eta, max_epoch=max_epoch)
144 |         super(LARS, self).__init__(params, defaults)
145 | 
146 |     def step(self, epoch=None, closure=None):
147 |         """Performs a single optimization step.
148 |         Arguments:
149 |             closure (callable, optional): A closure that reevaluates the model
150 |                 and returns the loss.
151 |             epoch: current epoch to calculate polynomial LR decay schedule.
152 |                 if None, uses self.epoch and increments it.
153 |         """
154 |         loss = None
155 |         if closure is not None:
156 |             loss = closure()
157 | 
158 |         if epoch is None:
159 |             epoch = self.epoch
160 |             self.epoch += 1
161 | 
162 |         for group in self.param_groups:
163 |             weight_decay = group['weight_decay']
164 |             momentum = group['momentum']
165 |             eta = group['eta']
166 |             lr = group['lr']
167 |             max_epoch = group['max_epoch']
168 | 
169 |             for p in group['params']:
170 |                 if p.grad is None:
171 |                     continue
172 | 
173 |                 param_state = self.state[p]
174 |                 d_p = p.grad.data
175 | 
176 |                 weight_norm = torch.norm(p.data)
177 |                 grad_norm = torch.norm(d_p)
178 | 
179 |                 # Global LR computed on polynomial decay schedule
180 |                 decay = (1 - float(epoch) / max_epoch) ** 2
181 |                 global_lr = lr * decay
182 | 
183 |                 # Compute local learning rate for this layer
184 |                 local_lr = eta * weight_norm / \
185 |                     (grad_norm + weight_decay * weight_norm)
186 | 
187 |                 # Update the momentum term
188 |                 actual_lr = local_lr * global_lr
189 | 
190 |                 if 'momentum_buffer' not in param_state:
191 |                     buf = param_state['momentum_buffer'] = \
192 |                         torch.zeros_like(p.data)
193 |                 else:
194 |                     buf = param_state['momentum_buffer']
195 |                 buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)
196 |                 p.data.add_(-buf)
197 | 
198 |         return loss
-------------------------------------------------------------------------------- /NeuralSolvers/pde_library.py: --------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import grad
3 | from torch import ones, stack
4 | 
5 | 
6 | def burgers1D(params):
7 |     viscosity = params["viscosity"]
8 | 
9 |     def pde(x, u):
10 |         grads = ones(u.shape, device=u.device)
11 |         grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
12 |         u_x, u_t = grad_u[:, 0], grad_u[:, 1]
13 | 
14 |         grads_x = ones(u_x.shape, device=u.device)
15 |         u_xx = grad(grad_u[:, 0], x, create_graph=True, grad_outputs=grads_x)[0][:, 0]
16 | 
17 |         # reshape for correct behavior of the optimizer
18 |         u_x = u_x.reshape(-1, 1)
19 |         u_t = u_t.reshape(-1, 1)
20 |         u_xx = u_xx.reshape(-1, 1)
21 | 
22 |         return u_t + u * u_x - viscosity * u_xx
23 | 
24 |     return pde
25 | 
26 | def wave1D(params):
27 |     wave_speed = params["wave_speed"]
28 | 
29 |     def pde(x, u):
30 |         grads = ones(u.shape, device=u.device)
31 |         grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
32 |         u_x, u_t = grad_u[:, 0], grad_u[:, 1]
33 | 
34 |         grads_x = ones(u_x.shape, device=u.device)
35 |         u_xx = grad(grad_u[:, 0], x, create_graph=True, grad_outputs=grads_x)[0][:, 0]
36 | 
37 |         grads_t = ones(u_t.shape, device=u.device)
38 |         u_tt = grad(grad_u[:, 1], x, create_graph=True, grad_outputs=grads_t)[0][:, 1]
39 | 
40 |         return u_tt - wave_speed**2 * u_xx
41 | 
42 |     return pde
43 | 
44 | def schrodinger1D(params):
45 |     def pde(x, u):
46 |         u_real, u_imag = u[:, 0], u[:, 1]
47 | 
48 |         grads = ones(u_real.shape, device=u.device)
49 |         grad_u_real = grad(u_real, x, create_graph=True, grad_outputs=grads)[0]
50 |         grad_u_imag = grad(u_imag, x, create_graph=True, grad_outputs=grads)[0]
51 | 
52 |         u_real_x, u_real_t = grad_u_real[:, 0], grad_u_real[:, 1]
53 |         u_imag_x, u_imag_t = grad_u_imag[:, 0], grad_u_imag[:, 1]
54 | 
55 |         u_real_xx = grad(u_real_x, x, create_graph=True, grad_outputs=grads)[0][:, 0]
56 |         u_imag_xx = grad(u_imag_x, x, create_graph=True, grad_outputs=grads)[0][:, 0]
57 | 
58 |         f_real = u_real_t + 0.5 * u_imag_xx + (u_real ** 2 + u_imag ** 2) * u_imag
59 |         f_imag = u_imag_t - 0.5 * u_real_xx - (u_real ** 2 + u_imag ** 2) * u_real
60 | 
61 |         return stack([f_real, f_imag], 1)
62 | 
63 |     return pde
64 | 
65 | def heat1D(params):
66 |     diffusivity = params["diffusivity"]
67 |     def pde(x, u):
68 |         grads = ones(u.shape, device=u.device)
69 |         grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
70 |         u_x, u_t = grad_u[:, 0], grad_u[:, 1]
71 | 
72 |         grads = ones(u_x.shape, device=u.device)
73 |         u_xx = grad(u_x, x, create_graph=True, grad_outputs=grads)[0][:, 0]
74 | 
75 |         u_x, u_t, u_xx = [tensor.reshape(-1, 1) for tensor in (u_x, u_t, u_xx)]
76 | 
77 |         return u_t - diffusivity * u_xx
78 | 
79 |     return pde
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/HPMLoss.py: --------------------------------------------------------------------------------
1 | from .PDELoss import PDELoss
2 | 
3 | class HPMLoss(PDELoss):
4 |     def __init__(self, geometry, name, hpm_input, hpm_model, norm='L2', weight=1.):
5 |         """
6 |         Constructor of the HPM loss
7 | 
8 |         Args:
9 |             geometry: instance of the geometry class that defines the domain
10 |             hpm_input (function): function that calculates the needed input for the HPM model. The hpm_input function
11 |                 should return a tensor whose last column is the time derivative
12 |             hpm_model (torch.nn.Module): model for the HPM, represents the underlying PDE
13 |             norm: Norm used for calculating the PDE loss
14 |             weight: Weighting for the loss term
15 |         """
16 |         super(HPMLoss, self).__init__(geometry, None, name, norm=norm, weight=weight)
17 |         self.hpm_input = hpm_input
18 |         self.hpm_model = hpm_model
19 | 
20 |     def __call__(self, x, model, **kwargs):
21 |         """
22 |         Calculation of the HPM Loss
23 |         Args:
24 |             x (torch.Tensor): residual points
25 |             model (torch.nn.Module): model representing the solution
26 |         """
27 |         x.requires_grad = True
28 |         prediction_u = model(x)
29 |         hpm_input = self.hpm_input(x, prediction_u)
30 |         time_derivative = hpm_input[:, -1].reshape(-1, 1)
31 |         hpm_in = hpm_input[:, :-1]
32 |         hpm_output = self.hpm_model(hpm_in)
33 |         return self.norm(time_derivative, hpm_output)
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/PDELoss.py: --------------------------------------------------------------------------------
1 | import torch
2 | from torch import Tensor as Tensor
3 | from torch.nn import Module as Module
4 | from NeuralSolvers.LossTerm import LossTerm
5 | from NeuralSolvers.samplers.Adaptive_Sampler import AdaptiveSampler
6 | 
7 | 
8 | class PDELoss(LossTerm):
9 |     def __init__(self, geometry, pde, name, norm='L2', weight=1.):
10 |         """
11 |         Constructor of the PDE Loss
12 | 
13 |         Args:
14 |             geometry: instance of the geometry class that defines the domain
15 |             pde (function): function that represents the residual of the PDE
16 |             norm: Norm used for calculating the PDE loss
17 |             weight: Weighting for the loss term
18 |         """
19 |         super(PDELoss, self).__init__(geometry, name, norm, weight)
20 |         self.geometry = geometry
21 |         self.pde = pde
22 | 
23 | 
24 |     def __call__(self, x: Tensor, model: Module, **kwargs):
25 |         """
26 |         Call function of the PDE loss. Calculates the norm of the PDE residual
27 | 
28 |         x: residual points
29 |         model: model that predicts the solution of the PDE
30 |         """
31 | 
32 |         if isinstance(self.geometry.sampler, AdaptiveSampler):
33 |             w = x[:, -1:]
34 |             x = x[:, :-1]
35 | 
36 |         x.requires_grad = True  # set requires_grad to True in order to calculate gradients w.r.t. the input points
37 |         u = model(x)
38 |         pde_residual = self.pde(x, u, **kwargs)
39 | 
40 |         if isinstance(self.geometry.sampler, AdaptiveSampler):
41 |             return 1 / self.geometry.batch_size * torch.mean(1 / w * pde_residual ** 2)
42 |         else:
43 |             zeros = torch.zeros(pde_residual.shape, device=pde_residual.device)
44 |             return self.norm(pde_residual, zeros)
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/__init__.py: --------------------------------------------------------------------------------
1 | from .PINN import PINN
2 | from .HPMLoss import HPMLoss
3 | from .PDELoss import PDELoss
4 | 
5 | import NeuralSolvers.pinn.datasets
6 | 
7 | __all__ = [
8 |     'PDELoss',
9 |     'HPMLoss',
10 |     'PINN',
11 | ]
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/datasets/BoundaryCondition.py: --------------------------------------------------------------------------------
1 | from torch.utils.data import Dataset
2 | 
3 | from NeuralSolvers.LossTerm import LossTerm
4 | from torch.autograd import grad
5 | from torch import ones, Tensor
6 | import numpy as np
7 | 
8 | 
9 | class BoundaryConditionDataset1D(Dataset):
10 |     def __init__(self, nb, lower_bound, upper_bound, is_lower, device):
11 |         super().__init__()
12 |         max_t = 2
13 |         t = np.linspace(0, max_t, 200).flatten()[:, None]
14 |         idx_t = np.random.choice(t.shape[0], nb, replace=False)
15 |         tb = t[idx_t, :]
16 |         x_val = lower_bound[0] if is_lower else upper_bound[0]
17 |         self.x_b = Tensor(np.concatenate((np.full_like(tb, x_val), tb), 1)).float().to(device)
18 | 
19 |     def __len__(self):
20 |         return 1
21 | 
22 |     def __getitem__(self, idx):
23 |         return self.x_b
24 | 
25 | 
26 | class BoundaryCondition(LossTerm):
27 |     def __init__(self, dataset, name, norm='L2', weight=1.):
28 |         super(BoundaryCondition, self).__init__(dataset, name, norm, weight)
29 | 
30 |     def __call__(self, *args, **kwargs):
31 |         raise NotImplementedError("The call function of the Boundary Condition has to be implemented")
32 | 
33 | 
34 | class DirichletBC(BoundaryCondition):
35 |     """
36 |     Dirichlet boundary conditions: y(x) = func(x).
37 |     """
38 | 
39 |     def __init__(self, func, dataset, name, norm='L2', weight=1.):
40 |         super(DirichletBC, self).__init__(dataset, name, norm, weight)
41 |         self.func = func
42 | 
43 |     def __call__(self, x, model):
44 |         prediction = model(x)  # is equal to y
45 |         return self.norm(prediction, self.func(x))
46 | 
47 | 
48 | class NeumannBC(BoundaryCondition):
49 |     """
50 |     Neumann boundary conditions: dy/dn(x) = func(x).
51 | 
52 |     With dy/dn(x) = <∇y,n>
53 |     """
54 | 
55 |     def __init__(self, func, dataset, normal_vector, begin, end, output_dimension, name, norm='L2', weight=1.):
56 |         """
57 |         Args:
58 |             func: scalar but vectorized function f(x)
59 |             normal_vector: normal vector for the face
60 |             name: identifier of the boundary condition
61 |             weight: weighting of the boundary condition
62 |             begin: defines where the spatial variables in x begin
63 |             end: defines where the spatial variables in x end
64 |             output_dimension: defines on which dimension of the output the boundary condition is performed
65 |         """
66 |         super(NeumannBC, self).__init__(dataset, name, norm, weight)
67 |         self.func = func
68 |         self.normal_vector = normal_vector
69 |         self.begin = begin
70 |         self.end = end
71 |         self.output_dimension = output_dimension
72 | 
73 |     def __call__(self, x, model):
74 |         x.requires_grad = True
75 |         y = model(x)
76 |         y = y[:, self.output_dimension]
77 |         grads = ones(y.shape, device=y.device)
78 |         grad_y = grad(y, x, create_graph=True, grad_outputs=grads)[0]
79 |         grad_y = grad_y[:, self.begin:self.end]
80 |         self.normal_vector = self.normal_vector.to(y.device)  # move normal vector to the correct device
81 |         y_dn = grad_y @ self.normal_vector
82 |         return self.norm(y_dn, self.func(x))
83 | 
84 | 
85 | class RobinBC(BoundaryCondition):
86 |     """
87 |     Robin boundary conditions: dy/dn(x) = func(x, y).
88 |     """
89 | 
90 |     def __init__(self, func, dataset, normal_vector, begin, end, output_dimension, name, norm='L2', weight=1.):
91 |         """
92 |         Args:
93 |             func: scalar but vectorized function f(x,y)
94 |             normal_vector: normal vector for the face
95 |             name: identifier of the boundary condition
96 |             weight: weighting of the boundary condition
97 |             begin: defines where the spatial variables in x begin
98 |             end: defines where the spatial variables in x end
99 |             output_dimension: defines on which dimension of the output the boundary condition is performed
100 |         """
101 | 
102 |         super(RobinBC, self).__init__(dataset, name, norm, weight)
103 |         self.func = func
104 |         self.begin = begin
105 |         self.end = end
106 |         self.normal_vector = normal_vector
107 |         self.output_dimension = output_dimension
108 | 
109 |     def __call__(self, x, y, model):
110 |         x.requires_grad = True
111 |         y = model(x)
112 |         y = y[:, self.output_dimension]
113 |         grads = ones(y.shape, device=y.device)
114 |         grad_y = grad(y, x, create_graph=True, grad_outputs=grads)[0]
115 |         grad_y = grad_y[:, self.begin:self.end]
116 |         self.normal_vector = self.normal_vector.to(y.device)  # move normal vector to the correct device
117 |         y_dn = grad_y @ self.normal_vector
118 |         return self.norm(y_dn, self.func(x, y))
119 | 
120 | 
121 | class PeriodicBC(BoundaryCondition):
122 |     """
123 |     Periodic boundary condition
124 |     """
125 | 
126 |     def __init__(self, dataset, output_dimension, name, degree=None, input_dimension=None, norm='L2', weight=1.):
127 |         super(PeriodicBC, self).__init__(dataset, name, norm, weight)
128 |         if degree is not None and input_dimension is None:
129 |             raise ValueError("If the degree of the boundary condition is defined the input dimension for the "
130 |                              "derivative has to be defined too")
131 |         self.input_dimension = input_dimension
132 |         self.output_dimension = output_dimension
133 |         self.degree = degree
134 | 
135 |     def __call__(self, x_lb, x_ub, model):
136 |         x_lb.requires_grad = True
137 |         x_ub.requires_grad = True
138 |         y_lb = model(x_lb)[:, self.output_dimension]
139 |         y_ub = model(x_ub)[:, self.output_dimension]
140 |         grads = ones(y_lb.shape, device=y_ub.device)
141 |         if self.degree is None:
142 |             return self.weight * self.norm(y_lb, y_ub)
143 |         elif self.degree == 1:
144 |             y_lb_grad = grad(y_lb, x_lb, create_graph=True, grad_outputs=grads)[0]
145 |             y_ub_grad = grad(y_ub, x_ub, create_graph=True, grad_outputs=grads)[0]
146 |             y_lb_dn = y_lb_grad[:, self.input_dimension]
147 |             y_ub_dn = y_ub_grad[:, self.input_dimension]
148 |             return self.weight * self.norm(y_lb_dn, y_ub_dn)
149 | 
150 |         else:
151 |             raise NotImplementedError("Periodic Boundary Condition for a higher degree than one is not supported")
152 | 
153 | 
154 | class TimeDerivativeBC(BoundaryCondition):
155 |     """
156 |     For hyperbolic systems it may be necessary to initialize the time derivative. This boundary condition initializes
157 |     the time derivative in a data-driven way.
158 | 
159 |     """
160 |     def __init__(self, dataset, name, norm='L2', weight=1):
161 |         super(TimeDerivativeBC, self).__init__(dataset, name, norm, weight)
162 | 
163 |     def __call__(self, x, dt_y, model):
164 |         x.requires_grad = True
165 |         pred = model(x)
166 |         grads = ones(pred.shape, device=pred.device)
167 |         pred_dt = grad(pred, x, create_graph=True, grad_outputs=grads)[0][:, -1]
168 |         pred_dt = pred_dt.reshape(-1, 1)
169 |         return self.norm(pred_dt, dt_y)
170 | 
171 | 
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/datasets/InitalCondition.py: --------------------------------------------------------------------------------
1 | from NeuralSolvers.LossTerm import LossTerm
2 | from torch import Tensor
3 | from torch.nn import Module
4 | 
5 | 
6 | class InitialCondition(LossTerm):
7 |     def __init__(self, dataset, name, norm='L2', weight=1.):
8 |         """
9 |         Constructor for the Initial condition
10 | 
11 |         Args:
12 |             dataset (torch.utils.Dataset): dataset that provides the points of the initial condition
13 |             norm: Norm used for calculating the loss
14 |             weight: Weighting for the loss term
15 |         """
16 |         super(InitialCondition, self).__init__(dataset, name, norm, weight)
17 | 
18 |     def __call__(self, x: Tensor, model: Module, gt_y: Tensor):
19 |         """
20 |         This function returns the loss for the initial condition
21 |         L_0 = norm(model(x), gt_y)
22 | 
23 |         Args:
24 |             x (Tensor): position of initial condition
25 |             model (Module): model that represents the solution
26 |             gt_y (Tensor): ground-truth values for the initial state
27 |         """
28 |         prediction = model(x)
29 |         return self.norm(prediction, gt_y)
-------------------------------------------------------------------------------- /NeuralSolvers/pinn/datasets/__init__.py: --------------------------------------------------------------------------------
1 | from .BoundaryCondition import PeriodicBC
2 | from .BoundaryCondition import DirichletBC
3 | from .BoundaryCondition import RobinBC
4 | from .BoundaryCondition import TimeDerivativeBC
5 | from .BoundaryCondition import NeumannBC
6 | from .BoundaryCondition import BoundaryConditionDataset1D
7 | from .InitalCondition import InitialCondition
8 | 
9 | __all__ = [
10 |     'InitialCondition',
11 |     'PeriodicBC',
12 |     'DirichletBC',
13 |     'RobinBC',
14 |     'NeumannBC',
15 |     'TimeDerivativeBC',
16 |     'BoundaryConditionDataset1D'
17 | ]
-------------------------------------------------------------------------------- /NeuralSolvers/samplers/Adaptive_Sampler.py: --------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from NeuralSolvers.samplers.Sampler import Sampler
4 | 
5 | 
6 | class AdaptiveSampler(Sampler):
7 |     def __init__(self, n_seed, model, pde, device = torch.device("cuda")):
8 |         """
9 |         Constructor of the AdaptiveSampler class
10 | 
11 |         Args:
12 |             n_seed (int): the number of seed points for adaptive sampling.
13 |             model: the model which is trained to represent the underlying PDE.
14 |             pde (function): function that represents the residual of the PDE.
15 |             device (torch.device): "cuda" or "cpu".
16 |         """
17 |         self.n_seed = n_seed
18 |         self.model = model
19 |         self.pde = pde
20 |         self.device = device
21 |         super(AdaptiveSampler, self).__init__()
22 | 
23 |     def sample(self, lb, ub, n):
24 |         """
25 |         Generate a tuple of 'n' sampled points in [lb,ub] and corresponding weights.
26 | 
27 |         Args:
28 |             lb (numpy.ndarray): lower bound of the domain.
29 |             ub (numpy.ndarray): upper bound of the domain.
30 |             n (int): the number of sampled points.
31 |         """
32 | 
33 |         torch.manual_seed(42)
34 |         np.random.seed(42)
35 | 
36 |         lb = lb.reshape(1, -1)
37 |         ub = ub.reshape(1, -1)
38 | 
39 |         dimension = lb.shape[1]
40 |         xs = np.random.uniform(lb, ub, size=(self.n_seed, dimension))
41 | 
42 |         # collocation points
43 |         xf = np.random.uniform(lb, ub, size=(n, dimension))
44 | 
45 |         # make the points into tensors
46 |         xf = torch.tensor(xf).float().to(self.device)
47 |         xs = torch.tensor(xs).float().to(self.device)
48 | 
49 |         # prediction with seed points
50 |         xs.requires_grad = True
51 |         prediction_seed = self.model(xs)
52 | 
53 |         # pde residual with seed points
54 |         loss_seed = self.pde(xs, prediction_seed)
55 | 
56 | 
57 |         # Compute the pairwise 2-norm distance between collocation points and seed points
58 |         dist = torch.cdist(xf, xs, p=2)
59 | 
60 |         # find the nearest seed point for each collocation point
61 |         knn = dist.topk(1, largest=False)
62 | 
63 |         # assign each collocation point the residual loss of its nearest seed point
64 |         losses_xf = loss_seed[knn.indices[:, 0]]
65 | 
66 |         # apply softmax function
67 |         q_model = torch.softmax(losses_xf, dim=0)
68 | 
69 |         # obtain 'n' indices sampled from the multinomial distribution
70 |         indices_new = torch.multinomial(q_model[:, 0], n, replacement=True)
71 | 
72 |         # collocation points and corresponding weights
73 |         xf = xf[indices_new]
74 |         weight = q_model[indices_new].detach()
75 |         weight = torch.mean(weight, 1, True)
76 | 
77 |         return xf, weight
-------------------------------------------------------------------------------- /NeuralSolvers/samplers/LHS_Sampler.py: --------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from pyDOE import lhs
4 | from NeuralSolvers.samplers.Sampler import Sampler
5 | 
6 | 
7 | class LHSSampler(Sampler):
8 |     def __init__(self, device = 'cpu'):
9 |         """
10 |         Constructor of the LHSSampler class
11 |         """
12 |         super(LHSSampler, self).__init__()
13 |         self.device = device
14 | 
15 |     def sample(self, lb, ub, n):
16 |         """Generate 'n' number of sample points in [lb,ub]
17 | 
18 |         Args:
19 |             lb (numpy.ndarray): lower bound of the domain.
20 |             ub (numpy.ndarray): upper bound of the domain.
21 |             n (int): the number of sampled points.
22 | """ 23 | 24 | torch.manual_seed(42) 25 | np.random.seed(42) 26 | 27 | lb = lb.reshape(1,-1) 28 | ub = ub.reshape(1,-1) 29 | 30 | dimension = lb.shape[1] 31 | xf = lb + (ub - lb) * lhs(dimension, n) 32 | xf_torch = torch.tensor(xf).float().to(self.device) 33 | 34 | 35 | return xf_torch -------------------------------------------------------------------------------- /NeuralSolvers/samplers/Random_Sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from NeuralSolvers.samplers.Sampler import Sampler 4 | 5 | 6 | class RandomSampler(Sampler): 7 | def __init__(self): 8 | """ 9 | Constructor of the RandomSampler (pseudo random sampler) class 10 | """ 11 | super(RandomSampler, self).__init__() 12 | 13 | def sample(self, lb, ub, n): 14 | """Generate 'n' number of sample points in [lb,ub] 15 | 16 | Args: 17 | lb (numpy.ndarray): lower bound of the domain. 18 | ub (numpy.ndarray): upper bound of the domain. 19 | n (int): the number of sampled points. 20 | """ 21 | 22 | torch.manual_seed(42) 23 | np.random.seed(42) 24 | 25 | lb = lb.reshape(1,-1) 26 | ub = ub.reshape(1,-1) 27 | 28 | dimension = lb.shape[1] 29 | xf = np.random.uniform(lb,ub,size=(n, dimension)) 30 | return torch.tensor(xf).float() -------------------------------------------------------------------------------- /NeuralSolvers/samplers/Sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from abc import ABC, abstractmethod 4 | 5 | class Sampler(ABC): 6 | def __init__(self): 7 | """ 8 | Constructor of the Sampler class 9 | """ 10 | 11 | @abstractmethod 12 | def sample(self, lb, ub, n): 13 | """Generate 'n' number of sample points in [lb,ub] 14 | 15 | Args: 16 | lb (numpy.ndarray): lower bound of the domain. 17 | ub (numpy.ndarray): upper bound of the domain. 18 | n (int): the number of sampled points. 19 | """ 20 | -------------------------------------------------------------------------------- /NeuralSolvers/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .Adaptive_Sampler import AdaptiveSampler 2 | from .LHS_Sampler import LHSSampler 3 | from .Random_Sampler import RandomSampler 4 | 5 | __all__ = [ 6 | 'RandomSampler', 7 | 'LHSSampler', 8 | 'AdaptiveSampler', 9 | ] 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | Neural Solvers Logo 3 |
4 | 5 | 6 | **Neural Solvers** is a framework for solving partial differential equations (PDEs) and inverse problems using physics-informed neural networks (PINNs) at scale. It leverages data parallelism to accelerate training, making it highly scalable. 7 | 8 | ## 1. Key features 9 | 10 | - Implements physics-informed neural networks at scale 11 | - Uses data parallelism for significant speedup in multi-GPU environments 12 | - Supports various PDE types and boundary conditions 13 | - Integrates with Horovod for distributed training 14 | - Offers logging support with Wandb and TensorBoard 15 | 16 | 17 |
18 | 19 |
20 | 
21 | 
22 | ## 2. Quick Start
23 | 
24 | ### 2.1 Installation
25 | 
26 | ```bash
27 | git clone git@github.com:Photon-AI-Research/NeuralSolvers.git
28 | cd NeuralSolvers
29 | pip install .
30 | ```
31 | 
32 | ### 2.2 Getting Started with 1D Burgers' Equation Example
33 | 
34 | To help you get started with Neural Solvers, we've provided an example that solves the 1D Burgers' Equation. This example demonstrates the basic usage of the library and serves as a good starting point for understanding how to set up and solve PDEs using our framework.
35 | 
36 | The `Burgers_Equation.py` script in the `examples/Burgers_Equation_1d` directory provides a complete example of how to:
37 | 
38 | 1. Define the problem domain
39 | 2. Set up the neural network model
40 | 3. Define initial and boundary conditions
41 | 4. Create the PINN (Physics-Informed Neural Network)
42 | 5. Train the model
43 | 6. Visualize the results
44 | 
45 | To run this example:
46 | 
47 | ```bash
48 | python examples/Burgers_Equation_1d/Burgers_Equation.py
49 | ```
50 | 
51 | We encourage you to explore this example to get a feel for how Neural Solvers works. You can use it as a template for solving your own PDEs or as a reference for understanding the library's structure.
52 | 
53 | For more advanced usage, please refer to the sections below.
54 | 
55 | ## 3. Basic Usage
56 | Here's a simple example of how to use the PINN class:
57 | 
58 | ```python
59 | import NeuralSolvers as nsolv
60 | 
61 | # Set up your problem
62 | pde_loss = nsolv.PDELoss(...)
63 | initial_condition = nsolv.InitialCondition(...)
64 | boundary_condition = nsolv.DirichletBC(...)
65 | 
66 | # Create and train the PINN
67 | pinn = nsolv.PINN(model, input_dim, output_dim, pde_loss, initial_condition, boundary_condition)
68 | pinn.fit(epochs=1000, learning_rate=1e-3)
69 | 
70 | # Use the trained PINN
71 | solution = pinn(x_test)
72 | ```
73 | 
74 | In the following, we dive into the details of each step.
75 | 
76 | ### 3.1. Define datasets for collocation points
77 | 
78 | ```python
79 | import NeuralSolvers as nsolv
80 | from torch.utils.data import Dataset
81 | 
82 | class PDEDataset(Dataset):
83 |     def __init__(self, nf, lb, ub):
84 |         # Initialize your PDE dataset
85 |         pass
86 | 
87 |     def __len__(self):
88 |         # Return length of the dataset
89 |         pass
90 | 
91 |     def __getitem__(self, idx):
92 |         # Return item at given index
93 |         pass
94 | # Similarly, define InitialConditionDataset and BoundaryConditionDataset
95 | 
96 | ```
97 | 
98 | ### 3.2. Set up your problem
99 | 
100 | ```python
101 | # Create datasets
102 | pde_dataset = PDEDataset(...)
103 | ic_dataset = InitialConditionDataset(...)
104 | bc_dataset = BoundaryConditionDataset(...)
105 | 
106 | # Define initial and boundary conditions
107 | initial_condition = nsolv.InitialCondition(dataset=ic_dataset)
108 | boundary_condition = nsolv.DirichletBC(dataset=bc_dataset)
109 | 
110 | # Define PDE residual function
111 | def pde_residual(x, u):
112 |     # Define your PDE here
113 |     pass
114 | 
115 | pde_loss = nsolv.PDELoss(dataset=pde_dataset, func=pde_residual)
116 | ```
117 | 
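For intuition, here is a minimal sketch of what `pde_residual` can look like for the 1D viscous Burgers' equation, following the autograd pattern used in `NeuralSolvers/pde_library.py`. The column layout of `x` (space first, time second) and the viscosity value are assumptions of this sketch:

```python
import numpy as np
import torch
from torch.autograd import grad

def pde_residual(x, u):
    # Residual of u_t + u * u_x - nu * u_xx = 0, assuming x has columns [space, time]
    nu = 0.01 / np.pi  # viscosity (assumed value, as in the Burgers example)
    grads = torch.ones(u.shape, device=u.device)
    grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
    u_x, u_t = grad_u[:, 0:1], grad_u[:, 1:2]
    grads_x = torch.ones(u_x.shape, device=u.device)
    u_xx = grad(u_x, x, create_graph=True, grad_outputs=grads_x)[0][:, 0:1]
    return u_t + u * u_x - nu * u_xx
```

During training, `PDELoss` drives this residual towards zero on the collocation points.

118 | ### 3.3. Create and train your model: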
119 | 
120 | ```python
121 | # Define the neural network model
122 | model = nsolv.models.MLP(input_size=2, output_size=1, hidden_size=50, num_hidden=4)
123 | 
124 | # Create PINN
125 | pinn = nsolv.PINN(
126 |     model,
127 |     input_size=2,
128 |     output_size=1,
129 |     pde_loss=pde_loss,
130 |     initial_condition=initial_condition,
131 |     boundary_condition=[boundary_condition],
132 |     use_gpu=True
133 | )
134 | 
135 | # Train the model
136 | pinn.fit(epochs=10000, optimizer='Adam', lr=1e-3)
137 | ```
138 | 
139 | ## 4. Benchmark runner
140 | 
141 | Neural Solvers includes a benchmark runner that allows you to easily compare different PDE solvers and model architectures. This tool is particularly useful for evaluating the performance of various configurations across different types of PDEs.
142 | 
143 | ### 4.1. Using the Benchmark Runner
144 | 
145 | The benchmark runner is located in the `benchmarks` directory and can be run from the command line. Here's how to use it:
146 | 
147 | ```bash
148 | python benchmarks/benchmark_runner.py --system <system_name> --model <model_name> --epochs <num_epochs>
149 | 
150 | ```
151 | 
152 | Where:
153 | 
154 | - `<system_name>` can be one of: "burgers", "heat", "schrodinger", or "wave"
155 | - `<model_name>` can be either "MLP" or "ModulatedMLP"
156 | - `<num_epochs>` is the number of training epochs (default is 1000)
157 | 
158 | For example:
159 | 
160 | ```bash
161 | python benchmarks/benchmark_runner.py --system burgers --model MLP --epochs 1000
162 | ```
163 | 
164 | This command will train a PINN to solve the Burgers equation using an MLP architecture for 1000 epochs.
165 | 
166 | ### 4.2. Configuring Benchmarks
167 | 
168 | The benchmark configurations are defined in `benchmarks/configs.py`. Each PDE system has its own configuration, including:
169 | 
170 | - Domain boundaries
171 | - PDE function and parameters
172 | - Initial and boundary conditions
173 | - Model architecture details
174 | 
175 | You can modify these configurations or add new ones to test different scenarios.
176 | 
177 | ### 4.3. Visualizing Results
178 | After training, the benchmark runner will automatically plot the solution predicted by the PINN. This allows for quick visual inspection of the results.
179 | 
180 | ### 4.4. Extending the Benchmark Runner
181 | The benchmark runner is designed to be extensible. You can add new PDE systems, model architectures, or evaluation metrics by modifying the appropriate files in the benchmarks directory.
182 | 
183 | This benchmarking tool provides a standardized way to compare different approaches and configurations, making it easier to evaluate and improve your PDE solving techniques using Neural Solvers.
184 | 
185 | ## 5. Advanced features
186 | 
187 | ### 5.1. Deep HPM support
188 | 
189 | Instead of a PDE loss you can use an HPM (Hidden Physics Model) loss. The HPM loss needs a `derivatives` function that computes the required derivatives of the network output, where the last returned column is the time derivative.
190 | You can use the HPM loss as follows; a concrete sketch of a `derivatives` function is shown after this example.
191 | 
192 | ```python
193 | 
194 | def derivatives(x, u):
195 |     """
196 |     Returns the derivatives
197 | 
198 |     Args:
199 |         x (torch.Tensor): residual points
200 |         u (torch.Tensor): predictions of the pde model
201 |     """
202 |     pass
203 | 
204 | hpm_loss = nsolv.HPMLoss(pde_dataset, derivatives, hpm_model)
205 | # HPM has no boundary conditions in general
206 | pinn = nsolv.PINN(model, input_size=2, output_size=2, pde_loss=hpm_loss, initial_condition=initial_condition, boundary_condition=[], use_gpu=True)
207 | 
208 | ```
209 | 
210 | 
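Here is a minimal sketch of such a `derivatives` function for one spatial dimension, following the autograd pattern from `NeuralSolvers/pde_library.py`. The exact set of returned columns is an assumption of this sketch and must match what your `hpm_model` expects as input, with the time derivative in the last column (that is the column `HPMLoss` slices off internally):

```python
import torch
from torch.autograd import grad

def derivatives(x, u):
    # Sketch: assumes x has columns [x, t] and u has shape (N, 1);
    # returns [x, t, u, u_x, u_xx, u_t], with the time derivative last.
    grads = torch.ones(u.shape, device=u.device)
    grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
    u_x, u_t = grad_u[:, 0:1], grad_u[:, 1:2]
    grads_x = torch.ones(u_x.shape, device=u.device)
    u_xx = grad(u_x, x, create_graph=True, grad_outputs=grads_x)[0][:, 0:1]
    return torch.cat([x, u, u_x, u_xx, u_t], dim=1)
```

211 | ### 5.2. Horovod support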
212 | Enable distributed training with Horovod:
213 | 
214 | ```python
215 | pinn = nsolv.PINN(..., use_horovod=True)
216 | ```
217 | 
218 | Note: LBFGS optimizer is not supported with Horovod.
219 | 
220 | ### 5.3. Logging
221 | Use Wandb or TensorBoard for logging:
222 | 
223 | ```python
224 | # Wandb
225 | logger = nsolv.WandbLogger(project_name, args)
226 | 
227 | # TensorBoard
228 | logger = nsolv.TensorBoardLogger(log_directory)
229 | 
230 | pinn.fit(epochs=5000, logger=logger)
231 | ```
232 | 
233 | ### 5.4. Adaptive Sampling
234 | 
235 | Neural Solvers supports adaptive sampling to focus computational resources on regions of high error:
236 | 
237 | ```python
238 | sampler = nsolv.AdaptiveSampler(num_points, model, pde_function)
239 | geometry = nsolv.NDCube(lb, ub, num_points, num_points, sampler, device=device)
240 | ```
241 | 
242 | 
243 | ### 5.5. Implemented Approaches
244 | 
245 | - P. Stiller, F. Bethke, M. Böhme, R. Pausch, S. Torge, A. Debus, J. Vorberger, M. Bussmann, N. Hoffmann:
246 |   Large-scale Neural Solvers for Partial Differential Equations (2020).
247 | 
248 | 
249 | - Raissi, Maziar, Paris Perdikaris, and George Em Karniadakis:
250 |   Physics Informed Deep Learning (Part I): Data-driven Solutions of Nonlinear Partial Differential Equations (2017).
251 | 
252 | - Raissi, Maziar, Paris Perdikaris, and George Em Karniadakis:
253 |   Physics Informed Deep Learning (Part II): Data-driven Discovery of Nonlinear Partial Differential Equations (2017).
254 | 
255 | - Suryanarayana Maddu, Dominik Sturm, Christian L. Müller and Ivo F. Sbalzarini (2021):
256 |   Inverse Dirichlet Weighting Enables Reliable Training of Physics Informed Neural Networks.
257 | 
258 | 
259 | - Sifan Wang, Yujun Teng, Paris Perdikaris (2020):
260 |   Understanding and Mitigating Gradient Pathologies in Physics-Informed Neural Networks.
261 | 
262 | - Mohammad Amin Nabian, Rini Jasmine Gladstone, Hadi Meidani (2021):
263 |   Efficient Training of Physics-Informed Neural Networks via Importance Sampling.
264 | 
265 | 
266 | ## 6. Citation
267 | If you use Neural Solvers in your research, please cite:
268 | 
269 | ```
270 | P. Stiller, F. Bethke, M. Böhme, R. Pausch, S. Torge, A. Debus, J. Vorberger, M. Bussmann, N. Hoffmann:
271 | Large-scale Neural Solvers for Partial Differential Equations (2020).
272 | ```
273 | 
274 | 
275 | ## 7. Developers
276 | 
277 | - Nico Hoffmann (SAXONY.ai)
278 | - Patrick Stiller (HZDR)
279 | - Maksim Zhdanov (HZDR)
280 | - Jeyhun Rustamov (HZDR)
281 | - Raj Dhansukhbhai Sutariya (HZDR)
282 | -------------------------------------------------------------------------------- /benchmarks/benchmark_runner.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import importlib 3 | import numpy as np 4 | import torch 5 | import os 6 | import matplotlib.pyplot as plt 7 | 8 | from NeuralSolvers.pinn.datasets import BoundaryConditionDataset1D 9 | from configs import CONFIGS, MODELS, SYSTEM, PDE_FUNCTIONS 10 | import NeuralSolvers as nsolv 11 | from datasets import InitialConditionDataset 12 | 13 | def load_model(model_name, model_args): 14 | """ 15 | Dynamically load a model class and return an instance. 16 | 17 | Args: 18 | model_name (str): Name of the model class to load. 19 | model_args (dict): Arguments to pass to the model constructor. 20 | 21 | Returns: 22 | torch.nn.Module: An instance of the selected model. 23 | """ 24 | module_name, class_name = MODELS[model_name].rsplit(".", 1) 25 | module = importlib.import_module(module_name) 26 | model_class = getattr(module, class_name) 27 | return model_class(**model_args) 28 | 29 | def setup_pinn(system_name, model_name="MLP"): 30 | """ 31 | Set up the PINN for a given PDE system and model. 32 | 33 | Args: 34 | system_name (str): The name of the PDE system. 35 | model_name (str): The name of the model architecture. 36 | 37 | Returns: 38 | nsolv.PINN: Configured PINN instance. 39 | """ 40 | config = CONFIGS[system_name] 41 | domain = config["domain"] 42 | device = SYSTEM['device'] 43 | boundary_conditions = config["boundary_conditions"] 44 | pinn_boundary_conditions = [] 45 | for bc in boundary_conditions: 46 | bc_vals = boundary_conditions[bc] 47 | 48 | if "custom_dataset" in bc_vals: 49 | module_name, class_name = bc_vals["custom_dataset"].rsplit(".", 1) 50 | module = importlib.import_module(module_name) 51 | CustomDataset = getattr(module, class_name) 52 | bc_dataset = CustomDataset(**bc_vals["custom_dataset_parameters"], lower_bound=domain[0], upper_bound=domain[1], device=device) 53 | else: 54 | bc_dataset = BoundaryConditionDataset1D(nb=bc_vals['nb'], is_lower=bc_vals['is_lower'], 55 | lower_bound=domain[0], upper_bound=domain[1], 56 | device=device) 57 | 58 | if "custom_boundary" in bc_vals: 59 | module_name, class_name = bc_vals["custom_boundary"].rsplit(".", 1) 60 | module = importlib.import_module(module_name) 61 | CustomDataset = getattr(module, class_name) 62 | bc = CustomDataset(**bc_vals["custom_boundary_parameters"], dataset=bc_dataset) 63 | else: 64 | bc = nsolv.pinn.datasets.DirichletBC(bc_vals['func'], bc_dataset, name=bc) 65 | 66 | pinn_boundary_conditions.append(bc) 67 | 68 | if "custom_dataset" in config["initial_condition"]: 69 | module_name, class_name = config["initial_condition"]["custom_dataset"].rsplit(".", 1) 70 | module = importlib.import_module(module_name) 71 | CustomDataset = getattr(module, class_name) 72 | ic_dataset = CustomDataset(**config["initial_condition"]["parameters"], device=device) 73 | else: 74 | # Use the default InitialConditionDataset 75 | ic_dataset = InitialConditionDataset( 76 | n0=config["initial_condition"]['n0'], 77 | initial_func=config["initial_condition"]['u0'], 78 | domain=domain, 79 | device=device 80 | ) 81 | 82 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name="IC") 83 | 84 | 85 | # Use the PDE closure with params 86 | pde_function = PDE_FUNCTIONS[config["pde_function"]](config["pde_parameters"]) 87 | 88 | # Geometry and PDE Loss 89 | pde_loss = nsolv.pinn.PDELoss( 90 | nsolv.NDCube(domain[0], 
domain[1], config["num_collocation_points"], config["num_collocation_points"], 91 | nsolv.samplers.LHSSampler(), device=device), 92 | pde_function, 93 | name = "PDE" 94 | ) 95 | 96 | # Model Arguments 97 | model_args = config["model_args"] 98 | model_args.update({ 99 | "lb": domain[0], 100 | "ub": domain[1], 101 | "device": device 102 | }) 103 | 104 | # Load the selected model 105 | model = load_model(model_name, model_args) 106 | 107 | # Initialize PINN 108 | return nsolv.PINN(model, model_args["input_size"], model_args["output_size"], pde_loss, 109 | initial_condition, 110 | pinn_boundary_conditions, device=device 111 | ) 112 | 113 | def train_and_benchmark(system_name, model_name, num_epochs=1000): 114 | """ 115 | Train and benchmark the PINN. 116 | 117 | Args: 118 | system_name (str): Name of the PDE system. 119 | model_name (str): Name of the model architecture. 120 | num_epochs (int): Number of training epochs. 121 | """ 122 | config = CONFIGS[system_name] 123 | 124 | pinn = setup_pinn(system_name, model_name) 125 | pinn.fit(num_epochs, pretraining=config["initial_condition"]["pretrain"], 126 | lbfgs_finetuning=False) 127 | print(f"Finished training for {system_name} using {model_name}.") 128 | return pinn 129 | 130 | def plot_pinn_solution(pinn, system_name, model_name, output_dir='results'): 131 | """ 132 | Plot the solution predicted by the PINN. 133 | 134 | Args: 135 | pinn: Trained PINN model. 136 | system_name (str): Name of the PDE system. 137 | """ 138 | config = CONFIGS[system_name] 139 | x = np.linspace(config["domain"][0][0], config["domain"][1][0], 100) 140 | t = np.linspace(config["domain"][0][1], config["domain"][1][1], 100) 141 | X, T = np.meshgrid(x, t) 142 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 143 | 144 | pred = pinn(torch.Tensor(X_star).to(SYSTEM['device'])).detach().cpu().numpy() 145 | # check pred if it got multiple output dimensions (eg. 
real+imaginary part) 146 | #.reshape(X.shape)) 147 | 148 | spatial_extent, output_dimensions = pred.shape 149 | 150 | 151 | os.makedirs(output_dir, exist_ok=True) 152 | np.save(os.path.join(output_dir, f"{system_name}_{model_name}_prediction.npy"), pred) 153 | np.save(os.path.join(output_dir, f"{system_name}_{model_name}_coordinates.npy"), X_star) 154 | 155 | 156 | for i in range(output_dimensions): 157 | pred_i = pred[:,i].reshape(X.shape) 158 | 159 | plt.imshow(pred_i.T, origin='lower', extent=[t.min(), t.max(), x.min(), x.max()]) 160 | plt.title(f"{system_name} Solution of Dimension {i}") 161 | plt.colorbar() 162 | 163 | # Save the plot 164 | filename = f"{system_name}_{model_name}_solution_dim_{i}.png" 165 | plt.savefig(os.path.join(output_dir, filename)) 166 | plt.close() 167 | 168 | if(output_dimensions == 2): 169 | sol_u = pred[:,0].reshape(X.shape) 170 | sol_v = pred[:, 1].reshape(X.shape) 171 | 172 | sol_pow = np.sqrt(sol_u ** 2 + sol_v ** 2) 173 | 174 | plt.imshow(sol_pow.T, origin='lower', extent=[t.min(), t.max(), x.min(), x.max()]) 175 | plt.title(f"{system_name} Solution (power)") 176 | plt.colorbar() 177 | filename = f"{system_name}_{model_name}_solution_power.png" 178 | plt.savefig(os.path.join(output_dir, filename)) 179 | plt.close() 180 | 181 | def main(): 182 | # Argument parser to select system and model 183 | parser = argparse.ArgumentParser(description="PINN Benchmark Runner") 184 | parser.add_argument("--system", type=str, required=True, 185 | choices=["burgers", "heat", "schrodinger", "wave"], 186 | help="PDE system to solve: burgers, heat, schrodinger, wave,") 187 | parser.add_argument("--model", type=str, required=True, 188 | choices=["MLP", "ModulatedMLP"], 189 | help="Model architecture to use: MLP, ModulatedMLP, CustomMLP") 190 | parser.add_argument("--epochs", type=int, default=1000, 191 | help="Number of training epochs") 192 | args = parser.parse_args() 193 | 194 | # Train and benchmark the selected system and model 195 | pinn = train_and_benchmark(system_name=args.system, model_name=args.model, num_epochs=args.epochs) 196 | plot_pinn_solution(pinn, system_name=args.system, model_name=args.model) 197 | 198 | if __name__ == "__main__": 199 | main() -------------------------------------------------------------------------------- /benchmarks/configs.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from NeuralSolvers import * 4 | 5 | # Configuration for PDE systems 6 | CONFIGS = { 7 | "burgers": { 8 | "name": "Burgers Equation", 9 | "domain": np.array([[-1, 0.0], [1.0, 1.0]]), # Spatial and temporal bounds 10 | "pde_function": "burgers1D", # Identifier for the PDE function 11 | "pde_parameters": { 12 | "viscosity": 0.01 / np.pi # Burgers' equation viscosity 13 | }, 14 | "num_collocation_points": 10000, 15 | "initial_condition": { 16 | "custom_dataset": "examples.Burgers_Equation_1d.Burgers_Equation.InitialConditionDataset", 17 | "parameters": { 18 | "n0": 100, 19 | "file_path": "../examples/Burgers_Equation_1d/burgers_shock.mat" 20 | }, 21 | "pretrain": False 22 | }, 23 | "boundary_conditions": { 24 | }, 25 | "model_args": { 26 | "input_size": 2, 27 | "output_size": 1, 28 | "hidden_size": 40, 29 | "num_hidden": 8, 30 | "activation": torch.tanh 31 | } 32 | }, 33 | 34 | "schrodinger": { 35 | "name": "Schrödinger Equation", 36 | "domain": np.array([[-5, 0.0], [5.0, np.pi / 2]]), # Spatial and temporal bounds 37 | "pde_function": "schrodinger1D", # Identifier for the PDE function 38 | 
"pde_parameters": { 39 | "potential": "harmonic" # Example: harmonic potential 40 | }, 41 | "num_collocation_points": 20000, 42 | "initial_condition": { 43 | "custom_dataset": "examples.Schroedinger_1d.Schroedinger.InitialConditionDataset", 44 | "parameters": { 45 | "n0": 50, 46 | "file_path": "../examples/Schroedinger_1d/NLS.mat" 47 | }, 48 | "pretrain": False 49 | }, 50 | "boundary_conditions": { 51 | "u_periodic": { 52 | "custom_dataset": "examples.Schroedinger_1d.Schroedinger.BoundaryConditionDataset", 53 | "custom_dataset_parameters": { 54 | "nb": 50, 55 | "file_path": "../examples/Schroedinger_1d/NLS.mat" 56 | }, 57 | "custom_boundary": "NeuralSolvers.pinn.datasets.PeriodicBC", 58 | "custom_boundary_parameters": { 59 | "output_dimension": 0, 60 | "name": "u periodic" 61 | }, 62 | }, 63 | "v_periodic": { 64 | "custom_dataset": "examples.Schroedinger_1d.Schroedinger.BoundaryConditionDataset", 65 | "custom_dataset_parameters": { 66 | "nb": 50, 67 | "file_path": "../examples/Schroedinger_1d/NLS.mat" 68 | }, 69 | "custom_boundary": "NeuralSolvers.pinn.datasets.PeriodicBC", 70 | "custom_boundary_parameters": { 71 | "output_dimension": 1, 72 | "name": "v periodic" 73 | }, 74 | }, 75 | "ux_periodic": { 76 | "custom_dataset": "examples.Schroedinger_1d.Schroedinger.BoundaryConditionDataset", 77 | "custom_dataset_parameters": { 78 | "nb": 50, 79 | "file_path": "../examples/Schroedinger_1d/NLS.mat" 80 | }, 81 | "custom_boundary": "NeuralSolvers.pinn.datasets.PeriodicBC", 82 | "custom_boundary_parameters": { 83 | "name": "u_x periodic", 84 | "degree": 1, 85 | "input_dimension": 0, 86 | "output_dimension": 0 87 | 88 | }, 89 | }, 90 | "vx_periodic": { 91 | "custom_dataset": "examples.Schroedinger_1d.Schroedinger.BoundaryConditionDataset", 92 | "custom_dataset_parameters": { 93 | "nb": 50, 94 | "file_path": "../examples/Schroedinger_1d/NLS.mat" 95 | }, 96 | "custom_boundary": "NeuralSolvers.pinn.datasets.PeriodicBC", 97 | "custom_boundary_parameters": { 98 | "name": "v_x periodic", 99 | "degree": 1, 100 | "input_dimension": 0, 101 | "output_dimension": 1 102 | }, 103 | } 104 | }, 105 | "model_args": { 106 | "input_size": 2, 107 | "output_size": 2, 108 | "hidden_size": 100, 109 | "num_hidden": 4, 110 | "activation": torch.tanh 111 | } 112 | }, 113 | 114 | "heat": { 115 | "name": "Heat Equation", 116 | "domain": np.array([[0.0, 0.0], [1.0, 2.0]]), # Spatial and temporal bounds 117 | "pde_function": "heat1D", # Identifier for the PDE function 118 | "num_collocation_points": 20000, 119 | "pde_parameters": { 120 | "diffusivity": 1.0 # Thermal diffusivity (assumed to be 1 in this case) 121 | }, 122 | "initial_condition": { 123 | "u0": lambda x: np.sin(np.pi * x), 124 | "n0": 50, 125 | "pretrain": True 126 | }, 127 | "boundary_conditions": { 128 | "DC_upper": { 129 | "nb": 100, 130 | "func": dirichlet, 131 | "is_lower": False 132 | }, 133 | "DC_lower": { 134 | "nb": 100, 135 | "func": dirichlet, 136 | "is_lower": True 137 | } 138 | }, 139 | "model_args": { 140 | "input_size": 2, 141 | "output_size": 1, 142 | "hidden_size": 100, 143 | "num_hidden": 4, 144 | "activation": torch.tanh 145 | } 146 | }, 147 | 148 | "wave": { 149 | "name": "Wave Equation", 150 | "domain": np.array([[-1, 0.0], [1.0, 1.0]]), # Spatial and temporal bounds 151 | "pde_function": "wave1D", # Identifier for the PDE function 152 | "pde_parameters": { 153 | "wave_speed": 1.0 # Wave propagation speed 154 | }, 155 | "num_collocation_points": 10000, 156 | "initial_condition": { 157 | "u0": lambda x: np.sin(np.pi * x), 158 | "n0": 100, 159 | 
"pretrain": True 160 | }, 161 | "boundary_conditions": {}, 162 | "model_args": { 163 | "input_size": 2, 164 | "output_size": 1, 165 | "hidden_size": 40, 166 | "num_hidden": 8 167 | } 168 | } 169 | } 170 | 171 | # Available models: mapping of model names to their paths and class names 172 | MODELS = { 173 | "MLP": "NeuralSolvers.models.mlp.MLP", 174 | "ModulatedMLP": "NeuralSolvers.models.modulated_mlp.ModulatedMLP", 175 | } 176 | 177 | # Additional parameters 178 | SYSTEM = { 179 | "device": "mps", 180 | } 181 | 182 | PDE_FUNCTIONS = { 183 | "burgers1D": burgers1D, 184 | "wave1D": wave1D, 185 | "schrodinger1D": schrodinger1D, 186 | "heat1D": heat1D 187 | } -------------------------------------------------------------------------------- /benchmarks/datasets.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | class InitialConditionDataset(torch.utils.data.Dataset): 5 | """Generalized Initial Condition Dataset.""" 6 | 7 | def __init__(self, n0, initial_func, domain, device='cpu'): 8 | x = np.linspace(domain[0][0], domain[1][0], n0)[:, None] 9 | u0 = initial_func(x) 10 | self.X_u_train = torch.Tensor(np.hstack((x, np.zeros_like(x)))).to(device) 11 | self.u_train = torch.Tensor(u0).to(device) 12 | 13 | def __len__(self): 14 | return 1 15 | 16 | def __getitem__(self, idx): 17 | return self.X_u_train.float(), self.u_train.float() -------------------------------------------------------------------------------- /examples/Bioheat_2d/2D_Bioheat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from Datasets import * 3 | from argparse import ArgumentParser 4 | import NeuralSolvers as nsolv 5 | 6 | DEVICE = "gpu" 7 | 8 | if __name__ == "__main__": 9 | 10 | parser = ArgumentParser() 11 | parser.add_argument("--identifier", type=str, default="UKD_DeepHPM") 12 | parser.add_argument("--pData", type=str, default="") 13 | parser.add_argument("--epochs", type=int, default=1000) 14 | parser.add_argument("--nt", type=int, default=1000) 15 | parser.add_argument("--timeStep", type=int, default=25) 16 | parser.add_argument("--batchSize", type=int, default=512) 17 | parser.add_argument("--numBatches", type=int, default=2e5) 18 | parser.add_argument("--hidden_size", type=int, default=500) 19 | parser.add_argument("--num_hidden", type=int, default=8) 20 | parser.add_argument("--hidden_size_alpha", type=int, default=500) 21 | parser.add_argument("--num_hidden_alpha", type=int, default=8) 22 | parser.add_argument("--hidden_size_hs", type=int, default=500) 23 | parser.add_argument("--num_hidden_hs", type=int, default=8) 24 | args = parser.parse_args() 25 | 26 | # Initial condition 27 | ic_dataset = InitialConditionDataset( 28 | pData=args.pData, 29 | batchSize=args.batchSize, 30 | numBatches=args.numBatches, 31 | nt=args.nt, 32 | timeStep=args.timeStep) 33 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name = "IC Bioheat") 34 | 35 | # PDE dataset 36 | pde_dataset = PDEDataset( 37 | pData=args.pData, 38 | seg_mask=ic_dataset.seg_mask, 39 | batchSize=args.batchSize, 40 | numBatches=args.numBatches, 41 | nt=args.nt, 42 | timeStep=args.timeStep, 43 | t_ub=ic_dataset.cSystem["t_ub"]) 44 | 45 | # Thermal diffusivity model 46 | # Input: spatiotemporal coordinates of a point x,y,t 47 | # Output: thermal diffusivity value for the point 48 | alpha_net = nsolv.models.MLP(input_size=3, 49 | output_size=1, 50 | hidden_size=args.hidden_size_alpha, 51 | 
num_hidden=args.num_hidden_alpha,
52 |                              lb=ic_dataset.lb[:3],  # lb for x,y,t
53 |                              ub=ic_dataset.ub[:3])  # ub for x,y,t
54 | 
55 |     # Heat source model - part of du/dt that cannot be explained by conduction
56 |     # Input: spatiotemporal coordinates of a point x,y,t
57 |     # Output: heat source value for the point
58 |     heat_source_net = nsolv.models.MLP(input_size=3,
59 |                                        output_size=1,
60 |                                        hidden_size=args.hidden_size_hs,
61 |                                        num_hidden=args.num_hidden_hs,
62 |                                        lb=ic_dataset.lb[:3],  # lb for x,y,t
63 |                                        ub=ic_dataset.ub[:3])  # ub for x,y,t
64 | 
65 |     # PINN model
66 |     # Input: spatiotemporal coordinates of a point x,y,t
67 |     # Output: temperature u at the point
68 |     model = nsolv.models.MLP(input_size=3,
69 |                              output_size=1,
70 |                              hidden_size=args.hidden_size,
71 |                              num_hidden=args.num_hidden,
72 |                              lb=ic_dataset.lb[:3],  # lb for x,y,t
73 |                              ub=ic_dataset.ub[:3])  # ub for x,y,t
74 |     # HPM model: du/dt = alpha*(u_xx + u_yy) + heat_source
75 |     # Initialization: alpha model, heat source model
76 |     # Forward pass input: output of the derivatives function for a point x,y,t
77 |     # Forward pass output: du/dt value for the point
78 |     hpm_model = nsolv.models.MultiModelHPM(alpha_net, heat_source_net)
79 |     hpm_loss = nsolv.pinn.HPMLoss(pde_dataset, derivatives, hpm_model)
80 |     pinn = nsolv.PINN(
81 |         model,
82 |         input_dimension=6,
83 |         output_dimension=1,
84 |         pde_loss=hpm_loss,
85 |         initial_condition=initial_condition,
86 |         boundary_condition=None,
87 |         device=DEVICE)
88 | 
89 |     pinn.fit(args.epochs, 'Adam', 1e-6)
90 | 
-------------------------------------------------------------------------------- /examples/Bioheat_2d/Datasets.py: --------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import h5py
4 | import torch
5 | import numpy as np
6 | from skimage.restoration import denoise_bilateral
7 | from skimage.filters import sobel
8 | from skimage.segmentation import watershed
9 | from scipy.ndimage import binary_fill_holes
10 | from argparse import ArgumentParser
11 | from torch.utils.data import Dataset
12 | 
13 | class BoundaryConditionDataset(Dataset):
14 | 
15 |     def __init__(self, nb, lb, ub):
16 |         """
17 |         Constructor of the boundary condition dataset
18 |         """
19 |         raise NotImplementedError
20 | 
21 |     def __getitem__(self, idx):
22 |         """
23 |         Returns data for the boundary condition
24 |         """
25 |         raise NotImplementedError
26 | 
27 |     def __len__(self):
28 |         """
29 |         Length of the dataset
30 |         """
31 |         raise NotImplementedError
32 | 
33 | 
34 | class InitialConditionDataset(Dataset):
35 | 
36 |     @staticmethod
37 |     def get2DGrid(nx, ny):
38 |         """
39 |         Create a vector with all positions of a 2D grid (nx x ny)
40 |         """
41 |         x = np.arange(0, nx, 1)
42 |         y = np.arange(0, ny, 1)
43 | 
44 |         xGrid, yGrid = np.meshgrid(x, y)
45 | 
46 |         posX = xGrid.reshape(-1)
47 |         posY = yGrid.reshape(-1)
48 | 
49 |         return posX, posY
50 | 
51 |     @staticmethod
52 |     def get3DGrid(nx, ny, nt):
53 |         """
54 |         Create a vector with all positions of a 3D grid (nx x ny x nt)
55 |         """
56 |         x = np.arange(0, nx, 1)
57 |         y = np.arange(0, ny, 1)
58 |         t = np.arange(0, nt, 1)
59 | 
60 |         xGrid, yGrid, tGrid = np.meshgrid(x, y, t)
61 | 
62 |         posX = xGrid.reshape(-1)
63 |         posY = yGrid.reshape(-1)
64 |         posT = tGrid.reshape(-1)
65 | 
66 |         return posX, posY, posT
67 | 
68 |     @staticmethod
69 |     def getInput(pData, tPoint, cSystem, spatRes=0.3):
70 |         """
71 |         Get the spatiotemporal coordinates for a specific time point tPoint
72 |         The function returns a list of grid points appended with time t (nx x ny x nt)
73 |         pData: path to a dataset
dataset 74 | cSystem: dictionary storing information about the dataset 75 | spatRes: spatial resolution of an image in the dataset [mm/pixel] 76 | """ 77 | # Upload an image at time point tPoint 78 | # hf = {'seq': thermal data at t, 'timing': t x image length} 79 | hf = h5py.File(pData + str(tPoint) + '.h5', 'r') 80 | t = np.array(hf['timing'][0]) 81 | hf.close() 82 | 83 | # Get spatial grid for an image 84 | posX, posY = InitialConditionDataset.get2DGrid( 85 | cSystem["nx"], cSystem["ny"]) 86 | 87 | size = cSystem["nx"] * cSystem["ny"] # number of pixels in an image 88 | posT = np.zeros(size) + t 89 | 90 | # Convert indices to physical quantities 91 | posX = posX * spatRes 92 | posY = posY * spatRes 93 | 94 | return posX, posY, posT 95 | 96 | @staticmethod 97 | def segmentation(pData, tPoint, nx, ny, threshold=32.4): 98 | """ 99 | Calculate segmentation mask for the brain cortex depicted at time point tPoint 100 | pData: path to a dataset 101 | cSystem: dictionary storing information about the dataset 102 | Segmentation is calculated based on watershed algorithm with predefined threshold 103 | """ 104 | # Upload an image at time point tPoint 105 | # hf = {'seq': thermal data at t, 'timing': t x image length} 106 | hf = h5py.File(pData + str(tPoint) + '.h5', 'r') 107 | value = np.array(hf['seq'][:]) 108 | hf.close() 109 | 110 | # Reshape image from 1D array to 2D array 111 | value = np.array(value).reshape(-1) 112 | value = value.reshape(nx, ny) 113 | 114 | # Apply bilateralFilter to improve segmentation quality 115 | value = denoise_bilateral( 116 | value, 117 | sigma_color=5, 118 | sigma_spatial=5, 119 | multichannel=False) 120 | 121 | # Segmentation algorithm 122 | elevation_map = sobel(value) 123 | markers = np.zeros_like(value) 124 | markers[value > threshold] = 2 125 | markers[value <= threshold] = 1 126 | segmentation = watershed(elevation_map, markers) 127 | segmentation = binary_fill_holes(segmentation - 1) 128 | segmentation = np.array(segmentation, dtype=np.int) 129 | 130 | return segmentation 131 | 132 | @staticmethod 133 | def loadFrame(pData, tPoint): 134 | """ 135 | Upload an image from dataset at time point tPoint 136 | """ 137 | if not os.path.exists(pData): # Check if given path to data is valid 138 | raise FileNotFoundError('Could not find file' + pData) 139 | 140 | # hf = {'seq': thermal data at t, 'timing': t x image length} 141 | hf = h5py.File(pData + str(tPoint) + '.h5', 'r') 142 | value = np.array(hf['seq'][:]) 143 | timing = np.array(hf['timing'][:]) 144 | hf.close() 145 | 146 | return value, timing 147 | 148 | def __init__(self, pData, batchSize, numBatches, nt, timeStep, nx=640, 149 | ny=480, pixStep=4, shuffle=True, useGPU=False): 150 | """ 151 | Constructor of the initial condition dataset 152 | __getitem()__ returns a batch with x,y,t to compute u_predicted value at as well as u_exact 153 | """ 154 | self.u = [] # thermal data 155 | self.x = [] 156 | self.y = [] 157 | self.t = [] 158 | 159 | if not os.path.exists(pData): # Check if given path to data is valid 160 | raise FileNotFoundError('Could not find file' + pData) 161 | 162 | # Find out the last time point which data is presented at 163 | hf = h5py.File(pData + str(nt - 1) + '.h5', 'r') 164 | tmax = np.array(hf['timing'][0]) 165 | hf.close() 166 | 167 | self.seg_mask = self.segmentation( 168 | pData=pData, tPoint=0, nx=nx, ny=ny) # segmentation mask 169 | 170 | for tPoint in range( 171 | 0, nt, timeStep): # load each timeStep-th frame from the dataset 172 | # Upload an image from dataset at time point 
tPoint 173 | Exact_u, timing = self.loadFrame(pData, tPoint) 174 | Exact_u = Exact_u.reshape( 175 | nx, ny) * self.seg_mask # apply segmentation 176 | for xi in range( 177 | 0, nx, pixStep): # sample only each pixStep-th spatial point from an image 178 | for yi in range(0, ny, pixStep): 179 | if Exact_u[xi, yi] != 0: # neglect non-cortex data 180 | self.u.append(Exact_u[xi, yi]) 181 | self.x.append(xi) 182 | self.y.append(yi) 183 | self.t.append(timing) 184 | 185 | # Convert python lists to numpy arrays 186 | self.u = np.array(self.u).reshape(-1) 187 | self.x = np.array(self.x).reshape(-1) 188 | self.y = np.array(self.y).reshape(-1) 189 | self.t = np.array(self.t).reshape(-1) 190 | 191 | print(len(self.x)) 192 | 193 | # Sometimes we are loading less files than we specified by batchsize + numBatches 194 | # => adapt numBatches to real number of batches for avoiding empty batches 195 | self.batchSize = batchSize 196 | print("batchSize: %d" % (self.batchSize)) 197 | self.numSamples = min((numBatches * batchSize, len(self.x))) 198 | print("numSamples: %d" % (self.numSamples)) 199 | self.numBatches = self.numSamples // self.batchSize 200 | print("numBatches: %d" % (self.numBatches)) 201 | self.randomState = np.random.RandomState(seed=1234) 202 | 203 | # Create dictionary with information about the dataset 204 | self.cSystem = { 205 | "x_lb": self.x.min(), 206 | "x_ub": self.x.max(), 207 | "y_lb": self.y.min(), 208 | "y_ub": self.y.max(), 209 | "nx": nx, 210 | "ny": ny, 211 | "nt": nt, 212 | "t_ub": self.t.max()} 213 | 214 | # Convert indices to physical quantities [mm] 215 | self.x = self.x * 0.25 216 | self.y = self.y * 0.25 217 | 218 | # Boundaries of spatiotemporal domain 219 | self.lb = np.array([self.x.min(), self.y.min(), self.t.min()]) 220 | self.ub = np.array([self.x.max(), self.y.max(), self.t.max()]) 221 | 222 | if (useGPU): # send to GPU if requested 223 | self.dtype = torch.cuda.FloatTensor 224 | self.dtype2 = torch.cuda.LongTensor 225 | else: 226 | self.dtype = torch.FloatTensor 227 | self.dtype2 = torch.LongTensor 228 | 229 | if shuffle: # shuffle the whole dataset if requested 230 | # Generate random permutation idx 231 | randIdx = self.randomState.permutation(self.x.shape[0]) 232 | 233 | # Use random index 234 | self.x = self.x[randIdx] 235 | self.y = self.y[randIdx] 236 | self.t = self.t[randIdx] 237 | self.u = self.u[randIdx] 238 | 239 | # Slice the array for training 240 | self.x = self.dtype(self.x[:self.numSamples]) 241 | self.y = self.dtype(self.y[:self.numSamples]) 242 | self.t = self.dtype(self.t[:self.numSamples]) 243 | self.u = self.dtype(self.u[:self.numSamples]) 244 | 245 | def __len__(self): 246 | """ 247 | Length of the dataset 248 | """ 249 | return self.numBatches 250 | 251 | def __getitem__(self, index): 252 | """ 253 | Returns item at given index 254 | """ 255 | # Generate batch for inital solution 256 | x = (self.x[index * self.batchSize: (index + 1) * self.batchSize]) 257 | y = (self.y[index * self.batchSize: (index + 1) * self.batchSize]) 258 | t = (self.t[index * self.batchSize: (index + 1) * self.batchSize]) 259 | u = (self.u[index * self.batchSize: (index + 1) * self.batchSize]) 260 | return torch.stack([x, y, t], 1), u 261 | 262 | 263 | class PDEDataset(Dataset): 264 | def __init__(self, pData, seg_mask, batchSize, numBatches, t_ub, nt, timeStep, nx=640, 265 | ny=480, pixStep=4, shuffle=True, useGPU=False): 266 | """ 267 | Constructor of the residual points dataset 268 | __getitem()__ returns a batch with x,y,t points to compute residuals at 269 | """ 
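        # Residual points are masked below with the frame-0 cortex segmentation,
        # so PDE residuals are only evaluated on cortex pixels.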
270 | self.x = [] 271 | self.y = [] 272 | self.t = [] 273 | 274 | hf = h5py.File(pData + str(0) + '.h5', 'r') 275 | Exact_u = np.array(hf['seq'][:]) 276 | hf.close() 277 | 278 | Exact_u = Exact_u.reshape(nx, ny) * seg_mask # apply segmentation 279 | 280 | for tPoint in range( 281 | 0, nt, timeStep): # load each timeStep-th frame from the dataset 282 | # Upload an image from dataset at time point tPoint 283 | for xi in range( 284 | 0, nx, pixStep): # sample only each pixStep-th spatial point from an image 285 | for yi in range(0, ny, pixStep): 286 | if Exact_u[xi, yi] != 0: # neglect non-cortex data 287 | self.x.append(xi) 288 | self.y.append(yi) 289 | self.t.append(tPoint) 290 | 291 | # Convert python lists to numpy arrays 292 | self.x = np.array(self.x).reshape(-1) 293 | self.y = np.array(self.y).reshape(-1) 294 | self.t = np.array(self.t).reshape(-1) 295 | 296 | # Sometimes we are loading less files than we specified by batchsize + numBatches 297 | # => adapt numBatches to real number of batches for avoiding empty batches 298 | self.batchSize = batchSize 299 | print("batchSize: %d" % (self.batchSize)) 300 | self.numSamples = min((numBatches * batchSize, len(self.x))) 301 | print("numSamples: %d" % (self.numSamples)) 302 | self.numBatches = self.numSamples // self.batchSize 303 | print("numBatches: %d" % (self.numBatches)) 304 | self.randomState = np.random.RandomState(seed=1234) 305 | 306 | # Convert indices to physical quantities [mm] & [s] 307 | self.x = self.x * 0.25 308 | self.y = self.y * 0.25 309 | self.t = t_ub * self.t / nt 310 | 311 | if (useGPU): # send to GPU if requested 312 | self.dtype = torch.cuda.FloatTensor 313 | self.dtype2 = torch.cuda.LongTensor 314 | else: 315 | self.dtype = torch.FloatTensor 316 | self.dtype2 = torch.LongTensor 317 | 318 | if shuffle: # shuffle the whole dataset if requested 319 | # Generate random permutation idx 320 | randIdx = self.randomState.permutation(self.x.shape[0]) 321 | 322 | # Use random index 323 | self.x = self.x[randIdx] 324 | self.y = self.y[randIdx] 325 | self.t = self.t[randIdx] 326 | 327 | # Slice the array for training 328 | self.x = self.dtype(self.x[:self.numSamples]) 329 | self.y = self.dtype(self.y[:self.numSamples]) 330 | self.t = self.dtype(self.t[:self.numSamples]) 331 | 332 | def __len__(self): 333 | """ 334 | Length of the dataset 335 | """ 336 | return self.numBatches 337 | 338 | def __getitem__(self, index): 339 | """ 340 | Returns item at given index 341 | """ 342 | # Generate batch with residual points 343 | x = (self.x[index * self.batchSize: (index + 1) * self.batchSize]) 344 | y = (self.y[index * self.batchSize: (index + 1) * self.batchSize]) 345 | t = (self.t[index * self.batchSize: (index + 1) * self.batchSize]) 346 | return torch.stack([x, y, t], 1) 347 | 348 | 349 | def derivatives(x, u): 350 | """ 351 | Calculate the nn output at postion (x,y) at time t 352 | :param x: position 353 | :param t: time 354 | :return: Approximated solutions and their gradients 355 | """ 356 | # Save input in variabeles is necessary for gradient calculation 357 | x.requires_grad = True 358 | 359 | # Calculate derivatives with torch automatic differentiation 360 | # Move to the same device as prediction 361 | grads = torch.ones(u.shape, device=u.device) 362 | J_U = torch.autograd.grad(u, x, create_graph=True, grad_outputs=grads)[0] 363 | u_x = J_U[:, 0].reshape(u.shape) 364 | u_y = J_U[:, 1].reshape(u.shape) 365 | u_t = J_U[:, 2].reshape(u.shape) 366 | 367 | u_xx = torch.autograd.grad( 368 | u_x, x, create_graph=True, 
grad_outputs=grads)[0] 369 | u_yy = torch.autograd.grad( 370 | u_y, x, create_graph=True, grad_outputs=grads)[0] 371 | u_xx = u_xx[:, 0].reshape(u.shape) 372 | u_yy = u_yy[:, 1].reshape(u.shape) 373 | 374 | x, y, t = x.T 375 | x = x.reshape(u.shape) 376 | y = y.reshape(u.shape) 377 | t = t.reshape(u.shape) 378 | 379 | return torch.stack([x, y, t, u, u_xx, u_yy, u_t], 1).squeeze() 380 | -------------------------------------------------------------------------------- /examples/Burgers_Equation_1d/Burgers_Equation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io 3 | import torch 4 | from torch import Tensor, ones 5 | from torch.autograd import grad 6 | from torch.utils.data import Dataset 7 | import matplotlib.pyplot as plt 8 | 9 | from torch.profiler import profile, record_function, ProfilerActivity 10 | 11 | import NeuralSolvers as nsolv 12 | 13 | # Constants 14 | DEVICE = 'cuda' 15 | NUM_EPOCHS = 1000 # 50000 16 | DOMAIN_LOWER_BOUND = np.array([-1, 0.0]) 17 | DOMAIN_UPPER_BOUND = np.array([1.0, 1.0]) 18 | VISCOSITY = 0.01 / np.pi 19 | NOISE = 0.0 20 | NUM_INITIAL_POINTS = 100 21 | NUM_COLLOCATION_POINTS = 10000 22 | 23 | 24 | # define underlying PDE 25 | def burger1D(x, u): 26 | grads = ones(u.shape, device=u.device) # move to the same device as prediction 27 | grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0] 28 | # calculate first order derivatives 29 | u_x = grad_u[:, 0] 30 | u_t = grad_u[:, 1] 31 | 32 | grads = ones(u_x.shape, device=u.device) # move to the same device as prediction 33 | # calculate second order derivatives 34 | grad_u_x = grad(u_x, x, create_graph=True, grad_outputs=grads)[0] 35 | u_xx = grad_u_x[:, 0] 36 | 37 | # reshape for correct behavior of the optimizer 38 | u_x = u_x.reshape(-1, 1) 39 | u_t = u_t.reshape(-1, 1) 40 | u_xx = u_xx.reshape(-1, 1) 41 | 42 | f = u_t + u * u_x - (0.01 / np.pi) * u_xx 43 | return f 44 | 45 | 46 | class InitialConditionDataset(Dataset): 47 | 48 | def __init__(self, n0, device = 'cpu',file_path = 'burgers_shock.mat'): 49 | """ 50 | Constructor of the boundary condition dataset 51 | 52 | Args: 53 | n0 (int) 54 | """ 55 | super(type(self)).__init__() 56 | self.device = device 57 | data = scipy.io.loadmat(file_path) 58 | 59 | t = data['t'].flatten()[:, None] 60 | x = data['x'].flatten()[:, None] 61 | 62 | Exact = np.real(data['usol']).T 63 | 64 | X, T = np.meshgrid(x, t) 65 | xx1 = np.hstack((X[0:1, :].T, T[0:1, :].T)) 66 | uu1 = Exact[0:1, :].T 67 | xx2 = np.hstack((X[:, 0:1], T[:, 0:1])) 68 | uu2 = Exact[:, 0:1] 69 | xx3 = np.hstack((X[:, -1:], T[:, -1:])) 70 | uu3 = Exact[:, -1:] 71 | 72 | X_u_train = np.vstack([xx1, xx2, xx3]) 73 | u_train = np.vstack([uu1, uu2, uu3]) 74 | 75 | idx = np.random.choice(X_u_train.shape[0], n0, replace=False) 76 | self.X_u_train = Tensor(X_u_train[idx, :]).to(self.device) 77 | self.u_train = Tensor(u_train[idx, :]).to(self.device) 78 | 79 | def __len__(self): 80 | """ 81 | There exists no batch processing. 
So the size is 1 82 | """ 83 | return 1 84 | 85 | def __getitem__(self, idx): 86 | x = self.X_u_train 87 | y = self.u_train 88 | 89 | return Tensor(x).float(), Tensor(y).float() 90 | 91 | 92 | def load_burger_data(file_path: str = 'burgers_shock.mat'): 93 | """Load and return the Burgers equation data.""" 94 | data = scipy.io.loadmat(file_path) 95 | t = data['t'].flatten()[:, None] 96 | x = data['x'].flatten()[:, None] 97 | exact_solution = np.real(data['usol']).T 98 | return t, x, exact_solution 99 | 100 | 101 | def setup_pinn(model = None, file_path: str = 'burgers_shock.mat'): 102 | """Set up and return a Physics Informed Neural Network (PINN) for solving 1D Burgers equation. 103 | 104 | Creates a PINN with: 105 | 1. Initial condition dataset with NUM_INITIAL_POINTS training points 106 | 2. Latin Hypercube Sampling (LHS) for collocation points in the domain 107 | 3. PDE loss function for 1D Burgers equation 108 | 4. Multi-layer perceptron (MLP) as the neural network architecture 109 | 110 | Architecture: 111 | - Input size: 2 (x, t coordinates) 112 | - Output size: 1 (u velocity) 113 | - Hidden layers: 8 layers with 40 neurons each 114 | - Activation: tanh 115 | - Domain bounds: [DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND] 116 | 117 | Components: 118 | - Initial Condition: Sampled from exact solution at t=0 119 | - Collocation Points: NUM_COLLOCATION_POINTS × NUM_COLLOCATION_POINTS grid 120 | - PDE Loss: Enforces Burgers equation physics 121 | - Boundary Conditions: None (assuming periodic or infinite domain) 122 | 123 | Returns: 124 | nsolv.PINN: Configured PINN model ready for training 125 | 126 | Notes: 127 | - Uses Latin Hypercube Sampling for optimal domain coverage 128 | - All computations performed on specified DEVICE (CPU/GPU) 129 | - Burgers equation: ∂u/∂t + u∂u/∂x = ν∂²u/∂x² 130 | """ 131 | 132 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS, device=DEVICE, file_path=file_path) 133 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial Condition') 134 | 135 | sampler = nsolv.samplers.LHSSampler() 136 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS, 137 | sampler, device=DEVICE) 138 | 139 | pde_loss = nsolv.pinn.PDELoss(geometry, burger1D, name='PDE') 140 | 141 | if(model is None): 142 | model = nsolv.models.mlp.MLP( 143 | input_size=2, output_size=1, device=DEVICE, 144 | hidden_size=40, num_hidden=8, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND, 145 | activation=torch.tanh 146 | ) 147 | 148 | return nsolv.PINN(model, 2, 1, pde_loss, initial_condition, [], device=DEVICE) 149 | 150 | 151 | def train_pinn_profiler(pinn, num_epochs): 152 | """Train the PINN model.""" 153 | with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof: 154 | with record_function("model_training"): 155 | pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, 156 | logger=None, lbfgs_finetuning=False, writing_cycle=1000) 157 | 158 | print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) 159 | print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10)) 160 | 161 | 162 | def train_pinn(pinn, num_epochs, logger = None): 163 | """Train the PINN model.""" 164 | pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, 165 | logger=logger, lbfgs_finetuning=False, writing_cycle=1000) 166 | 167 | 168 | def plot_solution(pinn, t, x, exact_solution): 169 | """Plot the PINN solution.""" 170 | X, T = np.meshgrid(x, t) 
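    # Flatten the meshgrid into an (N, 2) array of (x, t) query points,
    # matching the input ordering the network was trained with.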
171 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 172 | 173 | pred = pinn(Tensor(X_star).to(DEVICE)) 174 | pred = pred.detach().cpu().numpy().reshape(X.shape) 175 | 176 | plt.imshow(pred.T, interpolation='nearest', cmap='rainbow', 177 | extent=[t.min(), t.max(), x.min(), x.max()], 178 | origin='lower', aspect='auto') 179 | plt.xlabel(r'$t$') 180 | plt.ylabel(r'$x$') 181 | plt.title(r"$u(x,t)$") 182 | plt.colorbar() 183 | plt.show() 184 | 185 | 186 | if __name__ == "__main__": 187 | pinn = setup_pinn() 188 | train_pinn(pinn, NUM_EPOCHS) 189 | 190 | #pinn.load_model('best_model_pinn.pt') 191 | t, x, exact_solution = load_burger_data() 192 | plot_solution(pinn, t, x, exact_solution) -------------------------------------------------------------------------------- /examples/Burgers_Equation_1d/Burgers_Equation_adaptive.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io 3 | import torch 4 | from torch import Tensor, ones 5 | from torch.autograd import grad 6 | from torch.utils.data import Dataset 7 | import matplotlib.pyplot as plt 8 | import NeuralSolvers as nsolv 9 | from Burgers_Equation import burger1D 10 | 11 | # Constants 12 | DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' 13 | NUM_EPOCHS = 50000 14 | DOMAIN_LOWER_BOUND = np.array([-1, 0.0]) 15 | DOMAIN_UPPER_BOUND = np.array([1.0, 1.0]) 16 | VISCOSITY = 0.01 / np.pi 17 | NOISE = 0.0 18 | NUM_INITIAL_POINTS = 100 19 | NUM_COLLOCATION_POINTS = 10000 20 | ADAPTIVE_SAMPLE_SIZE = 5000 21 | 22 | 23 | class InitialConditionDataset(Dataset): 24 | def __init__(self, n0, device=DEVICE): 25 | super().__init__() 26 | self.device = device 27 | data = scipy.io.loadmat('burgers_shock.mat') 28 | 29 | t, x = data['t'].flatten()[:, None], data['x'].flatten()[:, None] 30 | Exact = np.real(data['usol']).T 31 | 32 | X, T = np.meshgrid(x, t) 33 | X_u_train = np.vstack([ 34 | np.hstack((X[0:1, :].T, T[0:1, :].T)), 35 | np.hstack((X[:, 0:1], T[:, 0:1])), 36 | np.hstack((X[:, -1:], T[:, -1:])) 37 | ]) 38 | u_train = np.vstack([ 39 | Exact[0:1, :].T, 40 | Exact[:, 0:1], 41 | Exact[:, -1:] 42 | ]) 43 | 44 | idx = np.random.choice(X_u_train.shape[0], n0, replace=False) 45 | self.X_u_train = Tensor(X_u_train[idx, :]).to(self.device) 46 | self.u_train = Tensor(u_train[idx, :]).to(self.device) 47 | 48 | def __len__(self): 49 | return 1 50 | 51 | def __getitem__(self, idx): 52 | return self.X_u_train.float(), self.u_train.float() 53 | 54 | 55 | def load_burger_data(file_path='burgers_shock.mat'): 56 | data = scipy.io.loadmat(file_path) 57 | t, x = data['t'].flatten()[:, None], data['x'].flatten()[:, None] 58 | exact_solution = np.real(data['usol']).T 59 | return t, x, exact_solution 60 | 61 | 62 | def setup_pinn(): 63 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS, device=DEVICE) 64 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial condition') 65 | 66 | model = nsolv.models.MLP( 67 | input_size=2, output_size=1, device=DEVICE, 68 | hidden_size=40, num_hidden=8, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND, 69 | activation=torch.tanh 70 | ) 71 | 72 | sampler = nsolv.samplers.AdaptiveSampler(ADAPTIVE_SAMPLE_SIZE, model, burger1D) 73 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS, 74 | sampler, device=DEVICE) 75 | 76 | pde_loss = nsolv.pinn.PDELoss(geometry, burger1D, name='1D Burgers equation') 77 | 78 | return nsolv.pinn.PINN(model, 2, 1, pde_loss, 
initial_condition, [], device=DEVICE) 79 | 80 | 81 | def train_pinn(pinn, num_epochs): 82 | #logger = nsolv.WandbLogger("1D Burgers equation pinn", {"num_epochs": num_epochs}) 83 | logger = None 84 | pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, 85 | logger=logger, lbfgs_finetuning=False, writing_cycle=1000) 86 | 87 | 88 | def plot_solution(pinn, t, x, exact_solution): 89 | X, T = np.meshgrid(x, t) 90 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 91 | 92 | pred = pinn(Tensor(X_star).to(DEVICE)) 93 | pred = pred.detach().cpu().numpy().reshape(X.shape) 94 | 95 | plt.figure(figsize=(10, 8)) 96 | plt.imshow(pred.T, interpolation='nearest', cmap='rainbow', 97 | extent=[t.min(), t.max(), x.min(), x.max()], 98 | origin='lower', aspect='auto') 99 | plt.xlabel(r'$t$') 100 | plt.ylabel(r'$x$') 101 | plt.title(r"$u(x,t)$ - Adaptive PINN Solution") 102 | plt.colorbar() 103 | plt.show() 104 | 105 | 106 | if __name__ == "__main__": 107 | pinn = setup_pinn() 108 | train_pinn(pinn, NUM_EPOCHS) 109 | 110 | pinn.load_model('best_model_pinn.pt') 111 | t, x, exact_solution = load_burger_data() 112 | plot_solution(pinn, t, x, exact_solution) -------------------------------------------------------------------------------- /examples/Burgers_Equation_1d/burgers_equation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Burgers_Equation_1d/burgers_equation.png -------------------------------------------------------------------------------- /examples/Burgers_Equation_1d/burgers_shock.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Burgers_Equation_1d/burgers_shock.mat -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/Analytical_Solution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/Analytical_Solution.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/Analytical_different_t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/Analytical_different_t.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/Heat_Equation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import Tensor, ones 4 | from torch.autograd import grad 5 | from torch.utils.data import Dataset 6 | import matplotlib.pyplot as plt 7 | import NeuralSolvers as nsolv 8 | 9 | # Constants 10 | DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' 11 | NUM_EPOCHS = 1000 12 | DOMAIN_LOWER_BOUND = np.array([0, 0.0]) 13 | DOMAIN_UPPER_BOUND = np.array([1.0, 2.0]) 14 | NUM_INITIAL_POINTS = 50 15 | NUM_BOUNDARY_POINTS = 50 16 | NUM_COLLOCATION_POINTS = 20000 17 | 18 | 19 | def heat1d(x, u): 20 | grads = ones(u.shape, device=u.device) 21 | grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0] 22 | u_x, u_t = 
grad_u[:, 0], grad_u[:, 1] 23 | 24 | grads = ones(u_x.shape, device=u.device) 25 | u_xx = grad(u_x, x, create_graph=True, grad_outputs=grads)[0][:, 0] 26 | 27 | u_x, u_t, u_xx = [tensor.reshape(-1, 1) for tensor in (u_x, u_t, u_xx)] 28 | 29 | return u_t - u_xx 30 | 31 | 32 | class InitialConditionDataset(Dataset): 33 | def __init__(self, n0): 34 | super().__init__() 35 | L, c = 1, 1 36 | alpha = (c * np.pi / L) ** 2 37 | x = np.linspace(0, L, 200).flatten()[:, None] 38 | t = np.zeros(200).flatten()[:, None] 39 | u = (np.exp(-alpha * t)) * np.sin(np.pi * x / L) 40 | 41 | idx = np.random.choice(x.shape[0], n0, replace=False) 42 | self.x = Tensor(x[idx, :]).float().to(DEVICE) 43 | self.u = Tensor(u[idx, :]).float().to(DEVICE) 44 | self.t = Tensor(t[idx, :]).float().to(DEVICE) 45 | 46 | def __len__(self): 47 | return 1 48 | 49 | def __getitem__(self, idx): 50 | x = torch.cat([self.x, self.t], dim=1) 51 | y = self.u 52 | return x,y 53 | 54 | 55 | class BoundaryConditionDataset(Dataset): 56 | def __init__(self, nb, is_lower): 57 | super().__init__() 58 | max_t = 2 59 | t = np.linspace(0, max_t, 200).flatten()[:, None] 60 | idx_t = np.random.choice(t.shape[0], nb, replace=False) 61 | tb = t[idx_t, :] 62 | x_val = DOMAIN_LOWER_BOUND[0] if is_lower else DOMAIN_UPPER_BOUND[0] 63 | self.x_b = Tensor(np.concatenate((np.full_like(tb, x_val), tb), 1)).float().to(DEVICE) 64 | 65 | def __len__(self): 66 | return 1 67 | 68 | def __getitem__(self, idx): 69 | return self.x_b 70 | 71 | 72 | def setup_pinn(model = None): 73 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS) 74 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial Condition loss') 75 | 76 | bc_dataset_lb = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS, is_lower=True) 77 | bc_dataset_ub = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS, is_lower=False) 78 | 79 | def dirichlet_func(x): 80 | return torch.zeros_like(x)[:, 0].reshape(-1, 1) 81 | 82 | dirichlet_bc_lb = nsolv.pinn.datasets.DirichletBC(dirichlet_func, bc_dataset_lb, name='Lower dirichlet BC') 83 | dirichlet_bc_ub = nsolv.pinn.datasets.DirichletBC(dirichlet_func, bc_dataset_ub, name='Upper dirichlet BC') 84 | 85 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS, 86 | nsolv.samplers.LHSSampler(), device=DEVICE) 87 | 88 | pde_loss = nsolv.pinn.PDELoss(geometry, heat1d, name='PDE loss', weight=1) 89 | 90 | if(model is None): 91 | model = nsolv.models.MLP( 92 | input_size=2, output_size=1, device=DEVICE, 93 | hidden_size=100, num_hidden=4, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND, 94 | activation=torch.tanh 95 | ) 96 | 97 | return nsolv.pinn.PINN(model, 2, 1, pde_loss, initial_condition, [dirichlet_bc_lb, dirichlet_bc_ub], device=DEVICE) 98 | 99 | 100 | def train_pinn(pinn, num_epochs, logger = None): 101 | #logger = nsolv.WandbLogger("1D Heat equation pinn", {"num_epochs": num_epochs}) 102 | pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, logger=logger, 103 | lbfgs_finetuning=False, pretraining=True) 104 | 105 | 106 | def plot_solution(pinn): 107 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200).flatten()[:, None] 108 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200).flatten()[:, None] 109 | X, T = np.meshgrid(x, t) 110 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 111 | 112 | pred = pinn(Tensor(X_star).to(DEVICE)) 113 | H_pred = pred.detach().cpu().numpy().reshape(X.shape) 114 | 115 | plt.figure(figsize=(10, 8)) 116 | 
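    # H_pred has shape (len(t), len(x)); transpose it so x runs along the vertical axis of the plot.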
plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu',
117 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
118 | origin='lower', aspect='auto')
119 | plt.ylabel('x (cm)')
120 | plt.xlabel('t (seconds)')
121 | plt.colorbar().set_label('Temperature (°C)')
122 | plt.title("PINN Solution: 1D Heat Equation")
123 | plt.show()
124 | 
125 | 
126 | def plot_analytical_solution():
127 | L, c = 1, 1
128 | alpha = (c * np.pi / L) ** 2
129 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200)
130 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200)
131 | X, T = np.meshgrid(x, t)
132 | U = (np.exp(-alpha * T)) * np.sin(np.pi * X / L)
133 | 
134 | plt.figure(figsize=(10, 8))
135 | plt.imshow(U.T, interpolation='nearest', cmap='YlGnBu',
136 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
137 | origin='lower', aspect='auto')
138 | plt.ylabel('x (cm)')
139 | plt.xlabel('t (seconds)')
140 | plt.colorbar().set_label('Temperature (°C)')
141 | plt.title("Analytical Solution: 1D Heat Equation")
142 | plt.show()
143 | 
144 | 
145 | if __name__ == "__main__":
146 | pinn = setup_pinn()
147 | train_pinn(pinn, NUM_EPOCHS)
148 | plot_solution(pinn)
149 | plot_analytical_solution()
--------------------------------------------------------------------------------
/examples/Heat_Equation_1d/Heat_Equation_adaptive.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch import Tensor, ones
4 | from torch.autograd import grad
5 | from torch.utils.data import Dataset
6 | import matplotlib.pyplot as plt
7 | import NeuralSolvers as nsolv
8 | 
9 | from Heat_Equation import heat1d
10 | 
11 | # Constants
12 | DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
13 | NUM_EPOCHS = 10000
14 | DOMAIN_LOWER_BOUND = np.array([0, 0.0])
15 | DOMAIN_UPPER_BOUND = np.array([1.0, 2.0])
16 | NUM_INITIAL_POINTS = 50
17 | NUM_BOUNDARY_POINTS = 50
18 | NUM_COLLOCATION_POINTS = 20000
19 | NUM_SEED_POINTS = 10000
20 | HIDDEN_SIZE = 100
21 | NUM_HIDDEN = 4
22 | 
23 | class InitialConditionDataset(Dataset):
24 | def __init__(self, n0):
25 | super().__init__()
26 | L, c = 1, 1
27 | alpha = (c * np.pi / L) ** 2
28 | x = np.linspace(0, L, 200).flatten()[:, None]
29 | t = np.zeros(200).flatten()[:, None]
30 | u = (np.exp(-alpha * t)) * np.sin(np.pi * x / L)
31 | 
32 | idx = np.random.choice(x.shape[0], n0, replace=False)
33 | self.x, self.u, self.t = x[idx, :], u[idx, :], t[idx, :]
34 | 
35 | def __len__(self):
36 | return 1
37 | 
38 | def __getitem__(self, idx):
39 | x = np.concatenate([self.x, self.t], axis=1)
40 | y = self.u
41 | return Tensor(x).float(), Tensor(y).float()
42 | 
43 | class BoundaryConditionDataset(Dataset):
44 | def __init__(self, nb, is_lower):
45 | super().__init__()
46 | max_t = 2
47 | t = np.linspace(0, max_t, 200).flatten()[:, None]
48 | idx_t = np.random.choice(t.shape[0], nb, replace=False)
49 | tb = t[idx_t, :]
50 | x_val = DOMAIN_LOWER_BOUND[0] if is_lower else DOMAIN_UPPER_BOUND[0]
51 | self.x_b = np.concatenate((np.full_like(tb, x_val), tb), 1)
52 | 
53 | def __len__(self):
54 | return 1
55 | 
56 | def __getitem__(self, idx):
57 | return Tensor(self.x_b).float()
58 | 
59 | def setup_pinn():
60 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS)
61 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial condition')
62 | 
63 | bc_dataset_lb = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS, 
is_lower=True) 64 | bc_dataset_ub = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS, is_lower=False) 65 | 66 | def dirichlet_func(x): 67 | return torch.zeros_like(x)[:, 0].reshape(-1, 1) 68 | 69 | dirichlet_bc_lb = nsolv.pinn.datasets.DirichletBC(dirichlet_func, bc_dataset_lb, name='Lower dirichlet BC') 70 | dirichlet_bc_ub = nsolv.pinn.datasets.DirichletBC(dirichlet_func, bc_dataset_ub, name='Upper dirichlet BC') 71 | 72 | model = nsolv.models.MLP( 73 | input_size=2, output_size=1, device=DEVICE, 74 | hidden_size=HIDDEN_SIZE, num_hidden=NUM_HIDDEN, 75 | lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND 76 | ) 77 | 78 | sampler = nsolv.samplers.AdaptiveSampler(NUM_SEED_POINTS, model, heat1d) 79 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS, 80 | sampler, device=DEVICE) 81 | 82 | pde_loss = nsolv.pinn.PDELoss(geometry, heat1d, name='1D Heat') 83 | 84 | return nsolv.pinn.PINN(model, 2, 1, pde_loss, initial_condition, [dirichlet_bc_lb, dirichlet_bc_ub], device=DEVICE) 85 | 86 | def train_pinn(pinn): 87 | '''logger = nsolv.WandbLogger("1D Heat equation pinn", { 88 | "num_epochs": NUM_EPOCHS, 89 | "hidden_size": HIDDEN_SIZE, 90 | "num_hidden": NUM_HIDDEN 91 | }) 92 | ''' 93 | logger = None 94 | pinn.fit(NUM_EPOCHS, checkpoint_path='checkpoint.pt', restart=True, 95 | logger=logger, lbfgs_finetuning=False, pretraining=True) 96 | 97 | def plot_solution(pinn): 98 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200).flatten()[:, None] 99 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200).flatten()[:, None] 100 | X, T = np.meshgrid(x, t) 101 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 102 | 103 | pred = pinn(Tensor(X_star).to(DEVICE)) 104 | H_pred = pred.detach().cpu().numpy().reshape(X.shape) 105 | 106 | plt.figure(figsize=(10, 8)) 107 | plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu', 108 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]], 109 | origin='lower', aspect='auto') 110 | plt.ylabel('x (cm)') 111 | plt.xlabel('t (seconds)') 112 | plt.colorbar().set_label('Temperature (°C)') 113 | plt.title("PINN Solution: 1D Heat Equation (Adaptive)") 114 | plt.show() 115 | 116 | def plot_analytical_solution(): 117 | L, c = 1, 1 118 | alpha = (c * np.pi / L) ** 2 119 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200) 120 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200) 121 | X, T = np.meshgrid(x, t) 122 | U = (np.exp(-alpha * T)) * np.sin(np.pi * X / L) 123 | 124 | plt.figure(figsize=(10, 8)) 125 | plt.imshow(U.T, interpolation='nearest', cmap='YlGnBu', 126 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]], 127 | origin='lower', aspect='auto') 128 | plt.ylabel('x (cm)') 129 | plt.xlabel('t (seconds)') 130 | plt.colorbar().set_label('Temperature (°C)') 131 | 132 | plt.title("Analytical Solution: 1D Heat Equation") 133 | plt.show() 134 | 135 | if __name__ == "__main__": 136 | pinn = setup_pinn() 137 | train_pinn(pinn) 138 | pinn.load_model('best_model_pinn.pt') 139 | plot_solution(pinn) 140 | plot_analytical_solution() -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/Heat_Equation_inversion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import Tensor, ones 4 | from torch.autograd import grad 5 | from torch.utils.data import 
Dataset
6 | import matplotlib.pyplot as plt
7 | import NeuralSolvers as nsolv
8 | import wandb
9 | 
10 | # Constants
11 | DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
12 | NUM_EPOCHS = 10000
13 | DOMAIN_LOWER_BOUND = np.array([0, 0.0])
14 | DOMAIN_UPPER_BOUND = np.array([1.0, 2.0])
15 | NUM_INITIAL_POINTS = 10000
16 | NUM_COLLOCATION_POINTS = 20000
17 | 
18 | def derivatives(x, u):
19 | grads = ones(u.shape, device=u.device)
20 | grad_u = grad(u, x, create_graph=True, grad_outputs=grads)[0]
21 | u_x, u_t = grad_u[:, 0], grad_u[:, 1]
22 | 
23 | grads = ones(u_x.shape, device=u.device)
24 | u_xx = grad(u_x, x, create_graph=True, grad_outputs=grads)[0][:, 0]
25 | 
26 | u_x, u_t, u_xx = [tensor.reshape(-1, 1) for tensor in (u_x, u_t, u_xx)]
27 | return torch.cat([u_xx, u_t], dim=1)
28 | 
29 | class HPM_model(torch.nn.Module):
30 | def __init__(self):
31 | super().__init__()
32 | self.c = torch.nn.Parameter(torch.randn(1), requires_grad=True)
33 | 
34 | def forward(self, derivatives):
35 | return self.c * derivatives
36 | 
37 | class InitialConditionDataset(Dataset):
38 | def __init__(self, n0):
39 | super().__init__()
40 | L, c = 1, 1
41 | alpha = (c * np.pi / L) ** 2
42 | max_t, max_x = 10, L
43 | 
44 | t = np.linspace(0, max_t, 200)
45 | x = np.linspace(0, max_x, 200)
46 | X, T = np.meshgrid(x, t, indexing='ij')
47 | 
48 | U = (np.exp(-(alpha)*T)) * np.sin(np.pi*X/L)
49 | U, X, T = [arr.reshape(-1, 1) for arr in (U, X, T)]
50 | 
51 | idx = np.random.choice(X.shape[0], n0, replace=False)
52 | self.x, self.u, self.t = X[idx, :], U[idx, :], T[idx, :]
53 | 
54 | def __len__(self):
55 | return 1
56 | 
57 | def __getitem__(self, idx):
58 | x = np.concatenate([self.x, self.t], axis=1)
59 | y = self.u
60 | return Tensor(x).float(), Tensor(y).float()
61 | 
62 | def setup_pinn(args):
63 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS)
64 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Interpolation condition')
65 | 
66 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS,
67 | nsolv.samplers.LHSSampler(), device=DEVICE)
68 | 
69 | hpm_model = HPM_model()
70 | #wandb.watch(hpm_model)
71 | pde_loss = nsolv.pinn.HPMLoss(geometry, "HPM_loss", derivatives, hpm_model)
72 | 
73 | model = nsolv.models.MLP(
74 | input_size=2, output_size=1, device=DEVICE,
75 | hidden_size=args.hidden_size, num_hidden=args.num_hidden,
76 | lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND
77 | )
78 | 
79 | return nsolv.pinn.PINN(model, 2, 1, pde_loss, initial_condition, boundary_condition=None, device=DEVICE)
80 | 
81 | def train_pinn(pinn, args):
82 | #logger = nsolv.WandbLogger("1D Heat equation inversion", args)
83 | logger = None
84 | pinn.fit(args.num_epochs, epochs_pt=200, checkpoint_path=None, restart=True,
85 | logger=logger, lbfgs_finetuning=False, pretraining=True)
86 | 
87 | def plot_solution(pinn):
88 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200).flatten()[:, None]
89 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200).flatten()[:, None]
90 | X, T = np.meshgrid(x, t, indexing='ij')
91 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
92 | 
93 | pred = pinn(Tensor(X_star).to(DEVICE))
94 | H_pred = pred.detach().cpu().numpy().reshape(X.shape)
95 | 
96 | plt.figure(figsize=(10, 8))
97 | plt.imshow(H_pred, interpolation='nearest', cmap='YlGnBu',
98 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
99 | origin='lower', aspect='auto')
100 | 
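    # No transpose here: np.meshgrid(..., indexing='ij') already puts x along the rows of H_pred.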
plt.ylabel('x (cm)') 101 | plt.xlabel('t (seconds)') 102 | plt.colorbar().set_label('Temperature (°C)') 103 | plt.title("PINN Solution: 1D Heat Equation Inversion") 104 | plt.show() 105 | 106 | def plot_analytical_solution(): 107 | L, c = 1, 1 108 | alpha = (c * np.pi / L) ** 2 109 | t = np.linspace(0, DOMAIN_UPPER_BOUND[1], 200) 110 | x = np.linspace(DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0], 200) 111 | X, T = np.meshgrid(x, t) 112 | U = (np.exp(-alpha * T)) * np.sin(np.pi * X / L) 113 | 114 | plt.figure(figsize=(10, 8)) 115 | plt.imshow(U.T, interpolation='nearest', cmap='YlGnBu', 116 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]], 117 | origin='lower', aspect='auto') 118 | plt.ylabel('x (cm)') 119 | plt.xlabel('t (seconds)') 120 | plt.colorbar().set_label('Temperature (°C)') 121 | plt.title("Analytical Solution: 1D Heat Equation") 122 | plt.show() 123 | 124 | if __name__ == "__main__": 125 | import argparse 126 | parser = argparse.ArgumentParser() 127 | parser.add_argument("--num_epochs", type=int, default=10000) 128 | parser.add_argument("--hidden_size", type=int, default=100) 129 | parser.add_argument("--num_hidden", type=int, default=4) 130 | args = parser.parse_args() 131 | 132 | pinn = setup_pinn(args) 133 | train_pinn(pinn, args) 134 | pinn.load_model('best_model_pinn.pt', 'best_model_hpm.pt') 135 | plot_solution(pinn) 136 | plot_analytical_solution() 137 | 138 | # Print the inferred parameter 139 | print(f"Inferred parameter c: {pinn.pde_loss.hpm_model.c.item()}") 140 | -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/Initial_condition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/Initial_condition.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/PDE_residual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PDE_residual.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/PINN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PINN.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/PINN_different_t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PINN_different_t.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/PINNvsSol_100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PINNvsSol_100.png -------------------------------------------------------------------------------- /examples/Heat_Equation_1d/PINNvsSol_199.png: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PINNvsSol_199.png
--------------------------------------------------------------------------------
/examples/Heat_Equation_1d/PINNvsSol_50.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Heat_Equation_1d/PINNvsSol_50.png
--------------------------------------------------------------------------------
/examples/Heat_Equation_1d/README.md:
--------------------------------------------------------------------------------
1 | # PINN example of a 1D Heat Equation
2 | 
3 | 
4 | ## How to run:
5 | 
6 | python Heat_Equation.py
7 | 
8 | Hyperparameters (number of epochs, initial/boundary/collocation points and network size) are set as constants at the top of the script; Heat_Equation_inversion.py additionally accepts --num_epochs, --hidden_size and --num_hidden.
9 | 
10 | ---
11 | ## Problem Setup
12 | PDE:
13 | 
14 | ∂u/∂t = c² ∂²u/∂x²,  0 < x < L,  t > 0  (here c = 1, L = 1)
15 | 
16 | Initial condition:
17 | 
18 | u(x, 0) = sin(πx/L)
19 | 
20 | Boundary conditions:
21 | 
22 | u(0, t) = 0,  u(L, t) = 0
23 | 
24 | 
25 | Analytical Solution:
26 | 
27 | u(x, t) = exp(-(cπ/L)² t) · sin(πx/L)
28 | 
29 | 
30 | ---
31 | ## Plots
32 | 
33 | 
34 | 
35 | PINN | Analytical solution
36 | :-------------------------:|:-------------------------:
37 | ![PINN](PINN.png) | ![Analytical Solution](Analytical_Solution.png)
38 | 
39 | ---
40 | 1)
41 | 
44 | 45 | MAE: 0.001435936527217306 46 | 47 | MSE: 3.034339863805564e-06 48 | 49 | Relative error (%): 0.24696509076465287 50 | 51 | --- 52 | 2) 53 |
56 | 57 | --- 58 | 3) 59 |
62 | 63 | MAE: 0.0011075189671584994 64 | 65 | MSE: 1.7055700155598833e-06 66 | 67 | Relative error (%): 26.39157042734111 68 | 69 | --- 70 | 4) 71 |
74 | 75 | MAE: 0.0013523007271688022 76 | 77 | MSE: 1.8343774149890471e-06 78 | 79 | Relative error (%): 3901.2322415257822 80 | 81 | --- 82 | 83 | 5) 84 |
87 | 
88 | MAE: 0.0008895651348377466
89 | 
90 | MSE: 8.096860837205663e-07
91 | 
92 | Relative error (%): 47686039.576932296
93 | (Relative error is measured against the analytical solution, which decays towards zero; where it is nearly zero the relative error diverges even though the absolute errors stay small.)
94 | ---
95 | 6)
96 | 
99 | 
100 | ---
101 | 7)
102 | 
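The MAE / MSE / relative-error values above compare the PINN prediction against the analytical solution. The evaluation script itself is not included in this example, so the following is only a minimal sketch of the assumed metric definitions (`report_errors` is a hypothetical helper):

```python
import numpy as np

def report_errors(pred, exact):
    """Assumed error metrics between a PINN prediction and the analytical solution."""
    mae = np.mean(np.abs(pred - exact))
    mse = np.mean((pred - exact) ** 2)
    # Norm-based relative error: diverges wherever the analytical solution
    # is close to zero, even if the absolute errors are tiny.
    rel = 100.0 * np.linalg.norm(pred - exact) / np.linalg.norm(exact)
    print(f"MAE: {mae}\nMSE: {mse}\nRelative error (%): {rel}")
```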
105 | -------------------------------------------------------------------------------- /examples/Schroedinger_1d/NLS.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/examples/Schroedinger_1d/NLS.mat -------------------------------------------------------------------------------- /examples/Schroedinger_1d/Schroedinger-adaptive.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from argparse import ArgumentParser 3 | import numpy as np 4 | import scipy.io 5 | import torch 6 | from torch import Tensor 7 | from torch.utils.data import Dataset 8 | import matplotlib.pyplot as plt 9 | import NeuralSolvers as nsolv 10 | from Schroedinger import schroedinger1d 11 | 12 | 13 | DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' 14 | 15 | 16 | 17 | class BoundaryConditionDataset(Dataset): 18 | 19 | def __init__(self, nb, lb, ub): 20 | """ 21 | Constructor of the initial condition dataset 22 | 23 | Args: 24 | n0 (int) 25 | """ 26 | super(type(self)).__init__() 27 | data = scipy.io.loadmat('NLS.mat') 28 | t = data['tt'].flatten()[:, None] 29 | idx_t = np.random.choice(t.shape[0], nb, replace=False) 30 | tb = t[idx_t, :] 31 | self.x_lb = np.concatenate((0 * tb + lb[0], tb), 1) # (lb[0], tb) 32 | self.x_ub = np.concatenate((0 * tb + ub[0], tb), 1) # (ub[0], tb) 33 | 34 | def __getitem__(self, idx): 35 | """ 36 | Returns data for initial state 37 | """ 38 | return Tensor(self.x_lb).float(), Tensor(self.x_ub).float() 39 | 40 | def __len__(self): 41 | """ 42 | There exists no batch processing. So the size is 1 43 | """ 44 | return 1 45 | 46 | 47 | class InitialConditionDataset(Dataset): 48 | 49 | def __init__(self, n0): 50 | """ 51 | Constructor of the boundary condition dataset 52 | 53 | Args: 54 | n0 (int) 55 | """ 56 | super(type(self)).__init__() 57 | data = scipy.io.loadmat('NLS.mat') 58 | x = data['x'].flatten()[:, None] 59 | t = data['tt'].flatten()[:, None] 60 | Exact = data['uu'] 61 | Exact_u = np.real(Exact) 62 | Exact_v = np.imag(Exact) 63 | idx_x = np.random.choice(x.shape[0], n0, replace=False) 64 | self.x = x[idx_x, :] 65 | self.u = Exact_u[idx_x, 0:1] 66 | self.v = Exact_v[idx_x, 0:1] 67 | self.t = np.zeros(self.x.shape) 68 | 69 | def __len__(self): 70 | """ 71 | There exists no batch processing. 
So the size is 1 72 | """ 73 | return 1 74 | 75 | def __getitem__(self, idx): 76 | x = np.concatenate([self.x, self.t], axis=1) 77 | y = np.concatenate([self.u, self.v], axis=1) 78 | return Tensor(x).float(), Tensor(y).float() 79 | 80 | 81 | if __name__ == "__main__": 82 | parser = ArgumentParser() 83 | parser.add_argument("--num_epochs", dest="num_epochs", type=int, default=10000, help='Number of training iterations') 84 | parser.add_argument('--n0', dest='n0', type=int, default=50, help='Number of input points for initial condition') 85 | parser.add_argument('--nb', dest='nb', type=int, default=50, help='Number of input points for boundary condition') 86 | parser.add_argument('--nf', dest='nf', type=int, default=20000, help='Number of input points for pde loss') 87 | parser.add_argument('--ns', dest='ns', type=int, default=10000, help='Number of seed points') 88 | parser.add_argument('--nf_batch', dest='nf_batch', type=int, default=20000, help='Batch size for sampler') 89 | parser.add_argument('--num_hidden', dest='num_hidden', type=int, default=4, help='Number of hidden layers') 90 | parser.add_argument('--hidden_size', dest='hidden_size', type=int, default=100, help='Size of hidden layers') 91 | parser.add_argument('--annealing', dest='annealing', type=int, default=0, help='Enables annealing with 1') 92 | parser.add_argument('--annealing_cycle', dest='annealing_cycle', type=int, default=5, help='Cycle of lr annealing') 93 | parser.add_argument('--track_gradient', dest='track_gradient', default=1, help='Enables tracking of the gradients') 94 | args = parser.parse_args() 95 | # Domain bounds 96 | lb = np.array([-5.0, 0.0]) 97 | ub = np.array([5.0, np.pi / 2]) 98 | # initial condition 99 | ic_dataset = InitialConditionDataset(n0=args.n0) 100 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial condition') 101 | # boundary conditions 102 | bc_dataset = BoundaryConditionDataset(nb=args.nb, lb=lb, ub=ub) 103 | periodic_bc_u = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u periodic boundary condition") 104 | periodic_bc_v = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v periodic boundary condition") 105 | periodic_bc_u_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u_x periodic boundary condition", 1, 0) 106 | periodic_bc_v_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v_x periodic boundary condition", 1, 0) 107 | 108 | 109 | model = nsolv.models.MLP(input_size=2, 110 | output_size=2, 111 | hidden_size=args.hidden_size, 112 | num_hidden=args.num_hidden, 113 | lb=lb, 114 | ub=ub) 115 | 116 | # sampler 117 | sampler = nsolv.samplers.AdaptiveSampler(args.ns, model, schroedinger1d) 118 | 119 | # geometry of the domain 120 | geometry = nsolv.NDCube(lb, ub, args.nf, args.nf_batch, sampler) 121 | 122 | pde_loss = nsolv.pinn.PDELoss(geometry, schroedinger1d, name='1D Schrodinger') 123 | 124 | 125 | #logger = nsolv.loggers.WandbLogger('1D Schrödinger Equation', args, 'aipp') 126 | logger = None 127 | pinn = nsolv.PINN(model, 2, 2, pde_loss, initial_condition, [periodic_bc_u, 128 | periodic_bc_v, 129 | periodic_bc_u_x, 130 | periodic_bc_v_x], device=DEVICE) 131 | pinn.fit(args.num_epochs, checkpoint_path='checkpoint.pt', 132 | restart=True, logger=logger, activate_annealing=args.annealing, annealing_cycle=args.annealing_cycle, 133 | writing_cycle=500, 134 | track_gradient=args.track_gradient) 135 | pinn.load_model('best_model_pinn.pt') 136 | 137 | # Plotting 138 | data = scipy.io.loadmat('NLS.mat') 139 | t = data['tt'].flatten()[:, None] 140 | x = 
data['x'].flatten()[:, None] 141 | Exact = data['uu'] 142 | Exact_u = np.real(Exact) 143 | Exact_v = np.imag(Exact) 144 | Exact_h = np.sqrt(Exact_u ** 2 + Exact_v ** 2) 145 | X, T = np.meshgrid(x, t) 146 | 147 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 148 | u_star = Exact_u.T.flatten()[:, None] 149 | v_star = Exact_v.T.flatten()[:, None] 150 | h_star = Exact_h.T.flatten()[:, None] 151 | 152 | pred = model(Tensor(X_star).cuda()) 153 | pred_u = pred[:, 0].detach().cpu().numpy() 154 | pred_v = pred[:, 1].detach().cpu().numpy() 155 | H_pred = np.sqrt(pred_u ** 2 + pred_v**2) 156 | H_pred = H_pred.reshape(X.shape) 157 | plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu', 158 | extent=[lb[1], ub[1], lb[0], ub[0]], 159 | origin='lower', aspect='auto') 160 | plt.colorbar() 161 | plt.show() 162 | -------------------------------------------------------------------------------- /examples/Schroedinger_1d/Schroedinger.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.io 3 | import torch 4 | from torch import Tensor, ones, stack 5 | from torch.autograd import grad 6 | from torch.utils.data import Dataset 7 | import matplotlib.pyplot as plt 8 | import NeuralSolvers as nsolv 9 | 10 | # Constants 11 | DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' 12 | NUM_EPOCHS = 1000 # 10000 13 | DOMAIN_LOWER_BOUND = np.array([-5.0, 0.0]) 14 | DOMAIN_UPPER_BOUND = np.array([5.0, np.pi / 2]) 15 | NUM_INITIAL_POINTS = 50 16 | NUM_BOUNDARY_POINTS = 50 17 | NUM_COLLOCATION_POINTS = 20000 18 | 19 | def schroedinger1d(x, u): 20 | u_real, u_imag = u[:, 0], u[:, 1] 21 | 22 | grads = ones(u_real.shape, device=u.device) 23 | grad_u_real = grad(u_real, x, create_graph=True, grad_outputs=grads)[0] 24 | grad_u_imag = grad(u_imag, x, create_graph=True, grad_outputs=grads)[0] 25 | 26 | u_real_x, u_real_t = grad_u_real[:, 0], grad_u_real[:, 1] 27 | u_imag_x, u_imag_t = grad_u_imag[:, 0], grad_u_imag[:, 1] 28 | 29 | u_real_xx = grad(u_real_x, x, create_graph=True, grad_outputs=grads)[0][:, 0] 30 | u_imag_xx = grad(u_imag_x, x, create_graph=True, grad_outputs=grads)[0][:, 0] 31 | 32 | f_real = u_real_t + 0.5 * u_imag_xx + (u_real ** 2 + u_imag ** 2) * u_imag 33 | f_imag = u_imag_t - 0.5 * u_real_xx - (u_real ** 2 + u_imag ** 2) * u_real 34 | 35 | return stack([f_real, f_imag], 1) 36 | 37 | 38 | class InitialConditionDataset(Dataset): 39 | def __init__(self, n0, file_path = 'NLS.mat',device="cpu"): 40 | super().__init__() 41 | data = scipy.io.loadmat(file_path) 42 | x = data['x'].flatten()[:, None] 43 | Exact = data['uu'] 44 | Exact_u = np.real(Exact) 45 | Exact_v = np.imag(Exact) 46 | idx_x = np.random.choice(x.shape[0], n0, replace=False) 47 | self.x = Tensor(x[idx_x, :]).to(device).float() 48 | self.u = Tensor(Exact_u[idx_x, 0:1]).to(device).float() 49 | self.v = Tensor(Exact_v[idx_x, 0:1]).to(device).float() 50 | self.t = Tensor(np.zeros(self.x.shape)).to(device).float() 51 | 52 | def __len__(self): 53 | return 1 54 | 55 | def __getitem__(self, idx): 56 | x = torch.cat([self.x, self.t], dim=1) 57 | y = torch.cat([self.u, self.v], dim=1) 58 | 59 | return x,y 60 | 61 | 62 | class BoundaryConditionDataset(Dataset): 63 | def __init__(self, nb, lower_bound, upper_bound, file_path = 'NLS.mat', device = "cpu"): 64 | super().__init__() 65 | data = scipy.io.loadmat(file_path) 66 | t = data['tt'].flatten()[:, None] 67 | idx_t = np.random.choice(t.shape[0], nb, replace=False) 68 | tb = t[idx_t, :] 69 | self.x_lb = 
np.concatenate((np.full_like(tb, lower_bound[0]), tb), 1) 70 | self.x_ub = np.concatenate((np.full_like(tb, upper_bound[0]), tb), 1) 71 | self.x_lb = Tensor(self.x_lb).float().to(device) 72 | self.x_ub = Tensor(self.x_ub).float().to(device) 73 | 74 | def __len__(self): 75 | return 1 76 | 77 | def __getitem__(self, idx): 78 | return self.x_lb, self.x_ub 79 | 80 | 81 | def setup_pinn(file_path = 'NLS.mat', model = None): 82 | ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS,file_path=file_path, device=DEVICE) 83 | initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial Condition loss') 84 | 85 | bc_dataset = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS,file_path=file_path, 86 | lower_bound=DOMAIN_LOWER_BOUND,upper_bound=DOMAIN_UPPER_BOUND,device=DEVICE) 87 | periodic_bc_u = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u periodic boundary condition") 88 | periodic_bc_v = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v periodic boundary condition") 89 | periodic_bc_u_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u_x periodic boundary condition", 1, 0) 90 | periodic_bc_v_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v_x periodic boundary condition", 1, 0) 91 | 92 | geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS, 93 | nsolv.samplers.LHSSampler(), device=DEVICE) 94 | 95 | pde_loss = nsolv.pinn.PDELoss(geometry, schroedinger1d, name='PDE loss') 96 | 97 | if(model is None): 98 | model = nsolv.models.MLP( 99 | input_size=2, output_size=2, device=DEVICE, 100 | hidden_size=100, num_hidden=4, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND, 101 | activation=torch.tanh 102 | ) 103 | 104 | return nsolv.PINN(model, 2, 2, pde_loss, initial_condition, 105 | [periodic_bc_u, periodic_bc_v, periodic_bc_u_x, periodic_bc_v_x], device=DEVICE) 106 | 107 | 108 | def train_pinn(pinn, num_epochs, logger = None): 109 | #logger = nsolv.WandbLogger('1D Schrödinger Equation', {"num_epochs": num_epochs}) 110 | pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, logger=logger, 111 | lbfgs_finetuning=False, writing_cycle=500) 112 | 113 | 114 | def plot_solution(pinn, file_path = 'NLS.mat'): 115 | data = scipy.io.loadmat(file_path) 116 | t = data['tt'].flatten()[:, None] 117 | x = data['x'].flatten()[:, None] 118 | X, T = np.meshgrid(x, t) 119 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 120 | 121 | pred = pinn(Tensor(X_star).to(DEVICE)) 122 | pred_u = pred[:, 0].detach().cpu().numpy() 123 | pred_v = pred[:, 1].detach().cpu().numpy() 124 | H_pred = np.sqrt(pred_u ** 2 + pred_v ** 2).reshape(X.shape) 125 | 126 | plt.figure(figsize=(10, 8)) 127 | plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu', 128 | extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]], 129 | origin='lower', aspect='auto') 130 | plt.ylabel('x') 131 | plt.xlabel('t') 132 | plt.colorbar().set_label('|ψ|') 133 | plt.title("PINN Solution: 1D Schrödinger Equation") 134 | plt.show() 135 | 136 | 137 | def plot_exact_solution(file_path = 'NLS.mat'): 138 | data = scipy.io.loadmat(file_path) 139 | t = data['tt'].flatten()[:, None] 140 | x = data['x'].flatten()[:, None] 141 | Exact = data['uu'] 142 | Exact_u = np.real(Exact) 143 | Exact_v = np.imag(Exact) 144 | Exact_h = np.sqrt(Exact_u ** 2 + Exact_v ** 2) 145 | 146 | plt.figure(figsize=(10, 8)) 147 | plt.imshow(Exact_h.T, interpolation='nearest', cmap='YlGnBu', 148 | extent=[DOMAIN_LOWER_BOUND[1], 
def setup_pinn(file_path='NLS.mat', model=None):
    ic_dataset = InitialConditionDataset(n0=NUM_INITIAL_POINTS, file_path=file_path, device=DEVICE)
    initial_condition = nsolv.pinn.datasets.InitialCondition(ic_dataset, name='Initial Condition loss')

    bc_dataset = BoundaryConditionDataset(nb=NUM_BOUNDARY_POINTS, file_path=file_path,
                                          lower_bound=DOMAIN_LOWER_BOUND, upper_bound=DOMAIN_UPPER_BOUND,
                                          device=DEVICE)
    periodic_bc_u = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u periodic boundary condition")
    periodic_bc_v = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v periodic boundary condition")
    periodic_bc_u_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 0, "u_x periodic boundary condition", 1, 0)
    periodic_bc_v_x = nsolv.pinn.datasets.PeriodicBC(bc_dataset, 1, "v_x periodic boundary condition", 1, 0)

    geometry = nsolv.NDCube(DOMAIN_LOWER_BOUND, DOMAIN_UPPER_BOUND, NUM_COLLOCATION_POINTS, NUM_COLLOCATION_POINTS,
                            nsolv.samplers.LHSSampler(), device=DEVICE)

    pde_loss = nsolv.pinn.PDELoss(geometry, schroedinger1d, name='PDE loss')

    if model is None:
        model = nsolv.models.MLP(
            input_size=2, output_size=2, device=DEVICE,
            hidden_size=100, num_hidden=4, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND,
            activation=torch.tanh
        )

    return nsolv.PINN(model, 2, 2, pde_loss, initial_condition,
                      [periodic_bc_u, periodic_bc_v, periodic_bc_u_x, periodic_bc_v_x], device=DEVICE)


def train_pinn(pinn, num_epochs, logger=None):
    # logger = nsolv.WandbLogger('1D Schrödinger Equation', {"num_epochs": num_epochs})
    pinn.fit(num_epochs, checkpoint_path='checkpoint.pt', restart=True, logger=logger,
             lbfgs_finetuning=False, writing_cycle=500)


def plot_solution(pinn, file_path='NLS.mat'):
    data = scipy.io.loadmat(file_path)
    t = data['tt'].flatten()[:, None]
    x = data['x'].flatten()[:, None]
    X, T = np.meshgrid(x, t)
    X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))

    pred = pinn(Tensor(X_star).to(DEVICE))
    pred_u = pred[:, 0].detach().cpu().numpy()
    pred_v = pred[:, 1].detach().cpu().numpy()
    H_pred = np.sqrt(pred_u ** 2 + pred_v ** 2).reshape(X.shape)

    plt.figure(figsize=(10, 8))
    plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu',
               extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
               origin='lower', aspect='auto')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.colorbar().set_label('|ψ|')
    plt.title("PINN Solution: 1D Schrödinger Equation")
    plt.show()


def plot_exact_solution(file_path='NLS.mat'):
    data = scipy.io.loadmat(file_path)
    t = data['tt'].flatten()[:, None]
    x = data['x'].flatten()[:, None]
    Exact = data['uu']
    Exact_u = np.real(Exact)
    Exact_v = np.imag(Exact)
    Exact_h = np.sqrt(Exact_u ** 2 + Exact_v ** 2)

    plt.figure(figsize=(10, 8))
    # Exact_h is laid out (x, t), which already matches the extent below;
    # transposing it here would swap the axes relative to plot_solution.
    plt.imshow(Exact_h, interpolation='nearest', cmap='YlGnBu',
               extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
               origin='lower', aspect='auto')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.colorbar().set_label('|ψ|')
    plt.title("Exact Solution: 1D Schrödinger Equation")
    plt.show()


def compare_solutions(pinn, file_path='NLS.mat'):
    data = scipy.io.loadmat(file_path)
    t = data['tt'].flatten()[:, None]
    x = data['x'].flatten()[:, None]
    Exact = data['uu']
    Exact_u = np.real(Exact)
    Exact_v = np.imag(Exact)
    Exact_h = np.sqrt(Exact_u ** 2 + Exact_v ** 2)

    X, T = np.meshgrid(x, t)
    X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))

    pred = pinn(Tensor(X_star).to(DEVICE))
    pred_u = pred[:, 0].detach().cpu().numpy()
    pred_v = pred[:, 1].detach().cpu().numpy()
    H_pred = np.sqrt(pred_u ** 2 + pred_v ** 2).reshape(X.shape)

    error = np.abs(H_pred - Exact_h.T)

    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    # Un-transposed: keeps the orientation consistent with the other two panels.
    plt.imshow(Exact_h, interpolation='nearest', cmap='YlGnBu',
               extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
               origin='lower', aspect='auto')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.colorbar().set_label('|ψ|')
    plt.title("Exact Solution")

    plt.subplot(1, 3, 2)
    plt.imshow(H_pred.T, interpolation='nearest', cmap='YlGnBu',
               extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
               origin='lower', aspect='auto')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.colorbar().set_label('|ψ|')
    plt.title("PINN Solution")

    plt.subplot(1, 3, 3)
    plt.imshow(error.T, interpolation='nearest', cmap='hot',
               extent=[DOMAIN_LOWER_BOUND[1], DOMAIN_UPPER_BOUND[1], DOMAIN_LOWER_BOUND[0], DOMAIN_UPPER_BOUND[0]],
               origin='lower', aspect='auto')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.colorbar().set_label('Error')
    plt.title("Absolute Error")

    plt.tight_layout()
    plt.show()

    print(f"Mean Absolute Error: {np.mean(error)}")
    print(f"Max Absolute Error: {np.max(error)}")


if __name__ == "__main__":
    pinn = setup_pinn()
    train_pinn(pinn, NUM_EPOCHS)
    plot_solution(pinn)
    plot_exact_solution()
    compare_solutions(pinn)
--------------------------------------------------------------------------------
/examples/UKD_Heat_2d/datasets.py:
--------------------------------------------------------------------------------
"""
Module with datasets needed to train an HPM model using NeuralSolvers
(see https://github.com/ComputationalRadiationPhysics/NeuralSolvers).
Data is stored in multiple h5 files, one file per time frame.
Each h5 file is essentially a dictionary with 2 items:
    timing (numpy array [1]): relative time point when the image was acquired [s].
    seq (numpy array [640*480]): thermal data at the time point timing.
Initializing a dataset requires the argument data_info containing information about the data:
    path_data (str): path to the data folder.
    num_t (int): number of time frames in the dataset.
    t_step (int): step between two consecutive frames in the dataset.
    pix_step (int): step between two consecutive spatial points in the dataset.
    num_x (int): length of an image (640 for UKD data).
    num_y (int): width of an image (480 for UKD data).
    t_min (float): the time point when the first image in the dataset was acquired [s].
    t_max (float): the time point when the last image in the dataset was acquired [s].
    spat_res (float): pixel resolution of the data (approx. 0.3 for UKD data) [mm2/pixel].
"""
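# Example data_info (illustrative values only; path_data, num_t and the time
# bounds depend on the recording — only num_x/num_y/spat_res are the documented
# UKD values):
#
#   data_info = {
#       "path_data": "./ukd_data/",   # folder containing 0.h5, 1.h5, ...
#       "num_t": 1000, "t_step": 5, "pix_step": 4,
#       "num_x": 640, "num_y": 480,
#       "t_min": 0.0, "t_max": 60.0,
#       "spat_res": 0.3,
#   }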
import os
import h5py
import torch
import numpy as np
from torch.utils.data import Dataset
from skimage.restoration import denoise_bilateral
from skimage.filters import sobel
from skimage.segmentation import watershed
from scipy.ndimage import binary_fill_holes


def load_frame(path_data, t_frame):
    """
    Load the t_frame-th image from the data folder.
    Args:
        path_data (str): path to the data folder.
        t_frame (int): number of the time frame to take.
    Returns:
        value (numpy array): image data.
        timing (float): relative time point when the image was acquired [s].
    """
    # Check if the given path to the data is valid
    if not os.path.exists(path_data):
        raise FileNotFoundError('Could not find file ' + path_data)
    # Load the data
    h5_file = h5py.File(path_data + str(t_frame) + '.h5', 'r')
    value = np.array(h5_file['seq'][:])
    timing = np.array(h5_file['timing'][:])
    h5_file.close()
    return value, timing


def segmentation(path_data, t_frame, num_x, num_y, params):
    """
    Calculate a segmentation mask for the brain cortex depicted at time point t_frame.
    Segmentation is based on smoothing + the watershed algorithm with a predefined threshold.
    Args:
        path_data (str): path to the data folder.
        t_frame (int): number of the time frame to take.
        num_x (int): length of an image.
        num_y (int): width of an image.
        params (list): list of the form [threshold, sigma_color, sigma_spatial].
            threshold (float): absolute minimum height value used during processing.
            sigma_color (float): filter sigma in the color space.
            sigma_spatial (float): filter sigma in the coordinate space.
    Returns:
        seg_mask (numpy array [num_x, num_y]): binary segmentation mask.
    """
    threshold, sigma_color, sigma_spatial = params
    # Load the image at time point t_frame
    value, _ = load_frame(path_data, t_frame)
    # Reshape the image from a 1D array to a 2D array
    # (np.float/np.int were removed in NumPy 1.20+, so use the builtin types)
    value = np.asarray(value, dtype=float).reshape(num_x, num_y)
    # Apply a bilateral filter to improve segmentation quality
    # (for scikit-image < 0.19 pass multichannel=False; newer versions default
    # to channel_axis=None, which is what a single-channel image needs)
    value = denoise_bilateral(
        value,
        sigma_color=sigma_color,
        sigma_spatial=sigma_spatial)
    # Segmentation algorithm
    elevation_map = sobel(value)
    markers = np.zeros_like(value)
    markers[value > threshold] = 2
    markers[value <= threshold] = 1
    seg_mask = watershed(elevation_map, markers)
    seg_mask = binary_fill_holes(seg_mask - 1)
    seg_mask = np.array(seg_mask, dtype=int)
    return seg_mask
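# Usage sketch (hypothetical path; [32.4, 5, 5] are the empirical UKD
# segmentation parameters used in pennes_hpm.py):
#
#   seg_mask = segmentation("./ukd_data/", t_frame=0, num_x=640, num_y=480,
#                           params=[32.4, 5, 5])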
class InitialConditionDataset(Dataset):
    """
    Dataset with points (x,y,t,u) to train an interpolation model u_hat: u_hat(x,y,t) ≈ u.
    """
    def __init__(self, data_info, batch_size, num_batches, segm_params):
        """Constructor of the initial condition dataset.
        Args:
            data_info (dict): dictionary with info about the data (see the module docs).
            batch_size (int): size of a mini-batch in the dataset.
            num_batches (int): number of mini-batches in the dataset.
            segm_params (list): [threshold, sigma_color, sigma_spatial] passed to segmentation().
        """
        self.u_values = []
        self.x_values = []
        self.y_values = []
        self.t_values = []
        # Check if the given path to the data is valid
        if not os.path.exists(data_info["path_data"]):
            raise FileNotFoundError('Could not find file ' + data_info["path_data"])
        # Calculate the segmentation mask for the data
        seg_mask = segmentation(
            data_info["path_data"],
            0,
            data_info["num_x"],
            data_info["num_y"],
            segm_params)
        # Load each t_step-th frame from the dataset
        for t_frame in range(0, data_info["num_t"], data_info["t_step"]):
            # Load the image from the dataset at time point t_frame
            u_exact, timing = load_frame(data_info["path_data"], t_frame)
            u_exact = u_exact.reshape(data_info["num_x"], data_info["num_y"])
            u_exact = u_exact * seg_mask  # apply segmentation
            # Sample only each pix_step-th spatial point from the image
            for x_i in range(0, data_info["num_x"], data_info["pix_step"]):
                for y_i in range(0, data_info["num_y"], data_info["pix_step"]):
                    if seg_mask[x_i, y_i] != 0:  # neglect non-cortex data
                        self.u_values.append(u_exact[x_i, y_i])
                        self.x_values.append(x_i)
                        self.y_values.append(y_i)
                        self.t_values.append(timing)

        self.u_values = np.array(self.u_values).reshape(-1)
        self.x_values = np.array(self.x_values).reshape(-1)
        self.y_values = np.array(self.y_values).reshape(-1)
        self.t_values = np.array(self.t_values).reshape(-1)
        # Fewer points may have been loaded than batch_size * num_batches suggests
        # => adapt num_batches to the real number of batches to avoid empty batches
        self.batch_size = batch_size
        num_samples = min((num_batches * batch_size, len(self.x_values)))
        self.num_batches = num_samples // self.batch_size
        # Convert grid coordinates to physical quantities ([mm])
        self.x_values = self.x_values * data_info["spat_res"]
        self.y_values = self.y_values * data_info["spat_res"]
        # Create lists with boundary values for the spatio-temporal coordinates
        self.low_bound = [
            self.x_values.min(),
            self.y_values.min(),
            self.t_values.min()]
        self.up_bound = [
            self.x_values.max(),
            self.y_values.max(),
            self.t_values.max()]
        dtype1 = torch.FloatTensor
        # Generate a random permutation of indices
        np.random.seed(1234)
        rand_idx = np.random.permutation(self.x_values.shape[0])
        # Permute the data points
        self.x_values = self.x_values[rand_idx]
        self.y_values = self.y_values[rand_idx]
        self.t_values = self.t_values[rand_idx]
        self.u_values = self.u_values[rand_idx]
        # Slice the data for training and convert to torch tensors
        self.x_values = dtype1(self.x_values[:num_samples])
        self.y_values = dtype1(self.y_values[:num_samples])
        self.t_values = dtype1(self.t_values[:num_samples])
        self.u_values = dtype1(self.u_values[:num_samples])
        self.low_bound = dtype1(self.low_bound)
        self.up_bound = dtype1(self.up_bound)

    def __len__(self):
        """
        Number of mini-batches in the dataset.
        """
        return self.num_batches

    def __getitem__(self, index):
        """
        Returns the mini-batch at the given index containing X, u.
        Args:
            index (int): index of the mini-batch.
        Returns:
            X: spatio-temporal coordinates x,y,t concatenated.
            u: real-valued function of the spatio-temporal coordinates.
        """
        # Generate a batch for the initial solution
        x_values = self.x_values[index * self.batch_size: (index + 1) * self.batch_size]
        y_values = self.y_values[index * self.batch_size: (index + 1) * self.batch_size]
        t_values = self.t_values[index * self.batch_size: (index + 1) * self.batch_size]
        u_values = self.u_values[index * self.batch_size: (index + 1) * self.batch_size]
        return torch.stack([x_values, y_values, t_values], 1), u_values.reshape(-1, 1)
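# Mini-batches can be drawn directly by index, without a DataLoader; e.g. for
# an instance `ic_dataset` (name as used in pennes_hpm.py):
#
#   X, u = ic_dataset[0]   # X: (batch_size, 3) with columns x,y,t; u: (batch_size, 1)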
class PDEDataset(Dataset):
    """
    Dataset with points (x,y,t) to train the HPM model on: HPM(x,y,t) ≈ du/dt.
    """
    def __init__(self, data_info, batch_size, num_batches, segm_params):
        """Constructor of the residual points dataset.
        Args:
            data_info (dict): dictionary with info about the data.
            batch_size (int): size of a mini-batch in the dataset.
            num_batches (int): number of mini-batches in the dataset.
            segm_params (list): [threshold, sigma_color, sigma_spatial] passed to segmentation().
        """
        self.x_values = []
        self.y_values = []
        self.t_values = []
        seg_mask = segmentation(
            data_info["path_data"], 0, data_info["num_x"], data_info["num_y"], segm_params)
        # Consider only each t_step-th frame
        for t_frame in range(0, data_info["num_t"], data_info["t_step"]):
            timing = load_frame(data_info["path_data"], t_frame)[1]
            # Sample only each pix_step-th spatial point from the range
            for x_i in range(0, data_info["num_x"], data_info["pix_step"]):
                for y_i in range(0, data_info["num_y"], data_info["pix_step"]):
                    if seg_mask[x_i, y_i] != 0:  # neglect non-cortex data
                        self.x_values.append(x_i)
                        self.y_values.append(y_i)
                        self.t_values.append(timing)

        self.x_values = np.array(self.x_values).reshape(-1)
        self.y_values = np.array(self.y_values).reshape(-1)
        self.t_values = np.array(self.t_values).reshape(-1)
        # Fewer points may have been loaded than batch_size * num_batches suggests
        # => adapt num_batches to the real number of batches to avoid empty batches
        self.batch_size = batch_size
        num_samples = min((num_batches * batch_size, len(self.x_values)))
        self.num_batches = num_samples // self.batch_size
        # Convert grid coordinates to physical quantities ([mm])
        self.x_values = self.x_values * data_info["spat_res"]
        self.y_values = self.y_values * data_info["spat_res"]
        dtype1 = torch.FloatTensor
        # Generate a random permutation of indices
        np.random.seed(1234)
        rand_idx = np.random.permutation(self.x_values.shape[0])
        # Permute the data points
        self.x_values = self.x_values[rand_idx]
        self.y_values = self.y_values[rand_idx]
        self.t_values = self.t_values[rand_idx]
        # Slice the data for training and convert to torch tensors
        self.x_values = dtype1(self.x_values[:num_samples])
        self.y_values = dtype1(self.y_values[:num_samples])
        self.t_values = dtype1(self.t_values[:num_samples])

    def __len__(self):
        """
        Number of mini-batches in the dataset.
        """
        return self.num_batches

    def __getitem__(self, index):
        """
        Returns the mini-batch at the given index containing X.
        Args:
            index (int): index of the mini-batch.
        Returns:
            X: spatio-temporal coordinates x,y,t concatenated.
        """
        # Generate a batch of residual points
        x_values = self.x_values[index * self.batch_size: (index + 1) * self.batch_size]
        y_values = self.y_values[index * self.batch_size: (index + 1) * self.batch_size]
        t_values = self.t_values[index * self.batch_size: (index + 1) * self.batch_size]
        return torch.stack([x_values, y_values, t_values], 1)
268 | """ 269 | # Generate batch with residual points 270 | x_values = ( 271 | self.x_values[index * self.batch_size: (index + 1) * self.batch_size]) 272 | y_values = ( 273 | self.y_values[index * self.batch_size: (index + 1) * self.batch_size]) 274 | t_values = ( 275 | self.t_values[index * self.batch_size: (index + 1) * self.batch_size]) 276 | return torch.stack([x_values, y_values, t_values], 1) 277 | 278 | def derivatives(x_values, u_values): 279 | """ 280 | Create an input for the HPM model. 281 | Args: 282 | x_values (torch tensor): concatenated spatio-temporal and grid coordinaties (x,y,t). 283 | u_values (torch tensor): real-value function to differentiate. 284 | Returns: 285 | x, y, t, u, d2u/dx2, d2u/dy2, du/dt concatenated. 286 | """ 287 | # Save input in variables is necessary for gradient calculation 288 | x_values.requires_grad = True 289 | # Calculate derivatives with torch automatic differentiation 290 | # Move to the same device as prediction 291 | grads = torch.ones(u_values.shape, device=u_values.device) 292 | du_dx_values = torch.autograd.grad( 293 | u_values, 294 | x_values, 295 | create_graph=True, 296 | grad_outputs=grads)[0] 297 | #du_dx_values = [du/dx, du/dy, du/dt] 298 | u_x_values = du_dx_values[:, 0].reshape(u_values.shape) 299 | u_y_values = du_dx_values[:, 1].reshape(u_values.shape) 300 | u_t_values = du_dx_values[:, 2].reshape(u_values.shape) 301 | u_xx_values = torch.autograd.grad( 302 | u_x_values, x_values, create_graph=True, grad_outputs=grads)[0] 303 | u_yy_values = torch.autograd.grad( 304 | u_y_values, x_values, create_graph=True, grad_outputs=grads)[0] 305 | #u_xx = [u_xx, u_xy, u_xt] 306 | u_xx_values = u_xx_values[:, 0].reshape(u_values.shape) 307 | #u_yy = [u_yx, u_yy, u_yt] 308 | u_yy_values = u_yy_values[:, 1].reshape(u_values.shape) 309 | x_values, y_values, t_values = x_values.T 310 | 311 | x_values = x_values.reshape(u_values.shape) 312 | y_values = y_values.reshape(u_values.shape) 313 | t_values = t_values.reshape(u_values.shape) 314 | return torch.stack([x_values, y_values, t_values, u_values, u_x_values, u_y_values, 315 | u_xx_values, u_yy_values, u_t_values], 1).squeeze() 316 | -------------------------------------------------------------------------------- /examples/UKD_Heat_2d/pennes_hpm.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from argparse import ArgumentParser 3 | from datasets import * 4 | from torch.autograd import Variable 5 | from torch import Tensor, load, device 6 | from numpy import gradient 7 | from scipy.ndimage import median_filter 8 | from scipy.ndimage import binary_erosion as e 9 | from scipy.ndimage import binary_dilation as d 10 | import NeuralSolvers as nsolv 11 | 12 | if __name__ == "__main__": 13 | parser = ArgumentParser() 14 | parser.add_argument("--identifier", type=str, default="UKD_DeepHPM") 15 | parser.add_argument("--device", type = str, default = "GPU") 16 | # Learning parameters 17 | parser.add_argument("--name", type=str) 18 | parser.add_argument("--epochs", type=int, default=1000) 19 | parser.add_argument("--epochs_pt", type=int, default=10) 20 | parser.add_argument("--learning_rate", type=float, default=1e-4) 21 | 22 | parser.add_argument("--use_wandb", type=int, default=1) 23 | parser.add_argument("--weight", type=float, default=1.) 24 | parser.add_argument("--weight_hpm", type=float, default=1.) 
--------------------------------------------------------------------------------
/examples/UKD_Heat_2d/pennes_hpm.py:
--------------------------------------------------------------------------------
import os
from argparse import ArgumentParser
import torch
from torch import Tensor, load, device
from datasets import *
import NeuralSolvers as nsolv

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--identifier", type=str, default="UKD_DeepHPM")
    parser.add_argument("--device", type=str, default="cuda")
    # Learning parameters
    parser.add_argument("--name", type=str)
    parser.add_argument("--epochs", type=int, default=1000)
    parser.add_argument("--epochs_pt", type=int, default=10)
    parser.add_argument("--learning_rate", type=float, default=1e-4)

    parser.add_argument("--use_wandb", type=int, default=1)
    parser.add_argument("--use_horovod", type=int, default=0)
    parser.add_argument("--weight", type=float, default=1.)
    parser.add_argument("--weight_hpm", type=float, default=1.)
    # Dataset parameters
    parser.add_argument("--path_data", type=str)
    parser.add_argument("--num_t", type=int)
    parser.add_argument("--t_step", type=int, default=5)
    parser.add_argument("--pix_step", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=4096)
    parser.add_argument("--num_batches", type=int, default=28125)
    # Model parameters
    parser.add_argument("--hidden_size", type=int, default=500)
    parser.add_argument("--num_hidden", type=int, default=3)
    parser.add_argument("--convection", type=int, default=1)
    parser.add_argument("--linear_u", type=int, default=1)
    parser.add_argument("--cold_bolus", type=int, default=0)
    # Other parameters
    parser.add_argument("--pretrained", type=int, default=0)
    parser.add_argument("--pretrained_name", type=str, default='')
    args = parser.parse_args()

    # Information about the data
    data_info = {
        "path_data": args.path_data,
        "num_t": args.num_t,
        "t_step": args.t_step,
        "pix_step": args.pix_step,
        "num_x": 640,
        "num_y": 480,
        "t_min": load_frame(args.path_data, 0)[1].item(),
        "t_max": load_frame(args.path_data, args.num_t)[1].item(),
        "spat_res": 0.3
    }
    # Empirically found segmentation parameters for UKD data
    segm_params = [32.4, 5, 5] if not args.cold_bolus else [31.0, 15, 15]
    # Use half of the available data points: 0.5 * (#time points * #grid points * segm. coef.)
    num_batches = int(((args.num_t * 640 * 480 * 0.25) * 0.5) // args.batch_size)
    # Create the initial-condition & PDE datasets
    ic_dataset = InitialConditionDataset(
        data_info, args.batch_size, num_batches, segm_params)
    initial_condition = nsolv.InitialCondition(dataset=ic_dataset, name="Initial Condition", weight=args.weight)
    pde_dataset = PDEDataset(
        data_info,
        args.batch_size,
        num_batches,
        segm_params)
    low_bound = Tensor([49.8000, 22.2000, 0.0]) if not args.cold_bolus else Tensor([34.2000, 4.5000, 0.0])
    up_bound = Tensor([148.5000, 120.6000, 60.0]) if not args.cold_bolus else Tensor([164.4000, 112.2000, 50.0])
    # Interpolation model
    # Input: spatio-temporal coordinates of a point x,y,t.
    # Output: temperature u at the point.
    model = nsolv.models.MLP(input_size=3,
                             output_size=1,
                             hidden_size=args.hidden_size,
                             num_hidden=args.num_hidden,
                             lb=low_bound,
                             ub=up_bound)

    # Site-specific pre-trained interpolation weights; skipped if absent.
    pinn_path = "/bigdata/hplsim/aipp/Maksim/default_best_model_pinn.pt"
    if os.path.exists(pinn_path):
        model.load_state_dict(torch.load(pinn_path))

    # Heat source model
    # Input: spatio-temporal coordinates of a point x,y,t.
    # Output: heat source value at the point.
    hs_net = nsolv.models.MLP(input_size=3,
                              output_size=1,
                              hidden_size=100,
                              num_hidden=1,
                              lb=low_bound,
                              ub=up_bound)

    if args.pretrained:
        if len(args.pretrained_name):
            model.load_state_dict(load('./models/pretrained/' + args.pretrained_name + '.pt', map_location=device('cpu')))
        else:
            raise ValueError('pretrained model is not given but requested')
    # HPM model: du/dt = convection + linear(u) + heat_source
    # Input: output of the derivatives function for a point x,y,t.
    # Output: du/dt value for the point.
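    # Data flow of the HPM loss (illustrative sketch; the actual reduction
    # happens inside nsolv.pinn.HPMLoss): for each batch X = (x, y, t) drawn
    # from pde_dataset,
    #   u        = model(X)
    #   features = derivatives(X, u)   # (N, 9): x, y, t, u, u_x, u_y, u_xx, u_yy, u_t
    #   residual = du/dt - hpm_model(features), driven towards zero in training.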
    config = {'convection': args.convection, 'linear_u': args.linear_u, 'heat_source': 1}
    hpm_model = nsolv.models.PennesHPM(config, hs_net=hs_net)
    hpm_loss = nsolv.pinn.HPMLoss.HPMLoss(dataset=pde_dataset, hpm_input=derivatives, hpm_model=hpm_model,
                                          name="Pennes Equation", weight=args.weight_hpm)
    logger = nsolv.loggers.WandbLogger('thermal_hpm', args)
    # Initialize and fit a physics-informed neural network
    pinn = nsolv.PINN(
        model,
        input_dimension=8,
        output_dimension=1,
        pde_loss=hpm_loss,
        initial_condition=initial_condition,
        boundary_condition=None,
        device=args.device,
        use_horovod=args.use_horovod)
    pinn.fit(pretraining=args.epochs_pt, epochs=args.epochs, epochs_pt=args.epochs_pt, optimizer='Adam',
             learning_rate=args.learning_rate, lbfgs_finetuning=False,
             pinn_path='./models/' + args.name + '_best_model_pinn.pt',
             hpm_path='./models/' + args.name + '_best_model_hpm.pt',
             logger=logger, writing_cycle=1)
--------------------------------------------------------------------------------
/examples/UKD_Heat_2d/run_script.sh:
--------------------------------------------------------------------------------
#!/bin/bash -l

#SBATCH -p gpu
#SBATCH -A fwkt_v100
#SBATCH -t 23:59:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=6
#SBATCH -o ./logs/hostname_%j.out
#SBATCH --gres=gpu:4
#SBATCH --mem=0

module load cuda/11.2
module load python
module load gcc/5.5.0
module load openmpi/3.1.2

source /home/zhdano82/hpmtraining/horoenv/bin/activate
cd /home/zhdano82/hpmtraining/ukd/NeuralSolvers/examples/UKD_Heat_2d

mpirun -np 4 \
    -bind-to none -map-by slot \
    -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
    -mca pml ob1 -mca btl ^openib \
    python pennes_hpm.py --num_t 2999 --name 05_hpm --epochs_pt 1 --epochs 100 --path_data /home/zhdano82/hpmtraining/smooth_data/data_0_05/ --use_horovod 1 --batch_size 512 --weight_hpm 0.01 --pretrained 1 --pretrained_name '05'
--------------------------------------------------------------------------------
/images/1D_Schroedinger_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/images/1D_Schroedinger_training.gif
--------------------------------------------------------------------------------
/images/API_PINN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/images/API_PINN.png
--------------------------------------------------------------------------------
/images/cropped_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/images/cropped_logo.png
--------------------------------------------------------------------------------
/images/scalability.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Photon-AI-Research/NeuralSolvers/203350c7254fba4e78413faec6264fc8406c8e16/images/scalability.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | 
aiohappyeyeballs==2.4.3 2 | aiohttp==3.11.6 3 | aiosignal==1.3.1 4 | annotated-types==0.7.0 5 | anyio==4.6.2.post1 6 | appnope==0.1.4 7 | argon2-cffi==23.1.0 8 | argon2-cffi-bindings==21.2.0 9 | arrow==1.3.0 10 | asttokens==2.4.1 11 | async-lru==2.0.4 12 | async-timeout==4.0.3 13 | attrs==24.2.0 14 | babel==2.16.0 15 | beautifulsoup4==4.12.3 16 | bleach==6.2.0 17 | certifi==2024.8.30 18 | cffi==1.17.1 19 | charset-normalizer==3.4.0 20 | click==8.1.7 21 | comm==0.2.2 22 | contourpy==1.3.0 23 | cycler==0.12.1 24 | dataclasses-json==0.6.7 25 | debugpy==1.8.8 26 | decorator==5.1.1 27 | defusedxml==0.7.1 28 | distro==1.9.0 29 | docker-pycreds==0.4.0 30 | exceptiongroup==1.2.2 31 | executing==2.1.0 32 | fastjsonschema==2.20.0 33 | filelock==3.16.1 34 | fonttools==4.54.1 35 | fqdn==1.5.1 36 | frozenlist==1.5.0 37 | fsspec==2024.10.0 38 | gitdb==4.0.11 39 | GitPython==3.1.43 40 | h11==0.14.0 41 | h5py==3.12.1 42 | httpcore==1.0.7 43 | httpx==0.27.2 44 | httpx-sse==0.4.0 45 | idna==3.10 46 | imageio==2.36.1 47 | ipykernel==6.29.5 48 | ipython==8.29.0 49 | ipywidgets==8.1.5 50 | isoduration==20.11.0 51 | jedi==0.19.2 52 | Jinja2==3.1.4 53 | jiter==0.7.1 54 | json5==0.9.28 55 | jsonpatch==1.33 56 | jsonpointer==3.0.0 57 | jsonschema==4.23.0 58 | jsonschema-specifications==2024.10.1 59 | jupyter==1.1.1 60 | jupyter-console==6.6.3 61 | jupyter-events==0.10.0 62 | jupyter-lsp==2.2.5 63 | jupyter_client==8.6.3 64 | jupyter_core==5.7.2 65 | jupyter_server==2.14.2 66 | jupyter_server_terminals==0.5.3 67 | jupyterlab==4.2.6 68 | jupyterlab_pygments==0.3.0 69 | jupyterlab_server==2.27.3 70 | jupyterlab_widgets==3.0.13 71 | kiwisolver==1.4.7 72 | langchain==0.3.7 73 | langchain-community==0.3.7 74 | langchain-core==0.3.19 75 | langchain-openai==0.2.9 76 | langchain-text-splitters==0.3.2 77 | langsmith==0.1.143 78 | lazy_loader==0.4 79 | MarkupSafe==3.0.2 80 | marshmallow==3.23.1 81 | matplotlib==3.9.2 82 | matplotlib-inline==0.1.7 83 | mistune==3.0.2 84 | mpmath==1.3.0 85 | multidict==6.1.0 86 | mypy-extensions==1.0.0 87 | nbclient==0.10.0 88 | nbconvert==7.16.4 89 | nbformat==5.10.4 90 | nest-asyncio==1.6.0 91 | networkx==3.4.2 92 | notebook==7.2.2 93 | notebook_shim==0.2.4 94 | numpy==1.26.4 95 | openai==1.55.0 96 | opencv-python==4.10.0.84 97 | orjson==3.10.11 98 | overrides==7.7.0 99 | packaging==24.2 100 | pandas==2.2.3 101 | pandocfilters==1.5.1 102 | parso==0.8.4 103 | pexpect==4.9.0 104 | pillow==11.0.0 105 | platformdirs==4.3.6 106 | prometheus_client==0.21.0 107 | prompt_toolkit==3.0.48 108 | propcache==0.2.0 109 | protobuf==5.29.1 110 | psutil==6.1.0 111 | ptyprocess==0.7.0 112 | pure_eval==0.2.3 113 | py-cpuinfo==9.0.0 114 | pycparser==2.22 115 | pydantic==2.9.2 116 | pydantic-settings==2.6.1 117 | pydantic_core==2.23.4 118 | pyDOE==0.3.8 119 | Pygments==2.18.0 120 | pyparsing==3.2.0 121 | python-dateutil==2.9.0.post0 122 | python-dotenv==1.0.1 123 | python-json-logger==2.0.7 124 | pytz==2024.2 125 | PyYAML==6.0.2 126 | pyzmq==26.2.0 127 | referencing==0.35.1 128 | regex==2024.11.6 129 | requests==2.32.3 130 | requests-toolbelt==1.0.0 131 | rfc3339-validator==0.1.4 132 | rfc3986-validator==0.1.1 133 | rpds-py==0.21.0 134 | scikit-image==0.24.0 135 | scipy==1.14.1 136 | seaborn==0.13.2 137 | Send2Trash==1.8.3 138 | sentry-sdk==2.19.2 139 | setproctitle==1.3.4 140 | six==1.16.0 141 | smmap==5.0.1 142 | sniffio==1.3.1 143 | soupsieve==2.6 144 | SQLAlchemy==2.0.35 145 | stack-data==0.6.3 146 | sympy==1.13.1 147 | tenacity==9.0.0 148 | tensorboardX==2.6.2.2 149 | terminado==0.18.1 150 | 
tifffile==2024.9.20 151 | tiktoken==0.8.0 152 | tinycss2==1.4.0 153 | tomli==2.1.0 154 | torch==2.5.1 155 | torchvision==0.20.1 156 | tornado==6.4.1 157 | tqdm==4.67.0 158 | traitlets==5.14.3 159 | types-python-dateutil==2.9.0.20241003 160 | typing-inspect==0.9.0 161 | typing_extensions==4.12.2 162 | tzdata==2024.2 163 | ultralytics==8.3.34 164 | ultralytics-thop==2.0.12 165 | uri-template==1.3.0 166 | urllib3==2.2.3 167 | wandb==0.19.0 168 | wcwidth==0.2.13 169 | webcolors==24.11.1 170 | webencodings==0.5.1 171 | websocket-client==1.8.0 172 | widgetsnbextension==4.0.13 173 | yarl==1.17.2 174 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="NeuralSolvers",
    version="0.1.0",
    author="Nico Hoffmann, Patrick Stiller, Maksim Zhdanov, Jeyhun Rustamov, Raj Dhansukhbhai Sutariya",
    author_email="nico.hoffmann@saxony.ai",
    description="A framework for solving partial differential equations (PDEs) and inverse problems using physics-informed neural networks (PINNs) at scale",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Photon-AI-Research/NeuralSolvers",
    packages=find_packages(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.9",
    install_requires=[
    ],
    extras_require={
    },
    entry_points={
        "console_scripts": [
            "neuralsolvers=NeuralSolvers.cli:main",
        ],
    },
    include_package_data=True,
    package_data={
        "NeuralSolvers": ["examples/*"],
    },
)
--------------------------------------------------------------------------------
/tools/benchmarks_1d.py:
--------------------------------------------------------------------------------
from examples.Heat_Equation_1d import Heat_Equation
from examples.Schroedinger_1d import Schroedinger
from examples.Burgers_Equation_1d import Burgers_Equation
from NeuralSolvers.loggers.Python_Logger import PythonLogger
from NeuralSolvers.models import ModulatedMLP
import numpy as np
import torch

DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
NUM_EPOCHS = 1000  # 50000
DOMAIN_LOWER_BOUND = np.array([-1, 0.0])
DOMAIN_UPPER_BOUND = np.array([1.0, 1.0])
VISCOSITY = 0.01 / np.pi
NOISE = 0.0
NUM_INITIAL_POINTS = 100
NUM_COLLOCATION_POINTS = 10000


def run_benchmarks():
    plot_results_debug = True

    # The loggers store the final losses for comparison
    burger_log = PythonLogger()
    heat_log = PythonLogger()
    schrodinger_log = PythonLogger()
    burger_log_mod = PythonLogger()
    heat_log_mod = PythonLogger()
    schrodinger_log_mod = PythonLogger()

    '''
    Burgers Equation
    '''
    print("*** Burgers Equation ***")

    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
    DOMAIN_LOWER_BOUND = np.array([-1, 0.0])
    DOMAIN_UPPER_BOUND = np.array([1.0, 1.0])
    model = ModulatedMLP(
        input_size=2, output_size=1, device=DEVICE,
        hidden_size=40, num_hidden=8, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND,
        activation=torch.tanh
    )

    # premodulation (cuda)
    # [2024-12-14 23:03:02.502914]:Epoch 1000/1000 | PINN Loss 0.0066323378 | Initial Condition loss: 0.003978 | PDE loss: 0.002655 | Epoch Duration 0.02852

    ## vanilla PINN
    # [2024-12-14 21:57:03.422603]:Epoch 1000/1000 | PINN Loss 0.0302316695 | Initial Condition loss: 0.020797 | PDE loss: 0.009435 | Epoch Duration 0.00928

    pinn = Burgers_Equation.setup_pinn(model=model, file_path='../examples/Burgers_Equation_1d/burgers_shock.mat')
    Burgers_Equation.train_pinn(pinn, Burgers_Equation.NUM_EPOCHS, logger=burger_log_mod)

    pinn = Burgers_Equation.setup_pinn(model=None, file_path='../examples/Burgers_Equation_1d/burgers_shock.mat')
    Burgers_Equation.train_pinn(pinn, Burgers_Equation.NUM_EPOCHS, logger=burger_log)

    if plot_results_debug:
        t, x, exact_solution = Burgers_Equation.load_burger_data(file_path='../examples/Burgers_Equation_1d/burgers_shock.mat')
        Burgers_Equation.plot_solution(pinn, t, x, exact_solution)

    '''
    Heat Equation
    '''
    print("*** Heat Equation ***")
    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
    DOMAIN_LOWER_BOUND = np.array([0, 0.0])
    DOMAIN_UPPER_BOUND = np.array([1.0, 2.0])

    model = ModulatedMLP(
        input_size=2, output_size=1, device=DEVICE,
        hidden_size=100, num_hidden=4, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND,
        activation=torch.tanh
    )

    # premodulation (mps)
    # [2024-12-15 17:58:13.849840]:Epoch 1000/1000 | PINN Loss 0.0001808163 | Initial Condition loss loss: 0.000020 | PDE loss loss: 0.000151 | Lower dirichlet BC loss: 0.000005 | Upper dirichlet BC loss: 0.000004 | Epoch Duration 0.25264

    # vanilla PINN
    # [2024-12-15 19:35:19.770822]:Epoch 1000/1000 | PINN Loss 0.0004855946 | Initial Condition loss loss: 0.000077 | PDE loss loss: 0.000373 | Lower dirichlet BC loss: 0.000014 | Upper dirichlet BC loss: 0.000022 | Epoch Duration 0.07480

    pinn = Heat_Equation.setup_pinn(model=model)
    Heat_Equation.train_pinn(pinn, Heat_Equation.NUM_EPOCHS, logger=heat_log_mod)

    pinn = Heat_Equation.setup_pinn(model=None)
    Heat_Equation.train_pinn(pinn, Heat_Equation.NUM_EPOCHS, logger=heat_log)

    if plot_results_debug:
        Heat_Equation.plot_solution(pinn)
        Heat_Equation.plot_analytical_solution()
    '''
    Schroedinger Equation
    '''
    print("*** Schroedinger Equation ***")
    # premodulation (mps)
    # [2024-12-15 18:12:43.802903]:Epoch 1000/1000 | PINN Loss 0.0008542041 | Initial Condition loss loss: 0.000132 | PDE loss loss: 0.000715 | u periodic boundary condition loss: 0.000005 | v periodic boundary condition loss: 0.000000 | u_x periodic boundary condition loss: 0.000002 | v_x periodic boundary condition loss: 0.000001 | Epoch Duration 0.50807

    # vanilla PINN
    # [2024-12-15 19:37:46.156288]:Epoch 1000/1000 | PINN Loss 0.0211467128 | Initial Condition loss loss: 0.011177 | PDE loss loss: 0.009758 | u periodic boundary condition loss: 0.000022 | v periodic boundary condition loss: 0.000160 | u_x periodic boundary condition loss: 0.000020 | v_x periodic boundary condition loss: 0.000010 | Epoch Duration 0.14279

    file_path = '../examples/Schroedinger_1d/NLS.mat'
    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
    DOMAIN_LOWER_BOUND = np.array([-5.0, 0.0])
    DOMAIN_UPPER_BOUND = np.array([5.0, np.pi / 2])
    model = ModulatedMLP(
        input_size=2, output_size=2, device=DEVICE,
        hidden_size=100, num_hidden=4, lb=DOMAIN_LOWER_BOUND, ub=DOMAIN_UPPER_BOUND,
        activation=torch.tanh
    )
    pinn = Schroedinger.setup_pinn(file_path=file_path, model=model)
    Schroedinger.train_pinn(pinn, Schroedinger.NUM_EPOCHS, logger=schrodinger_log_mod)

    pinn = Schroedinger.setup_pinn(file_path=file_path, model=None)
    Schroedinger.train_pinn(pinn, Schroedinger.NUM_EPOCHS, logger=schrodinger_log)

    if plot_results_debug:
        Schroedinger.plot_solution(pinn, file_path=file_path)
        Schroedinger.plot_exact_solution(file_path=file_path)
        Schroedinger.compare_solutions(pinn, file_path=file_path)

    print("*** Burgers Equation ***")
    print(", ".join([f"{key}: {value}" for key, value in burger_log.loss_history.items()]))
    print(", ".join([f"{key}: {value}" for key, value in burger_log_mod.loss_history.items()]))

    print("*** Heat Equation ***")
    print(", ".join([f"{key}: {value}" for key, value in heat_log.loss_history.items()]))
    print(", ".join([f"{key}: {value}" for key, value in heat_log_mod.loss_history.items()]))

    print("*** Schroedinger Equation ***")
    print(", ".join([f"{key}: {value}" for key, value in schrodinger_log.loss_history.items()]))
    print(", ".join([f"{key}: {value}" for key, value in schrodinger_log_mod.loss_history.items()]))

    '''
    Reference results from a previous run:
    *** Burgers Equation ***
    Baseline PINN: 0.009729756973683834, Non-Weighted PINN Loss: 0.004864878486841917, Initial Condition: 0.00315843359567225, PDE: 0.0017064448911696672, Initial Condition_weight: 1.0, PDE_weight: 1.0
    Modulated PINN: 0.0039318641647696495, Non-Weighted PINN Loss: 0.0019659320823848248, Initial Condition: 0.0007832138217054307, PDE: 0.001182718318887055, Initial Condition_weight: 1.0, PDE_weight: 1.0
    *** Heat Equation ***
    Baseline PINN: 0.0004343787732068449, Non-Weighted PINN Loss: 0.00010859469330171123, Initial Condition loss: 2.2767631890019402e-05, PDE loss: 7.915394962765276e-05, Lower dirichlet BC: 3.209661826986121e-06, Upper dirichlet BC: 3.463446319074137e-06, Initial Condition loss_weight: 1.0, PDE loss_weight: 1, Lower dirichlet BC_weight: 1.0, Upper dirichlet BC_weight: 1.0
    Modulated PINN: 0.0007307189516723156, Non-Weighted PINN Loss: 0.0001826797379180789, Initial Condition loss: 3.6458401154959574e-05, PDE loss: 0.00012835663801524788, Lower dirichlet BC: 1.0459781151439529e-05, Upper dirichlet BC: 7.4049144132004585e-06, Initial Condition loss_weight: 1.0, PDE loss_weight: 1, Lower dirichlet BC_weight: 1.0, Upper dirichlet BC_weight: 1.0
    *** Schroedinger Equation ***
    Baseline PINN: 0.020892612636089325, Non-Weighted PINN Loss: 0.003482102183625102, Initial Condition loss: 0.0018387357704341412, PDE loss: 0.0016373837133869529, u periodic boundary condition: 7.1095240627983e-07, v periodic boundary condition: 4.5075793764226546e-07, u_x periodic boundary condition: 3.362873030710034e-06, v_x periodic boundary condition: 1.4582412859454053e-06, Initial Condition loss_weight: 1.0, PDE loss_weight: 1.0, u periodic boundary condition_weight: 1.0, v periodic boundary condition_weight: 1.0, u_x periodic boundary condition_weight: 1.0, v_x periodic boundary condition_weight: 1.0
    Modulated PINN: 0.0006141893682070076, Non-Weighted PINN Loss: 0.00010236489470116794, Initial Condition loss: 6.0788215705542825e-06, PDE loss: 9.093437984120101e-05, u periodic boundary condition: 3.1029617275635246e-07, v periodic boundary condition: 4.458372131921351e-06, u_x periodic boundary condition: 3.841959141936968e-07, v_x periodic boundary condition: 1.9883377433416172e-07, Initial Condition loss_weight: 1.0, PDE loss_weight: 1.0, u periodic boundary condition_weight: 1.0, v periodic boundary condition_weight: 1.0, u_x periodic boundary condition_weight: 1.0, v_x periodic boundary condition_weight: 1.0
    '''


if __name__ == "__main__":
    run_benchmarks()
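# A quick way to turn the printed histories into baseline-vs-modulated ratios
# (sketch; assumes numeric entries in PythonLogger.loss_history as printed above):
#
#   def improvement(baseline, modulated):
#       return {k: baseline.loss_history[k] / modulated.loss_history[k]
#               for k in baseline.loss_history
#               if isinstance(baseline.loss_history[k], (int, float))
#               and modulated.loss_history.get(k)}
#
#   # e.g. improvement(burger_log, burger_log_mod) shows roughly a 2.5x lower
#   # final PINN loss for the modulated model in the reference Burgers run.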
--------------------------------------------------------------------------------
/tools/embedding_tests.py:
--------------------------------------------------------------------------------
import scipy.io
import numpy as np
import torch
from torchvision.models import vit_b_16
from torchvision.transforms import Compose, Resize, Normalize


def run_test():
    data = scipy.io.loadmat('../examples/Burgers_Equation_1d/burgers_shock.mat')
    t = data['t'].flatten()[:, None]
    x = data['x'].flatten()[:, None]

    Exact = torch.Tensor(np.real(data['usol']).T).float().to('cuda')
    # Repeat the single-channel field into a 3-channel "image" batch: (1, 3, H, W)
    Exact = Exact.unsqueeze(0).unsqueeze(0).repeat(1, 3, 1, 1)

    vit_model = vit_b_16(weights="IMAGENET1K_V1").to('cuda')
    vit_model.eval()  # set to evaluation mode
    vit_transform = Compose([
        Resize((224, 224)),  # resize to the ViT input size

        # ImageNet normalization (required for the pretrained ViT weights)
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Simple [-1,1] normalization (alternative for custom data):
    # Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    with torch.no_grad():
        u_i_image = vit_transform(Exact)
        # Forward pass through the full model (classification logits; see below)
        vit_embedding = vit_model(u_i_image)

    print(vit_embedding.shape)


if __name__ == "__main__":
    run_test()
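# Note: vit_b_16 as called above ends in its classification head, so
# `vit_embedding` holds ImageNet logits of shape (1, 1000). To obtain an actual
# 768-d feature embedding (the class token), the standard torchvision pattern
# is to replace the head with an identity (sketch):
#
#   vit_model.heads = torch.nn.Identity()
#   with torch.no_grad():
#       features = vit_model(u_i_image)   # shape: (1, 768)
--------------------------------------------------------------------------------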